Run pgindent on 9.2 source tree in preparation for first 9.3
author: Bruce Momjian <bruce@momjian.us>
Sun, 10 Jun 2012 19:20:04 +0000 (15:20 -0400)
committer: Bruce Momjian <bruce@momjian.us>
Sun, 10 Jun 2012 19:20:04 +0000 (15:20 -0400)
commit-fest.

494 files changed:
contrib/auto_explain/auto_explain.c
contrib/dblink/dblink.c
contrib/file_fdw/file_fdw.c
contrib/pg_archivecleanup/pg_archivecleanup.c
contrib/pg_stat_statements/pg_stat_statements.c
contrib/pg_test_fsync/pg_test_fsync.c
contrib/pg_test_timing/pg_test_timing.c
contrib/pg_trgm/trgm_gist.c
contrib/pg_upgrade/check.c
contrib/pg_upgrade/controldata.c
contrib/pg_upgrade/exec.c
contrib/pg_upgrade/file.c
contrib/pg_upgrade/function.c
contrib/pg_upgrade/info.c
contrib/pg_upgrade/option.c
contrib/pg_upgrade/pg_upgrade.c
contrib/pg_upgrade/pg_upgrade.h
contrib/pg_upgrade/relfilenode.c
contrib/pg_upgrade/server.c
contrib/pg_upgrade/tablespace.c
contrib/pg_upgrade/version_old_8_3.c
contrib/pgbench/pgbench.c
contrib/pgcrypto/crypt-md5.c
contrib/pgcrypto/px.h
contrib/pgstattuple/pgstatindex.c
contrib/pgstattuple/pgstattuple.c
contrib/sepgsql/database.c
contrib/sepgsql/dml.c
contrib/sepgsql/hooks.c
contrib/sepgsql/label.c
contrib/sepgsql/proc.c
contrib/sepgsql/relation.c
contrib/sepgsql/schema.c
contrib/sepgsql/sepgsql.h
contrib/sepgsql/uavc.c
contrib/spi/refint.c
contrib/vacuumlo/vacuumlo.c
contrib/xml2/xpath.c
contrib/xml2/xslt_proc.c
src/backend/access/gist/gist.c
src/backend/access/gist/gistbuild.c
src/backend/access/gist/gistbuildbuffers.c
src/backend/access/gist/gistproc.c
src/backend/access/gist/gistscan.c
src/backend/access/gist/gistsplit.c
src/backend/access/hash/hashovfl.c
src/backend/access/heap/heapam.c
src/backend/access/heap/hio.c
src/backend/access/heap/tuptoaster.c
src/backend/access/heap/visibilitymap.c
src/backend/access/index/genam.c
src/backend/access/index/indexam.c
src/backend/access/nbtree/nbtcompare.c
src/backend/access/nbtree/nbtpage.c
src/backend/access/nbtree/nbtree.c
src/backend/access/nbtree/nbtsearch.c
src/backend/access/nbtree/nbtutils.c
src/backend/access/spgist/spgdoinsert.c
src/backend/access/spgist/spginsert.c
src/backend/access/spgist/spgkdtreeproc.c
src/backend/access/spgist/spgquadtreeproc.c
src/backend/access/spgist/spgscan.c
src/backend/access/spgist/spgtextproc.c
src/backend/access/spgist/spgutils.c
src/backend/access/spgist/spgvacuum.c
src/backend/access/spgist/spgxlog.c
src/backend/access/transam/clog.c
src/backend/access/transam/slru.c
src/backend/access/transam/twophase.c
src/backend/access/transam/varsup.c
src/backend/access/transam/xact.c
src/backend/access/transam/xlog.c
src/backend/access/transam/xlogutils.c
src/backend/catalog/aclchk.c
src/backend/catalog/dependency.c
src/backend/catalog/heap.c
src/backend/catalog/index.c
src/backend/catalog/namespace.c
src/backend/catalog/objectaddress.c
src/backend/catalog/pg_constraint.c
src/backend/catalog/pg_depend.c
src/backend/catalog/pg_proc.c
src/backend/catalog/pg_shdepend.c
src/backend/catalog/storage.c
src/backend/commands/analyze.c
src/backend/commands/cluster.c
src/backend/commands/copy.c
src/backend/commands/createas.c
src/backend/commands/dbcommands.c
src/backend/commands/dropcmds.c
src/backend/commands/explain.c
src/backend/commands/extension.c
src/backend/commands/foreigncmds.c
src/backend/commands/functioncmds.c
src/backend/commands/indexcmds.c
src/backend/commands/lockcmds.c
src/backend/commands/opclasscmds.c
src/backend/commands/prepare.c
src/backend/commands/proclang.c
src/backend/commands/seclabel.c
src/backend/commands/sequence.c
src/backend/commands/tablecmds.c
src/backend/commands/tablespace.c
src/backend/commands/trigger.c
src/backend/commands/typecmds.c
src/backend/commands/user.c
src/backend/commands/vacuum.c
src/backend/commands/vacuumlazy.c
src/backend/commands/view.c
src/backend/executor/execCurrent.c
src/backend/executor/execMain.c
src/backend/executor/execQual.c
src/backend/executor/execUtils.c
src/backend/executor/functions.c
src/backend/executor/nodeBitmapHeapscan.c
src/backend/executor/nodeIndexonlyscan.c
src/backend/executor/nodeMaterial.c
src/backend/executor/nodeMergeAppend.c
src/backend/executor/nodeMergejoin.c
src/backend/executor/nodeModifyTable.c
src/backend/executor/nodeSetOp.c
src/backend/executor/spi.c
src/backend/libpq/auth.c
src/backend/libpq/be-secure.c
src/backend/libpq/hba.c
src/backend/libpq/pqcomm.c
src/backend/nodes/bitmapset.c
src/backend/nodes/copyfuncs.c
src/backend/nodes/equalfuncs.c
src/backend/nodes/list.c
src/backend/nodes/nodeFuncs.c
src/backend/nodes/outfuncs.c
src/backend/nodes/print.c
src/backend/nodes/readfuncs.c
src/backend/nodes/tidbitmap.c
src/backend/optimizer/geqo/geqo_selection.c
src/backend/optimizer/path/allpaths.c
src/backend/optimizer/path/costsize.c
src/backend/optimizer/path/equivclass.c
src/backend/optimizer/path/indxpath.c
src/backend/optimizer/path/joinpath.c
src/backend/optimizer/path/joinrels.c
src/backend/optimizer/path/orindxpath.c
src/backend/optimizer/path/pathkeys.c
src/backend/optimizer/plan/createplan.c
src/backend/optimizer/plan/initsplan.c
src/backend/optimizer/plan/planagg.c
src/backend/optimizer/plan/planmain.c
src/backend/optimizer/plan/planner.c
src/backend/optimizer/plan/setrefs.c
src/backend/optimizer/plan/subselect.c
src/backend/optimizer/prep/prepjointree.c
src/backend/optimizer/prep/prepunion.c
src/backend/optimizer/util/clauses.c
src/backend/optimizer/util/pathnode.c
src/backend/optimizer/util/placeholder.c
src/backend/optimizer/util/plancat.c
src/backend/optimizer/util/predtest.c
src/backend/optimizer/util/relnode.c
src/backend/optimizer/util/var.c
src/backend/parser/analyze.c
src/backend/parser/parse_coerce.c
src/backend/parser/parse_expr.c
src/backend/parser/parse_func.c
src/backend/parser/parse_relation.c
src/backend/parser/parse_target.c
src/backend/parser/parse_type.c
src/backend/parser/parse_utilcmd.c
src/backend/port/darwin/system.c
src/backend/port/dynloader/aix.h
src/backend/port/dynloader/cygwin.h
src/backend/port/dynloader/freebsd.h
src/backend/port/dynloader/irix.h
src/backend/port/dynloader/linux.h
src/backend/port/dynloader/netbsd.h
src/backend/port/dynloader/openbsd.h
src/backend/port/dynloader/osf.h
src/backend/port/dynloader/sco.h
src/backend/port/dynloader/solaris.h
src/backend/port/dynloader/unixware.h
src/backend/port/dynloader/win32.h
src/backend/port/unix_latch.c
src/backend/port/win32/mingwcompat.c
src/backend/port/win32/socket.c
src/backend/port/win32/timer.c
src/backend/port/win32_latch.c
src/backend/port/win32_sema.c
src/backend/postmaster/autovacuum.c
src/backend/postmaster/bgwriter.c
src/backend/postmaster/checkpointer.c
src/backend/postmaster/pgarch.c
src/backend/postmaster/pgstat.c
src/backend/postmaster/postmaster.c
src/backend/postmaster/syslogger.c
src/backend/postmaster/walwriter.c
src/backend/regex/regc_locale.c
src/backend/regex/regc_pg_locale.c
src/backend/regex/regcomp.c
src/backend/regex/rege_dfa.c
src/backend/regex/regerror.c
src/backend/regex/regexec.c
src/backend/replication/basebackup.c
src/backend/replication/syncrep.c
src/backend/replication/walreceiver.c
src/backend/replication/walreceiverfuncs.c
src/backend/replication/walsender.c
src/backend/rewrite/rewriteDefine.c
src/backend/rewrite/rewriteSupport.c
src/backend/storage/buffer/bufmgr.c
src/backend/storage/buffer/freelist.c
src/backend/storage/file/fd.c
src/backend/storage/ipc/pmsignal.c
src/backend/storage/ipc/procarray.c
src/backend/storage/ipc/sinval.c
src/backend/storage/ipc/sinvaladt.c
src/backend/storage/ipc/standby.c
src/backend/storage/lmgr/lock.c
src/backend/storage/lmgr/lwlock.c
src/backend/storage/lmgr/predicate.c
src/backend/storage/lmgr/proc.c
src/backend/storage/lmgr/s_lock.c
src/backend/storage/smgr/md.c
src/backend/storage/smgr/smgr.c
src/backend/tcop/postgres.c
src/backend/tcop/utility.c
src/backend/tsearch/dict_thesaurus.c
src/backend/tsearch/spell.c
src/backend/tsearch/to_tsany.c
src/backend/tsearch/ts_utils.c
src/backend/utils/adt/acl.c
src/backend/utils/adt/array_selfuncs.c
src/backend/utils/adt/array_typanalyze.c
src/backend/utils/adt/cash.c
src/backend/utils/adt/date.c
src/backend/utils/adt/datetime.c
src/backend/utils/adt/dbsize.c
src/backend/utils/adt/float.c
src/backend/utils/adt/formatting.c
src/backend/utils/adt/inet_net_pton.c
src/backend/utils/adt/json.c
src/backend/utils/adt/lockfuncs.c
src/backend/utils/adt/mac.c
src/backend/utils/adt/misc.c
src/backend/utils/adt/numeric.c
src/backend/utils/adt/pg_locale.c
src/backend/utils/adt/pgstatfuncs.c
src/backend/utils/adt/rangetypes.c
src/backend/utils/adt/rangetypes_gist.c
src/backend/utils/adt/ruleutils.c
src/backend/utils/adt/selfuncs.c
src/backend/utils/adt/timestamp.c
src/backend/utils/adt/tsgistidx.c
src/backend/utils/adt/tsquery_util.c
src/backend/utils/adt/tsrank.c
src/backend/utils/adt/tsvector_op.c
src/backend/utils/adt/varbit.c
src/backend/utils/adt/varchar.c
src/backend/utils/adt/varlena.c
src/backend/utils/adt/xml.c
src/backend/utils/cache/catcache.c
src/backend/utils/cache/inval.c
src/backend/utils/cache/lsyscache.c
src/backend/utils/cache/plancache.c
src/backend/utils/cache/relcache.c
src/backend/utils/cache/ts_cache.c
src/backend/utils/error/elog.c
src/backend/utils/fmgr/fmgr.c
src/backend/utils/fmgr/funcapi.c
src/backend/utils/init/miscinit.c
src/backend/utils/mb/wchar.c
src/backend/utils/misc/guc.c
src/backend/utils/mmgr/portalmem.c
src/backend/utils/sort/sortsupport.c
src/backend/utils/sort/tuplesort.c
src/backend/utils/sort/tuplestore.c
src/backend/utils/time/snapmgr.c
src/backend/utils/time/tqual.c
src/bin/initdb/findtimezone.c
src/bin/initdb/initdb.c
src/bin/pg_basebackup/pg_basebackup.c
src/bin/pg_basebackup/pg_receivexlog.c
src/bin/pg_basebackup/receivelog.c
src/bin/pg_basebackup/receivelog.h
src/bin/pg_basebackup/streamutil.c
src/bin/pg_ctl/pg_ctl.c
src/bin/pg_dump/common.c
src/bin/pg_dump/dumputils.c
src/bin/pg_dump/dumputils.h
src/bin/pg_dump/pg_backup.h
src/bin/pg_dump/pg_backup_archiver.c
src/bin/pg_dump/pg_backup_archiver.h
src/bin/pg_dump/pg_backup_custom.c
src/bin/pg_dump/pg_backup_db.c
src/bin/pg_dump/pg_backup_directory.c
src/bin/pg_dump/pg_backup_tar.c
src/bin/pg_dump/pg_dump.c
src/bin/pg_dump/pg_dump_sort.c
src/bin/pg_dump/pg_dumpall.c
src/bin/pgevent/pgevent.c
src/bin/psql/command.c
src/bin/psql/common.c
src/bin/psql/copy.c
src/bin/psql/describe.c
src/bin/psql/help.c
src/bin/psql/input.c
src/bin/psql/print.c
src/bin/psql/print.h
src/bin/psql/startup.c
src/bin/psql/stringutils.c
src/bin/psql/tab-complete.c
src/bin/psql/variables.c
src/bin/scripts/clusterdb.c
src/bin/scripts/common.c
src/bin/scripts/common.h
src/bin/scripts/createlang.c
src/bin/scripts/dropdb.c
src/bin/scripts/droplang.c
src/bin/scripts/reindexdb.c
src/bin/scripts/vacuumdb.c
src/include/access/gist_private.h
src/include/access/heapam.h
src/include/access/htup.h
src/include/access/nbtree.h
src/include/access/slru.h
src/include/access/spgist.h
src/include/access/spgist_private.h
src/include/access/xact.h
src/include/access/xlog_internal.h
src/include/catalog/catalog.h
src/include/catalog/genbki.h
src/include/catalog/index.h
src/include/catalog/namespace.h
src/include/catalog/objectaccess.h
src/include/catalog/objectaddress.h
src/include/catalog/pg_aggregate.h
src/include/catalog/pg_attrdef.h
src/include/catalog/pg_attribute.h
src/include/catalog/pg_constraint.h
src/include/catalog/pg_control.h
src/include/catalog/pg_database.h
src/include/catalog/pg_db_role_setting.h
src/include/catalog/pg_default_acl.h
src/include/catalog/pg_description.h
src/include/catalog/pg_extension.h
src/include/catalog/pg_foreign_data_wrapper.h
src/include/catalog/pg_foreign_server.h
src/include/catalog/pg_foreign_table.h
src/include/catalog/pg_index.h
src/include/catalog/pg_language.h
src/include/catalog/pg_largeobject.h
src/include/catalog/pg_largeobject_metadata.h
src/include/catalog/pg_namespace.h
src/include/catalog/pg_opclass.h
src/include/catalog/pg_operator.h
src/include/catalog/pg_pltemplate.h
src/include/catalog/pg_proc.h
src/include/catalog/pg_range.h
src/include/catalog/pg_rewrite.h
src/include/catalog/pg_seclabel.h
src/include/catalog/pg_shdescription.h
src/include/catalog/pg_shseclabel.h
src/include/catalog/pg_statistic.h
src/include/catalog/pg_tablespace.h
src/include/catalog/pg_trigger.h
src/include/catalog/pg_ts_dict.h
src/include/catalog/pg_type.h
src/include/commands/createas.h
src/include/commands/defrem.h
src/include/commands/explain.h
src/include/commands/tablecmds.h
src/include/commands/typecmds.h
src/include/commands/vacuum.h
src/include/datatype/timestamp.h
src/include/executor/executor.h
src/include/executor/instrument.h
src/include/executor/spi_priv.h
src/include/foreign/fdwapi.h
src/include/lib/stringinfo.h
src/include/libpq/hba.h
src/include/libpq/ip.h
src/include/nodes/execnodes.h
src/include/nodes/parsenodes.h
src/include/nodes/primnodes.h
src/include/nodes/relation.h
src/include/optimizer/cost.h
src/include/optimizer/pathnode.h
src/include/optimizer/paths.h
src/include/optimizer/prep.h
src/include/optimizer/subselect.h
src/include/parser/analyze.h
src/include/pg_config_manual.h
src/include/pg_trace.h
src/include/pgstat.h
src/include/port.h
src/include/port/win32.h
src/include/postgres.h
src/include/postmaster/postmaster.h
src/include/regex/regguts.h
src/include/replication/walprotocol.h
src/include/replication/walreceiver.h
src/include/replication/walsender_private.h
src/include/rewrite/rewriteSupport.h
src/include/snowball/header.h
src/include/storage/barrier.h
src/include/storage/latch.h
src/include/storage/lock.h
src/include/storage/lwlock.h
src/include/storage/predicate.h
src/include/storage/proc.h
src/include/storage/procarray.h
src/include/storage/sinval.h
src/include/storage/smgr.h
src/include/tsearch/ts_public.h
src/include/utils/acl.h
src/include/utils/builtins.h
src/include/utils/guc.h
src/include/utils/guc_tables.h
src/include/utils/json.h
src/include/utils/lsyscache.h
src/include/utils/memutils.h
src/include/utils/pg_crc_tables.h
src/include/utils/plancache.h
src/include/utils/rangetypes.h
src/include/utils/rel.h
src/include/utils/selfuncs.h
src/include/utils/sortsupport.h
src/include/utils/timestamp.h
src/include/utils/tqual.h
src/include/utils/typcache.h
src/include/utils/xml.h
src/interfaces/ecpg/ecpglib/connect.c
src/interfaces/ecpg/ecpglib/execute.c
src/interfaces/ecpg/ecpglib/extern.h
src/interfaces/ecpg/pgtypeslib/dt.h
src/interfaces/ecpg/preproc/type.c
src/interfaces/libpq/fe-connect.c
src/interfaces/libpq/fe-exec.c
src/interfaces/libpq/fe-protocol2.c
src/interfaces/libpq/fe-protocol3.c
src/interfaces/libpq/fe-secure.c
src/interfaces/libpq/libpq-fe.h
src/interfaces/libpq/libpq-int.h
src/interfaces/libpq/test/uri-regress.c
src/pl/plperl/plperl.c
src/pl/plperl/plperl_helpers.h
src/pl/plpgsql/src/pl_comp.c
src/pl/plpgsql/src/pl_exec.c
src/pl/plpython/plpy_cursorobject.c
src/pl/plpython/plpy_cursorobject.h
src/pl/plpython/plpy_elog.c
src/pl/plpython/plpy_elog.h
src/pl/plpython/plpy_exec.c
src/pl/plpython/plpy_exec.h
src/pl/plpython/plpy_main.c
src/pl/plpython/plpy_main.h
src/pl/plpython/plpy_planobject.h
src/pl/plpython/plpy_plpymodule.c
src/pl/plpython/plpy_plpymodule.h
src/pl/plpython/plpy_procedure.h
src/pl/plpython/plpy_resultobject.c
src/pl/plpython/plpy_resultobject.h
src/pl/plpython/plpy_spi.c
src/pl/plpython/plpy_spi.h
src/pl/plpython/plpy_subxactobject.c
src/pl/plpython/plpy_subxactobject.h
src/pl/plpython/plpy_typeio.c
src/pl/plpython/plpy_typeio.h
src/pl/plpython/plpy_util.c
src/pl/plpython/plpy_util.h
src/pl/plpython/plpython.h
src/port/erand48.c
src/port/fls.c
src/port/getaddrinfo.c
src/port/path.c
src/port/win32setlocale.c
src/test/isolation/isolationtester.c
src/test/regress/pg_regress.c
src/test/thread/thread_test.c
src/timezone/pgtz.c
src/tools/msvc/Install.pm
src/tools/msvc/MSBuildProject.pm
src/tools/msvc/Mkvcbuild.pm
src/tools/msvc/Project.pm
src/tools/msvc/Solution.pm
src/tools/msvc/VCBuildProject.pm
src/tools/msvc/VSObjectFactory.pm
src/tools/msvc/build.pl
src/tools/msvc/builddoc.pl
src/tools/msvc/config_default.pl
src/tools/msvc/gendef.pl
src/tools/msvc/install.pl
src/tools/msvc/pgbison.pl
src/tools/msvc/pgflex.pl
src/tools/msvc/vcregress.pl

index e48ea489dc5bb87f48037d058a4aae9f30a54f0d..ad333b664471aee25c15ff6dd4dcde5ac702845c 100644 (file)
@@ -23,7 +23,7 @@ static int    auto_explain_log_min_duration = -1; /* msec or -1 */
 static bool auto_explain_log_analyze = false;
 static bool auto_explain_log_verbose = false;
 static bool auto_explain_log_buffers = false;
-static bool auto_explain_log_timing  = false;
+static bool auto_explain_log_timing = false;
 static int     auto_explain_log_format = EXPLAIN_FORMAT_TEXT;
 static bool auto_explain_log_nested_statements = false;
 
index 71acb35af6ff3a7d6e5a7cd3e57688cb56a687fb..1e62d8091a9d2bdf60af6745d5a01ee14ee5cf5a 100644 (file)
@@ -1140,7 +1140,7 @@ storeHandler(PGresult *res, const PGdataValue *columns,
         * strings and add null termination.  As a micro-optimization, allocate
         * all the strings with one palloc.
         */
-       pbuflen = nfields;              /* count the null terminators themselves */
+       pbuflen = nfields;                      /* count the null terminators themselves */
        for (i = 0; i < nfields; i++)
        {
                int                     len = columns[i].len;
index 66fd0e62cc8da2b9f9b31ae4568ddf1fb3b0d3b6..e3b9223b3ee7a102efef7e1c18192eb64494864b 100644 (file)
@@ -109,17 +109,17 @@ PG_FUNCTION_INFO_V1(file_fdw_validator);
  * FDW callback routines
  */
 static void fileGetForeignRelSize(PlannerInfo *root,
-                                                                 RelOptInfo *baserel,
-                                                                 Oid foreigntableid);
+                                         RelOptInfo *baserel,
+                                         Oid foreigntableid);
 static void fileGetForeignPaths(PlannerInfo *root,
-                                                               RelOptInfo *baserel,
-                                                               Oid foreigntableid);
+                                       RelOptInfo *baserel,
+                                       Oid foreigntableid);
 static ForeignScan *fileGetForeignPlan(PlannerInfo *root,
-                                                                          RelOptInfo *baserel,
-                                                                          Oid foreigntableid,
-                                                                          ForeignPath *best_path,
-                                                                          List *tlist,
-                                                                          List *scan_clauses);
+                                  RelOptInfo *baserel,
+                                  Oid foreigntableid,
+                                  ForeignPath *best_path,
+                                  List *tlist,
+                                  List *scan_clauses);
 static void fileExplainForeignScan(ForeignScanState *node, ExplainState *es);
 static void fileBeginForeignScan(ForeignScanState *node, int eflags);
 static TupleTableSlot *fileIterateForeignScan(ForeignScanState *node);
@@ -141,7 +141,7 @@ static void estimate_size(PlannerInfo *root, RelOptInfo *baserel,
 static void estimate_costs(PlannerInfo *root, RelOptInfo *baserel,
                           FileFdwPlanState *fdw_private,
                           Cost *startup_cost, Cost *total_cost);
-static int     file_acquire_sample_rows(Relation onerel, int elevel,
+static int file_acquire_sample_rows(Relation onerel, int elevel,
                                                 HeapTuple *rows, int targrows,
                                                 double *totalrows, double *totaldeadrows);
 
@@ -180,7 +180,7 @@ file_fdw_validator(PG_FUNCTION_ARGS)
        List       *options_list = untransformRelOptions(PG_GETARG_DATUM(0));
        Oid                     catalog = PG_GETARG_OID(1);
        char       *filename = NULL;
-       DefElem    *force_not_null = NULL;
+       DefElem    *force_not_null = NULL;
        List       *other_options = NIL;
        ListCell   *cell;
 
@@ -233,7 +233,7 @@ file_fdw_validator(PG_FUNCTION_ARGS)
                                         buf.len > 0
                                         ? errhint("Valid options in this context are: %s",
                                                           buf.data)
-                                        : errhint("There are no valid options in this context.")));
+                                 : errhint("There are no valid options in this context.")));
                }
 
                /*
@@ -393,13 +393,13 @@ get_file_fdw_attribute_options(Oid relid)
                options = GetForeignColumnOptions(relid, attnum);
                foreach(lc, options)
                {
-                       DefElem    *def = (DefElem *) lfirst(lc);
+                       DefElem    *def = (DefElem *) lfirst(lc);
 
                        if (strcmp(def->defname, "force_not_null") == 0)
                        {
                                if (defGetBoolean(def))
                                {
-                                       char   *attname = pstrdup(NameStr(attr->attname));
+                                       char       *attname = pstrdup(NameStr(attr->attname));
 
                                        fnncolumns = lappend(fnncolumns, makeString(attname));
                                }
@@ -429,8 +429,8 @@ fileGetForeignRelSize(PlannerInfo *root,
        FileFdwPlanState *fdw_private;
 
        /*
-        * Fetch options.  We only need filename at this point, but we might
-        * as well get everything and not need to re-fetch it later in planning.
+        * Fetch options.  We only need filename at this point, but we might as
+        * well get everything and not need to re-fetch it later in planning.
         */
        fdw_private = (FileFdwPlanState *) palloc(sizeof(FileFdwPlanState));
        fileGetOptions(foreigntableid,
@@ -468,13 +468,14 @@ fileGetForeignPaths(PlannerInfo *root,
                                                                         baserel->rows,
                                                                         startup_cost,
                                                                         total_cost,
-                                                                        NIL, /* no pathkeys */
-                                                                        NULL, /* no outer rel either */
-                                                                        NIL)); /* no fdw_private data */
+                                                                        NIL,           /* no pathkeys */
+                                                                        NULL,          /* no outer rel either */
+                                                                        NIL));         /* no fdw_private data */
 
        /*
         * If data file was sorted, and we knew it somehow, we could insert
-        * appropriate pathkeys into the ForeignPath node to tell the planner that.
+        * appropriate pathkeys into the ForeignPath node to tell the planner
+        * that.
         */
 }
 
@@ -505,8 +506,8 @@ fileGetForeignPlan(PlannerInfo *root,
        return make_foreignscan(tlist,
                                                        scan_clauses,
                                                        scan_relid,
-                                                       NIL, /* no expressions to evaluate */
-                                                       NIL); /* no private state either */
+                                                       NIL,    /* no expressions to evaluate */
+                                                       NIL);           /* no private state either */
 }
 
 /*
@@ -665,14 +666,14 @@ fileAnalyzeForeignTable(Relation relation,
 {
        char       *filename;
        List       *options;
-       struct stat     stat_buf;
+       struct stat stat_buf;
 
        /* Fetch options of foreign table */
        fileGetOptions(RelationGetRelid(relation), &filename, &options);
 
        /*
-        * Get size of the file.  (XXX if we fail here, would it be better to
-        * just return false to skip analyzing the table?)
+        * Get size of the file.  (XXX if we fail here, would it be better to just
+        * return false to skip analyzing the table?)
         */
        if (stat(filename, &stat_buf) < 0)
                ereport(ERROR,
@@ -746,7 +747,7 @@ estimate_size(PlannerInfo *root, RelOptInfo *baserel,
                 * planner's idea of the relation width; which is bogus if not all
                 * columns are being read, not to mention that the text representation
                 * of a row probably isn't the same size as its internal
-                * representation.  Possibly we could do something better, but the
+                * representation.      Possibly we could do something better, but the
                 * real answer to anyone who complains is "ANALYZE" ...
                 */
                int                     tuple_width;
@@ -811,7 +812,7 @@ estimate_costs(PlannerInfo *root, RelOptInfo *baserel,
  * which must have at least targrows entries.
  * The actual number of rows selected is returned as the function result.
  * We also count the total number of rows in the file and return it into
- * *totalrows.  Note that *totaldeadrows is always set to 0.
+ * *totalrows. Note that *totaldeadrows is always set to 0.
  *
  * Note that the returned list of rows is not always in order by physical
  * position in the file.  Therefore, correlation estimates derived later
@@ -824,7 +825,7 @@ file_acquire_sample_rows(Relation onerel, int elevel,
                                                 double *totalrows, double *totaldeadrows)
 {
        int                     numrows = 0;
-       double          rowstoskip = -1; /* -1 means not set yet */
+       double          rowstoskip = -1;        /* -1 means not set yet */
        double          rstate;
        TupleDesc       tupDesc;
        Datum      *values;
@@ -853,8 +854,8 @@ file_acquire_sample_rows(Relation onerel, int elevel,
        cstate = BeginCopyFrom(onerel, filename, NIL, options);
 
        /*
-        * Use per-tuple memory context to prevent leak of memory used to read rows
-        * from the file with Copy routines.
+        * Use per-tuple memory context to prevent leak of memory used to read
+        * rows from the file with Copy routines.
         */
        tupcontext = AllocSetContextCreate(CurrentMemoryContext,
                                                                           "file_fdw temporary context",
@@ -912,10 +913,10 @@ file_acquire_sample_rows(Relation onerel, int elevel,
                        if (rowstoskip <= 0)
                        {
                                /*
-                                * Found a suitable tuple, so save it, replacing one
-                                * old tuple at random
+                                * Found a suitable tuple, so save it, replacing one old tuple
+                                * at random
                                 */
-                               int             k = (int) (targrows * anl_random_fract());
+                               int                     k = (int) (targrows * anl_random_fract());
 
                                Assert(k >= 0 && k < targrows);
                                heap_freetuple(rows[k]);
index 20977805c8deab6df569263894069afa4a494ca5..a226101bbc47b36e3f21a9ae374b65adbcf5b166 100644 (file)
@@ -37,7 +37,7 @@ const char *progname;
 /* Options and defaults */
 bool           debug = false;          /* are we debugging? */
 bool           dryrun = false;         /* are we performing a dry-run operation? */
-char      *additional_ext = NULL;      /* Extension to remove from filenames */
+char      *additional_ext = NULL;              /* Extension to remove from filenames */
 
 char      *archiveLocation;    /* where to find the archive? */
 char      *restartWALFileName; /* the file from which we can restart restore */
@@ -136,12 +136,13 @@ CleanupPriorWALFiles(void)
                         * they were originally written, in case this worries you.
                         */
                        if (strlen(walfile) == XLOG_DATA_FNAME_LEN &&
-                       strspn(walfile, "0123456789ABCDEF") == XLOG_DATA_FNAME_LEN &&
+                               strspn(walfile, "0123456789ABCDEF") == XLOG_DATA_FNAME_LEN &&
                                strcmp(walfile + 8, exclusiveCleanupFileName + 8) < 0)
                        {
-                               /* 
-                                * Use the original file name again now, including any extension
-                                * that might have been chopped off before testing the sequence.
+                               /*
+                                * Use the original file name again now, including any
+                                * extension that might have been chopped off before testing
+                                * the sequence.
                                 */
                                snprintf(WALFilePath, MAXPGPATH, "%s/%s",
                                                 archiveLocation, xlde->d_name);
@@ -150,7 +151,7 @@ CleanupPriorWALFiles(void)
                                {
                                        /*
                                         * Prints the name of the file to be removed and skips the
-                                        * actual removal.  The regular printout is so that the
+                                        * actual removal.      The regular printout is so that the
                                         * user can pipe the output into some other program.
                                         */
                                        printf("%s\n", WALFilePath);
@@ -298,7 +299,8 @@ main(int argc, char **argv)
                                dryrun = true;
                                break;
                        case 'x':
-                               additional_ext = optarg; /* Extension to remove from xlogfile names */
+                               additional_ext = optarg;                /* Extension to remove from
+                                                                                                * xlogfile names */
                                break;
                        default:
                                fprintf(stderr, "Try \"%s --help\" for more information.\n", progname);
index 06869fa344e8a5b23c613574f26787dd2e424fb8..aa11c144d68cf4c8a224d1cdcdd61a74c824ed47 100644 (file)
@@ -103,19 +103,19 @@ typedef struct Counters
        int64           calls;                  /* # of times executed */
        double          total_time;             /* total execution time, in msec */
        int64           rows;                   /* total # of retrieved or affected rows */
-       int64           shared_blks_hit;                /* # of shared buffer hits */
+       int64           shared_blks_hit;        /* # of shared buffer hits */
        int64           shared_blks_read;               /* # of shared disk blocks read */
        int64           shared_blks_dirtied;    /* # of shared disk blocks dirtied */
        int64           shared_blks_written;    /* # of shared disk blocks written */
-       int64           local_blks_hit;                 /* # of local buffer hits */
-       int64           local_blks_read;                /* # of local disk blocks read */
+       int64           local_blks_hit; /* # of local buffer hits */
+       int64           local_blks_read;        /* # of local disk blocks read */
        int64           local_blks_dirtied;             /* # of local disk blocks dirtied */
        int64           local_blks_written;             /* # of local disk blocks written */
-       int64           temp_blks_read;                 /* # of temp blocks read */
+       int64           temp_blks_read; /* # of temp blocks read */
        int64           temp_blks_written;              /* # of temp blocks written */
-       double          blk_read_time;                  /* time spent reading, in msec */
-       double          blk_write_time;                 /* time spent writing, in msec */
-       double          usage;                                  /* usage factor */
+       double          blk_read_time;  /* time spent reading, in msec */
+       double          blk_write_time; /* time spent writing, in msec */
+       double          usage;                  /* usage factor */
 } Counters;
 
 /*
@@ -140,7 +140,7 @@ typedef struct pgssSharedState
 {
        LWLockId        lock;                   /* protects hashtable search/modification */
        int                     query_size;             /* max query length in bytes */
-       double          cur_median_usage;       /* current median usage in hashtable */
+       double          cur_median_usage;               /* current median usage in hashtable */
 } pgssSharedState;
 
 /*
@@ -150,7 +150,7 @@ typedef struct pgssLocationLen
 {
        int                     location;               /* start offset in query text */
        int                     length;                 /* length in bytes, or -1 to ignore */
-}      pgssLocationLen;
+} pgssLocationLen;
 
 /*
  * Working state for computing a query jumble and producing a normalized
@@ -172,7 +172,7 @@ typedef struct pgssJumbleState
 
        /* Current number of valid entries in clocations array */
        int                     clocations_count;
-}      pgssJumbleState;
+} pgssJumbleState;
 
 /*---- Local variables ----*/
 
@@ -248,21 +248,21 @@ static uint32 pgss_hash_string(const char *str);
 static void pgss_store(const char *query, uint32 queryId,
                   double total_time, uint64 rows,
                   const BufferUsage *bufusage,
-                  pgssJumbleState * jstate);
+                  pgssJumbleState *jstate);
 static Size pgss_memsize(void);
 static pgssEntry *entry_alloc(pgssHashKey *key, const char *query,
-                                                         int query_len, bool sticky);
+                       int query_len, bool sticky);
 static void entry_dealloc(void);
 static void entry_reset(void);
-static void AppendJumble(pgssJumbleState * jstate,
+static void AppendJumble(pgssJumbleState *jstate,
                         const unsigned char *item, Size size);
-static void JumbleQuery(pgssJumbleState * jstate, Query *query);
-static void JumbleRangeTable(pgssJumbleState * jstate, List *rtable);
-static void JumbleExpr(pgssJumbleState * jstate, Node *node);
-static void RecordConstLocation(pgssJumbleState * jstate, int location);
-static char *generate_normalized_query(pgssJumbleState * jstate, const char *query,
+static void JumbleQuery(pgssJumbleState *jstate, Query *query);
+static void JumbleRangeTable(pgssJumbleState *jstate, List *rtable);
+static void JumbleExpr(pgssJumbleState *jstate, Node *node);
+static void RecordConstLocation(pgssJumbleState *jstate, int location);
+static char *generate_normalized_query(pgssJumbleState *jstate, const char *query,
                                                  int *query_len_p, int encoding);
-static void fill_in_constant_lengths(pgssJumbleState * jstate, const char *query);
+static void fill_in_constant_lengths(pgssJumbleState *jstate, const char *query);
 static int     comp_location(const void *a, const void *b);
 
 
@@ -513,8 +513,8 @@ pgss_shmem_startup(void)
        FreeFile(file);
 
        /*
-        * Remove the file so it's not included in backups/replication
-        * slaves, etc. A new file will be written on next shutdown.
+        * Remove the file so it's not included in backups/replication slaves,
+        * etc. A new file will be written on next shutdown.
         */
        unlink(PGSS_DUMP_FILE);
 
@@ -600,7 +600,7 @@ error:
        ereport(LOG,
                        (errcode_for_file_access(),
                         errmsg("could not write pg_stat_statement file \"%s\": %m",
-                                       PGSS_DUMP_FILE  ".tmp")));
+                                       PGSS_DUMP_FILE ".tmp")));
        if (file)
                FreeFile(file);
        unlink(PGSS_DUMP_FILE ".tmp");
@@ -626,8 +626,8 @@ pgss_post_parse_analyze(ParseState *pstate, Query *query)
         * the statement contains an optimizable statement for which a queryId
         * could be derived (such as EXPLAIN or DECLARE CURSOR).  For such cases,
         * runtime control will first go through ProcessUtility and then the
-        * executor, and we don't want the executor hooks to do anything, since
-        * we are already measuring the statement's costs at the utility level.
+        * executor, and we don't want the executor hooks to do anything, since we
+        * are already measuring the statement's costs at the utility level.
         */
        if (query->utilityStmt)
        {
@@ -768,7 +768,7 @@ pgss_ExecutorEnd(QueryDesc *queryDesc)
 
                pgss_store(queryDesc->sourceText,
                                   queryId,
-                                  queryDesc->totaltime->total * 1000.0, /* convert to msec */
+                                  queryDesc->totaltime->total * 1000.0,                /* convert to msec */
                                   queryDesc->estate->es_processed,
                                   &queryDesc->totaltime->bufusage,
                                   NULL);
@@ -789,10 +789,9 @@ pgss_ProcessUtility(Node *parsetree, const char *queryString,
                                        DestReceiver *dest, char *completionTag)
 {
        /*
-        * If it's an EXECUTE statement, we don't track it and don't increment
-        * the nesting level.  This allows the cycles to be charged to the
-        * underlying PREPARE instead (by the Executor hooks), which is much more
-        * useful.
+        * If it's an EXECUTE statement, we don't track it and don't increment the
+        * nesting level.  This allows the cycles to be charged to the underlying
+        * PREPARE instead (by the Executor hooks), which is much more useful.
         *
         * We also don't track execution of PREPARE.  If we did, we would get one
         * hash table entry for the PREPARE (with hash calculated from the query
@@ -942,7 +941,7 @@ static void
 pgss_store(const char *query, uint32 queryId,
                   double total_time, uint64 rows,
                   const BufferUsage *bufusage,
-                  pgssJumbleState * jstate)
+                  pgssJumbleState *jstate)
 {
        pgssHashKey key;
        pgssEntry  *entry;
@@ -1355,7 +1354,7 @@ entry_reset(void)
  * the current jumble.
  */
 static void
-AppendJumble(pgssJumbleState * jstate, const unsigned char *item, Size size)
+AppendJumble(pgssJumbleState *jstate, const unsigned char *item, Size size)
 {
        unsigned char *jumble = jstate->jumble;
        Size            jumble_len = jstate->jumble_len;
@@ -1404,7 +1403,7 @@ AppendJumble(pgssJumbleState * jstate, const unsigned char *item, Size size)
  * of information).
  */
 static void
-JumbleQuery(pgssJumbleState * jstate, Query *query)
+JumbleQuery(pgssJumbleState *jstate, Query *query)
 {
        Assert(IsA(query, Query));
        Assert(query->utilityStmt == NULL);
@@ -1431,7 +1430,7 @@ JumbleQuery(pgssJumbleState * jstate, Query *query)
  * Jumble a range table
  */
 static void
-JumbleRangeTable(pgssJumbleState * jstate, List *rtable)
+JumbleRangeTable(pgssJumbleState *jstate, List *rtable)
 {
        ListCell   *lc;
 
@@ -1485,11 +1484,11 @@ JumbleRangeTable(pgssJumbleState * jstate, List *rtable)
  *
  * Note: the reason we don't simply use expression_tree_walker() is that the
  * point of that function is to support tree walkers that don't care about
- * most tree node types, but here we care about all types.  We should complain
+ * most tree node types, but here we care about all types.     We should complain
  * about any unrecognized node type.
  */
 static void
-JumbleExpr(pgssJumbleState * jstate, Node *node)
+JumbleExpr(pgssJumbleState *jstate, Node *node)
 {
        ListCell   *temp;
 
@@ -1874,7 +1873,7 @@ JumbleExpr(pgssJumbleState * jstate, Node *node)
  * that is currently being walked.
  */
 static void
-RecordConstLocation(pgssJumbleState * jstate, int location)
+RecordConstLocation(pgssJumbleState *jstate, int location)
 {
        /* -1 indicates unknown or undefined location */
        if (location >= 0)
@@ -1909,7 +1908,7 @@ RecordConstLocation(pgssJumbleState * jstate, int location)
  * Returns a palloc'd string, which is not necessarily null-terminated.
  */
 static char *
-generate_normalized_query(pgssJumbleState * jstate, const char *query,
+generate_normalized_query(pgssJumbleState *jstate, const char *query,
                                                  int *query_len_p, int encoding)
 {
        char       *norm_query;
@@ -2007,7 +2006,7 @@ generate_normalized_query(pgssJumbleState * jstate, const char *query,
  * a problem.
  *
  * Duplicate constant pointers are possible, and will have their lengths
- * marked as '-1', so that they are later ignored.  (Actually, we assume the
+ * marked as '-1', so that they are later ignored.     (Actually, we assume the
  * lengths were initialized as -1 to start with, and don't change them here.)
  *
  * N.B. There is an assumption that a '-' character at a Const location begins
@@ -2015,7 +2014,7 @@ generate_normalized_query(pgssJumbleState * jstate, const char *query,
  * reason for a constant to start with a '-'.
  */
 static void
-fill_in_constant_lengths(pgssJumbleState * jstate, const char *query)
+fill_in_constant_lengths(pgssJumbleState *jstate, const char *query)
 {
        pgssLocationLen *locs;
        core_yyscan_t yyscanner;
index 7f92bc88182beedf3bfd1881f90ec5c4d0fcf4ce..9fe2301e419a569d6f6111c606a22c01c2690c9a 100644 (file)
@@ -29,7 +29,7 @@
 
 /* These are macros to avoid timing the function call overhead. */
 #ifndef WIN32
-#define START_TIMER    \
+#define START_TIMER \
 do { \
        alarm_triggered = false; \
        alarm(secs_per_test); \
@@ -37,7 +37,7 @@ do { \
 } while (0)
 #else
 /* WIN32 doesn't support alarm, so we create a thread and sleep there */
-#define START_TIMER    \
+#define START_TIMER \
 do { \
        alarm_triggered = false; \
        if (CreateThread(NULL, 0, process_alarm, NULL, 0, NULL) == \
@@ -55,7 +55,7 @@ do { \
        gettimeofday(&stop_t, NULL); \
        print_elapse(start_t, stop_t, ops); \
 } while (0)
-               
+
 
 static const char *progname;
 
@@ -77,6 +77,7 @@ static void test_sync(int writes_per_op);
 static void test_open_syncs(void);
 static void test_open_sync(const char *msg, int writes_size);
 static void test_file_descriptor_sync(void);
+
 #ifndef WIN32
 static void process_alarm(int sig);
 #else
index 4e43694338bed648fe0b674952465b694e058844..b3f98abe5cb6b1bc37253d00d0ba68b6fd75e543 100644 (file)
@@ -1,7 +1,7 @@
 /*
  *     pg_test_timing.c
- *             tests overhead of timing calls and their monotonicity:  that
- *             they always move forward
+ *             tests overhead of timing calls and their monotonicity:  that
+ *             they always move forward
  */
 
 #include "postgres_fe.h"
@@ -35,8 +35,8 @@ handle_args(int argc, char *argv[])
                {"duration", required_argument, NULL, 'd'},
                {NULL, 0, NULL, 0}
        };
-       int option;                     /* Command line option */
-       int     optindex = 0;   /* used by getopt_long */
+       int                     option;                 /* Command line option */
+       int                     optindex = 0;   /* used by getopt_long */
 
        if (argc > 1)
        {
@@ -87,7 +87,7 @@ handle_args(int argc, char *argv[])
        else
        {
                fprintf(stderr,
-                               "%s: duration must be a positive integer (duration is \"%d\")\n",
+                       "%s: duration must be a positive integer (duration is \"%d\")\n",
                                progname, test_duration);
                fprintf(stderr, "Try \"%s --help\" for more information.\n",
                                progname);
@@ -98,16 +98,22 @@ handle_args(int argc, char *argv[])
 static void
 test_timing(int32 duration)
 {
-       uint64 total_time;
-       int64 time_elapsed = 0;
-       uint64 loop_count = 0;
-       uint64 prev, cur;
-       int32 diff, i, bits, found;
-
-       instr_time start_time, end_time, temp;
+       uint64          total_time;
+       int64           time_elapsed = 0;
+       uint64          loop_count = 0;
+       uint64          prev,
+                               cur;
+       int32           diff,
+                               i,
+                               bits,
+                               found;
+
+       instr_time      start_time,
+                               end_time,
+                               temp;
 
        static int64 histogram[32];
-       char    buf[100];
+       char            buf[100];
 
        total_time = duration > 0 ? duration * 1000000 : 0;
 
@@ -146,7 +152,7 @@ test_timing(int32 duration)
        INSTR_TIME_SUBTRACT(end_time, start_time);
 
        printf("Per loop time including overhead: %0.2f nsec\n",
-                       INSTR_TIME_GET_DOUBLE(end_time) * 1e9 / loop_count);
+                  INSTR_TIME_GET_DOUBLE(end_time) * 1e9 / loop_count);
        printf("Histogram of timing durations:\n");
        printf("%9s: %10s %9s\n", "< usec", "count", "percent");
 
index 57bce012070a49bb2c5e2d45140bffcd8a307669..d59c8eb670b44c4c8b79e6b92baa0f68e6ce4e73 100644 (file)
@@ -199,9 +199,9 @@ gtrgm_consistent(PG_FUNCTION_ARGS)
         * trigram extraction is relatively CPU-expensive.      We must include
         * strategy number because trigram extraction depends on strategy.
         *
-        * The cached structure contains the strategy number, then the input
-        * query (starting at a MAXALIGN boundary), then the TRGM value (also
-        * starting at a MAXALIGN boundary).
+        * The cached structure contains the strategy number, then the input query
+        * (starting at a MAXALIGN boundary), then the TRGM value (also starting
+        * at a MAXALIGN boundary).
         */
        if (cache == NULL ||
                strategy != *((StrategyNumber *) cache) ||
@@ -341,8 +341,7 @@ gtrgm_distance(PG_FUNCTION_ARGS)
        char       *cache = (char *) fcinfo->flinfo->fn_extra;
 
        /*
-        * Cache the generated trigrams across multiple calls with the same
-        * query.
+        * Cache the generated trigrams across multiple calls with the same query.
         */
        if (cache == NULL ||
                VARSIZE(cache) != querysize ||
index 2669c096586743aa2580696915d00f10abaeed3c..eed4a1eba7c7cebf271aa9ba22728f515a65d358 100644 (file)
@@ -168,7 +168,7 @@ issue_warnings(char *sequence_script_file_name)
                                          SYSTEMQUOTE "\"%s/psql\" --echo-queries "
                                          "--set ON_ERROR_STOP=on "
                                          "--no-psqlrc --port %d --username \"%s\" "
-                                         "-f \"%s\" --dbname template1 >> \"%s\" 2>&1" SYSTEMQUOTE,
+                                  "-f \"%s\" --dbname template1 >> \"%s\" 2>&1" SYSTEMQUOTE,
                                          new_cluster.bindir, new_cluster.port, os_info.user,
                                          sequence_script_file_name, UTILITY_LOG_FILE);
                        unlink(sequence_script_file_name);
@@ -204,7 +204,7 @@ output_completion_banner(char *analyze_script_file_name,
        else
                pg_log(PG_REPORT,
                           "Optimizer statistics and free space information are not transferred\n"
-                          "by pg_upgrade so, once you start the new server, consider running:\n"
+               "by pg_upgrade so, once you start the new server, consider running:\n"
                           "    %s\n\n", analyze_script_file_name);
 
        pg_log(PG_REPORT,
@@ -238,7 +238,8 @@ check_cluster_versions(void)
 
        /*
         * We can't allow downgrading because we use the target pg_dumpall, and
-        * pg_dumpall cannot operate on new database versions, only older versions.
+        * pg_dumpall cannot operate on new database versions, only older
+        * versions.
         */
        if (old_cluster.major_version > new_cluster.major_version)
                pg_log(PG_FATAL, "This utility cannot be used to downgrade to older major PostgreSQL versions.\n");
@@ -402,31 +403,31 @@ create_script_for_cluster_analyze(char **analyze_script_file_name)
 #endif
 
        fprintf(script, "echo %sThis script will generate minimal optimizer statistics rapidly%s\n",
-                                       ECHO_QUOTE, ECHO_QUOTE);
+                       ECHO_QUOTE, ECHO_QUOTE);
        fprintf(script, "echo %sso your system is usable, and then gather statistics twice more%s\n",
-                                       ECHO_QUOTE, ECHO_QUOTE);
+                       ECHO_QUOTE, ECHO_QUOTE);
        fprintf(script, "echo %swith increasing accuracy.  When it is done, your system will%s\n",
-                                       ECHO_QUOTE, ECHO_QUOTE);
+                       ECHO_QUOTE, ECHO_QUOTE);
        fprintf(script, "echo %shave the default level of optimizer statistics.%s\n",
-                                       ECHO_QUOTE, ECHO_QUOTE);
+                       ECHO_QUOTE, ECHO_QUOTE);
        fprintf(script, "echo\n\n");
 
        fprintf(script, "echo %sIf you have used ALTER TABLE to modify the statistics target for%s\n",
-                                       ECHO_QUOTE, ECHO_QUOTE);
+                       ECHO_QUOTE, ECHO_QUOTE);
        fprintf(script, "echo %sany tables, you might want to remove them and restore them after%s\n",
-                                       ECHO_QUOTE, ECHO_QUOTE);
+                       ECHO_QUOTE, ECHO_QUOTE);
        fprintf(script, "echo %srunning this script because they will delay fast statistics generation.%s\n",
-                                       ECHO_QUOTE, ECHO_QUOTE);
+                       ECHO_QUOTE, ECHO_QUOTE);
        fprintf(script, "echo\n\n");
 
        fprintf(script, "echo %sIf you would like default statistics as quickly as possible, cancel%s\n",
-                                       ECHO_QUOTE, ECHO_QUOTE);
+                       ECHO_QUOTE, ECHO_QUOTE);
        fprintf(script, "echo %sthis script and run:%s\n",
-                                       ECHO_QUOTE, ECHO_QUOTE);
+                       ECHO_QUOTE, ECHO_QUOTE);
        fprintf(script, "echo %s    vacuumdb --all %s%s\n", ECHO_QUOTE,
-               /* Did we copy the free space files? */
-               (GET_MAJOR_VERSION(old_cluster.major_version) >= 804) ?
-               "--analyze-only" : "--analyze", ECHO_QUOTE);
+       /* Did we copy the free space files? */
+                       (GET_MAJOR_VERSION(old_cluster.major_version) >= 804) ?
+                       "--analyze-only" : "--analyze", ECHO_QUOTE);
        fprintf(script, "echo\n\n");
 
 #ifndef WIN32
@@ -441,15 +442,15 @@ create_script_for_cluster_analyze(char **analyze_script_file_name)
 #endif
 
        fprintf(script, "echo %sGenerating minimal optimizer statistics (1 target)%s\n",
-                                       ECHO_QUOTE, ECHO_QUOTE);
+                       ECHO_QUOTE, ECHO_QUOTE);
        fprintf(script, "echo %s--------------------------------------------------%s\n",
-                                       ECHO_QUOTE, ECHO_QUOTE);
+                       ECHO_QUOTE, ECHO_QUOTE);
        fprintf(script, "vacuumdb --all --analyze-only\n");
        fprintf(script, "echo\n");
        fprintf(script, "echo %sThe server is now available with minimal optimizer statistics.%s\n",
-                                       ECHO_QUOTE, ECHO_QUOTE);
+                       ECHO_QUOTE, ECHO_QUOTE);
        fprintf(script, "echo %sQuery performance will be optimal once this script completes.%s\n",
-                                       ECHO_QUOTE, ECHO_QUOTE);
+                       ECHO_QUOTE, ECHO_QUOTE);
        fprintf(script, "echo\n\n");
 
 #ifndef WIN32
@@ -462,9 +463,9 @@ create_script_for_cluster_analyze(char **analyze_script_file_name)
 #endif
 
        fprintf(script, "echo %sGenerating medium optimizer statistics (10 targets)%s\n",
-                                       ECHO_QUOTE, ECHO_QUOTE);
+                       ECHO_QUOTE, ECHO_QUOTE);
        fprintf(script, "echo %s---------------------------------------------------%s\n",
-                                       ECHO_QUOTE, ECHO_QUOTE);
+                       ECHO_QUOTE, ECHO_QUOTE);
        fprintf(script, "vacuumdb --all --analyze-only\n");
        fprintf(script, "echo\n\n");
 
@@ -475,17 +476,17 @@ create_script_for_cluster_analyze(char **analyze_script_file_name)
 #endif
 
        fprintf(script, "echo %sGenerating default (full) optimizer statistics (100 targets?)%s\n",
-                                       ECHO_QUOTE, ECHO_QUOTE);
+                       ECHO_QUOTE, ECHO_QUOTE);
        fprintf(script, "echo %s-------------------------------------------------------------%s\n",
-                                       ECHO_QUOTE, ECHO_QUOTE);
+                       ECHO_QUOTE, ECHO_QUOTE);
        fprintf(script, "vacuumdb --all %s\n",
-               /* Did we copy the free space files? */
-               (GET_MAJOR_VERSION(old_cluster.major_version) >= 804) ?
-               "--analyze-only" : "--analyze");
+       /* Did we copy the free space files? */
+                       (GET_MAJOR_VERSION(old_cluster.major_version) >= 804) ?
+                       "--analyze-only" : "--analyze");
 
        fprintf(script, "echo\n\n");
        fprintf(script, "echo %sDone%s\n",
-                                       ECHO_QUOTE, ECHO_QUOTE);
+                       ECHO_QUOTE, ECHO_QUOTE);
 
        fclose(script);
 
@@ -716,8 +717,8 @@ check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster)
                pg_log(PG_REPORT, "fatal\n");
                pg_log(PG_FATAL,
                           "Your installation contains \"contrib/isn\" functions which rely on the\n"
-                          "bigint data type.  Your old and new clusters pass bigint values\n"
-                          "differently so this cluster cannot currently be upgraded.  You can\n"
+                 "bigint data type.  Your old and new clusters pass bigint values\n"
+               "differently so this cluster cannot currently be upgraded.  You can\n"
                           "manually upgrade databases that use \"contrib/isn\" facilities and remove\n"
                           "\"contrib/isn\" from the old cluster and restart the upgrade.  A list of\n"
                           "the problem functions is in the file:\n"
@@ -764,9 +765,9 @@ check_for_reg_data_type_usage(ClusterInfo *cluster)
                PGconn     *conn = connectToServer(cluster, active_db->db_name);
 
                /*
-                *      While several relkinds don't store any data, e.g. views, they
-                *      can be used to define data types of other columns, so we
-                *      check all relkinds.
+                * While several relkinds don't store any data, e.g. views, they can
+                * be used to define data types of other columns, so we check all
+                * relkinds.
                 */
                res = executeQueryOrDie(conn,
                                                                "SELECT n.nspname, c.relname, a.attname "
@@ -777,16 +778,16 @@ check_for_reg_data_type_usage(ClusterInfo *cluster)
                                                                "               NOT a.attisdropped AND "
                                                                "               a.atttypid IN ( "
                  "                     'pg_catalog.regproc'::pg_catalog.regtype, "
-                 "                     'pg_catalog.regprocedure'::pg_catalog.regtype, "
+                                                               "                       'pg_catalog.regprocedure'::pg_catalog.regtype, "
                  "                     'pg_catalog.regoper'::pg_catalog.regtype, "
-                 "                     'pg_catalog.regoperator'::pg_catalog.regtype, "
+                                                               "                       'pg_catalog.regoperator'::pg_catalog.regtype, "
                /* regclass.oid is preserved, so 'regclass' is OK */
                /* regtype.oid is preserved, so 'regtype' is OK */
-                 "                     'pg_catalog.regconfig'::pg_catalog.regtype, "
-                 "                     'pg_catalog.regdictionary'::pg_catalog.regtype) AND "
-                 "             c.relnamespace = n.oid AND "
-                 "             n.nspname != 'pg_catalog' AND "
-                 "             n.nspname != 'information_schema'");
+               "                       'pg_catalog.regconfig'::pg_catalog.regtype, "
+                                                               "                       'pg_catalog.regdictionary'::pg_catalog.regtype) AND "
+                                                               "               c.relnamespace = n.oid AND "
+                                                         "             n.nspname != 'pg_catalog' AND "
+                                                "              n.nspname != 'information_schema'");
 
                ntups = PQntuples(res);
                i_nspname = PQfnumber(res, "nspname");
@@ -822,8 +823,8 @@ check_for_reg_data_type_usage(ClusterInfo *cluster)
                pg_log(PG_REPORT, "fatal\n");
                pg_log(PG_FATAL,
                           "Your installation contains one of the reg* data types in user tables.\n"
-                          "These data types reference system OIDs that are not preserved by\n"
-                          "pg_upgrade, so this cluster cannot currently be upgraded.  You can\n"
+                "These data types reference system OIDs that are not preserved by\n"
+               "pg_upgrade, so this cluster cannot currently be upgraded.  You can\n"
                           "remove the problem tables and restart the upgrade.  A list of the problem\n"
                           "columns is in the file:\n"
                           "    %s\n\n", output_path);
@@ -836,9 +837,11 @@ check_for_reg_data_type_usage(ClusterInfo *cluster)
 static void
 get_bin_version(ClusterInfo *cluster)
 {
-       char            cmd[MAXPGPATH], cmd_output[MAX_STRING];
+       char            cmd[MAXPGPATH],
+                               cmd_output[MAX_STRING];
        FILE       *output;
-       int                     pre_dot, post_dot;
+       int                     pre_dot,
+                               post_dot;
 
        snprintf(cmd, sizeof(cmd), "\"%s/pg_ctl\" --version", cluster->bindir);
 
@@ -858,4 +861,3 @@ get_bin_version(ClusterInfo *cluster)
 
        cluster->bin_version = (pre_dot * 100 + post_dot) * 100;
 }
-
index e01280db9e284b5527c5d7db0afc481247e1e017..6bffe549e59610846eaa391f63bb956ab05d37f3 100644 (file)
@@ -129,6 +129,7 @@ get_control_data(ClusterInfo *cluster, bool live_check)
                pg_log(PG_VERBOSE, "%s", bufin);
 
 #ifdef WIN32
+
                /*
                 * Due to an installer bug, LANG=C doesn't work for PG 8.3.3, but does
                 * work 8.2.6 and 8.3.7, so check for non-ASCII output and suggest a
@@ -506,7 +507,7 @@ check_control_data(ControlData *oldctrl,
                 * This is a common 8.3 -> 8.4 upgrade problem, so we are more verbose
                 */
                pg_log(PG_FATAL,
-                          "You will need to rebuild the new server with configure option\n"
+                       "You will need to rebuild the new server with configure option\n"
                           "--disable-integer-datetimes or get server binaries built with those\n"
                           "options.\n");
        }
@@ -531,6 +532,6 @@ disable_old_cluster(void)
        pg_log(PG_REPORT, "\n"
                   "If you want to start the old cluster, you will need to remove\n"
                   "the \".old\" suffix from %s/global/pg_control.old.\n"
-                  "Because \"link\" mode was used, the old cluster cannot be safely\n"
-                  "started once the new cluster has been started.\n\n", old_cluster.pgdata);
+                "Because \"link\" mode was used, the old cluster cannot be safely\n"
+       "started once the new cluster has been started.\n\n", old_cluster.pgdata);
 }
index 68cf0795aabdb7f734d2528980cf4bbad6a8a46f..9e63bd5856839b1c6f099cdb0b5e53ea34ec19d4 100644 (file)
@@ -18,8 +18,9 @@
 static void check_data_dir(const char *pg_data);
 static void check_bin_dir(ClusterInfo *cluster);
 static void validate_exec(const char *dir, const char *cmdName);
+
 #ifdef WIN32
-static int win32_check_directory_write_permissions(void);
+static int     win32_check_directory_write_permissions(void);
 #endif
 
 
@@ -64,7 +65,7 @@ exec_prog(bool throw_error, bool is_priv,
                pg_log(throw_error ? PG_FATAL : PG_REPORT,
                           "Consult the last few lines of \"%s\" for\n"
                           "the probable cause of the failure.\n",
-                               log_file);
+                          log_file);
                return 1;
        }
 
@@ -142,12 +143,12 @@ verify_directories(void)
 static int
 win32_check_directory_write_permissions(void)
 {
-       int fd;
+       int                     fd;
 
        /*
-        *      We open a file we would normally create anyway.  We do this even in
-        *      'check' mode, which isn't ideal, but this is the best we can do.
-        */     
+        * We open a file we would normally create anyway.      We do this even in
+        * 'check' mode, which isn't ideal, but this is the best we can do.
+        */
        if ((fd = open(GLOBALS_DUMP_FILE, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR)) < 0)
                return -1;
        close(fd);
@@ -184,7 +185,7 @@ check_data_dir(const char *pg_data)
                struct stat statBuf;
 
                snprintf(subDirName, sizeof(subDirName), "%s%s%s", pg_data,
-                       /* Win32 can't stat() a directory with a trailing slash. */
+               /* Win32 can't stat() a directory with a trailing slash. */
                                 *requiredSubdirs[subdirnum] ? "/" : "",
                                 requiredSubdirs[subdirnum]);
 
index 0276636e03fa4208134b0d4aa91c2c037c9c432f..1dd3722142c9e83c1ec228099c3a3fd302a2179b 100644 (file)
@@ -233,7 +233,7 @@ copy_file(const char *srcfile, const char *dstfile, bool force)
  * large number of times.
  */
 int
-load_directory(const char *dirname, struct dirent ***namelist)
+load_directory(const char *dirname, struct dirent *** namelist)
 {
        DIR                *dirdesc;
        struct dirent *direntry;
@@ -251,7 +251,7 @@ load_directory(const char *dirname, struct dirent ***namelist)
                count++;
 
                *namelist = (struct dirent **) realloc((void *) (*namelist),
-                                       (size_t) ((name_num + 1) * sizeof(struct dirent *)));
+                                               (size_t) ((name_num + 1) * sizeof(struct dirent *)));
 
                if (*namelist == NULL)
                {
@@ -314,7 +314,6 @@ win32_pghardlink(const char *src, const char *dst)
        else
                return 0;
 }
-
 #endif
 
 
@@ -322,13 +321,11 @@ win32_pghardlink(const char *src, const char *dst)
 FILE *
 fopen_priv(const char *path, const char *mode)
 {
-       mode_t old_umask = umask(S_IRWXG | S_IRWXO);
-       FILE    *fp;
+       mode_t          old_umask = umask(S_IRWXG | S_IRWXO);
+       FILE       *fp;
 
        fp = fopen(path, mode);
        umask(old_umask);
 
        return fp;
 }
-       
-
index b4b17badb28ce8abfa2f946c33572c01f98fa021..77bd3a0359771498d7b6aaabc6467ba37a7ef55c 100644 (file)
@@ -133,7 +133,7 @@ get_loadable_libraries(void)
        int                     totaltups;
        int                     dbnum;
        bool            found_public_plpython_handler = false;
-       
+
        ress = (PGresult **) pg_malloc(old_cluster.dbarr.ndbs * sizeof(PGresult *));
        totaltups = 0;
 
@@ -144,10 +144,10 @@ get_loadable_libraries(void)
                PGconn     *conn = connectToServer(&old_cluster, active_db->db_name);
 
                /*
-                *      Fetch all libraries referenced in this DB.  We can't exclude
-                *      the "pg_catalog" schema because, while such functions are not
-                *      explicitly dumped by pg_dump, they do reference implicit objects
-                *      that pg_dump does dump, e.g. CREATE LANGUAGE plperl.
+                * Fetch all libraries referenced in this DB.  We can't exclude the
+                * "pg_catalog" schema because, while such functions are not
+                * explicitly dumped by pg_dump, they do reference implicit objects
+                * that pg_dump does dump, e.g. CREATE LANGUAGE plperl.
                 */
                ress[dbnum] = executeQueryOrDie(conn,
                                                                                "SELECT DISTINCT probin "
@@ -158,26 +158,26 @@ get_loadable_libraries(void)
                                                                                FirstNormalObjectId);
                totaltups += PQntuples(ress[dbnum]);
 
-                /*
-                 *     Systems that install plpython before 8.1 have
-                 *     plpython_call_handler() defined in the "public" schema, causing
-                 *     pg_dumpall to dump it.  However that function still references
-                 *     "plpython" (no "2"), so it throws an error on restore.  This code
-                 *     checks for the problem function, reports affected databases to the
-                 *     user and explains how to remove them.
-                 *     8.1 git commit: e0dedd0559f005d60c69c9772163e69c204bac69
-                 *     http://archives.postgresql.org/pgsql-hackers/2012-03/msg01101.php
-                 *     http://archives.postgresql.org/pgsql-bugs/2012-05/msg00206.php
-                 */
+               /*
+                * Systems that install plpython before 8.1 have
+                * plpython_call_handler() defined in the "public" schema, causing
+                * pg_dumpall to dump it.  However that function still references
+                * "plpython" (no "2"), so it throws an error on restore.  This code
+                * checks for the problem function, reports affected databases to the
+                * user and explains how to remove them. 8.1 git commit:
+                * e0dedd0559f005d60c69c9772163e69c204bac69
+                * http://archives.postgresql.org/pgsql-hackers/2012-03/msg01101.php
+                * http://archives.postgresql.org/pgsql-bugs/2012-05/msg00206.php
+                */
                if (GET_MAJOR_VERSION(old_cluster.major_version) < 901)
                {
-                       PGresult  *res;
+                       PGresult   *res;
 
                        res = executeQueryOrDie(conn,
                                                                        "SELECT 1 "
-                                                                       "FROM   pg_catalog.pg_proc JOIN pg_namespace "
-                                                                       "               ON pronamespace = pg_namespace.oid "
-                                                                       "WHERE proname = 'plpython_call_handler' AND "
+                                                  "FROM        pg_catalog.pg_proc JOIN pg_namespace "
+                                                        "              ON pronamespace = pg_namespace.oid "
+                                                          "WHERE proname = 'plpython_call_handler' AND "
                                                                        "nspname = 'public' AND "
                                                                        "prolang = 13 /* C */ AND "
                                                                        "probin = '$libdir/plpython' AND "
@@ -188,23 +188,23 @@ get_loadable_libraries(void)
                                if (!found_public_plpython_handler)
                                {
                                        pg_log(PG_WARNING,
-                  "\nThe old cluster has a \"plpython_call_handler\" function defined\n"
-                       "in the \"public\" schema which is a duplicate of the one defined\n"
-                   "in the \"pg_catalog\" schema.  You can confirm this by executing\n"
-                       "in psql:\n"
-                       "\n"
-                       "       \\df *.plpython_call_handler\n"
-                       "\n"
-                       "The \"public\" schema version of this function was created by a\n"
-                       "pre-8.1 install of plpython, and must be removed for pg_upgrade\n"
-                       "to complete because it references a now-obsolete \"plpython\"\n"
-                       "shared object file.  You can remove the \"public\" schema version\n"
-                       "of this function by running the following command:\n"
-                       "\n"
-                       "             DROP FUNCTION public.plpython_call_handler()\n"
-                       "\n"
-                       "in each affected database:\n"
-                       "\n");
+                                                  "\nThe old cluster has a \"plpython_call_handler\" function defined\n"
+                                                  "in the \"public\" schema which is a duplicate of the one defined\n"
+                                                  "in the \"pg_catalog\" schema.  You can confirm this by executing\n"
+                                                  "in psql:\n"
+                                                  "\n"
+                                                  "    \\df *.plpython_call_handler\n"
+                                                  "\n"
+                                                  "The \"public\" schema version of this function was created by a\n"
+                                                  "pre-8.1 install of plpython, and must be removed for pg_upgrade\n"
+                                                  "to complete because it references a now-obsolete \"plpython\"\n"
+                                                  "shared object file.  You can remove the \"public\" schema version\n"
+                                          "of this function by running the following command:\n"
+                                                  "\n"
+                                                "      DROP FUNCTION public.plpython_call_handler()\n"
+                                                  "\n"
+                                                  "in each affected database:\n"
+                                                  "\n");
                                }
                                pg_log(PG_WARNING, "    %s\n", active_db->db_name);
                                found_public_plpython_handler = true;
@@ -217,9 +217,9 @@ get_loadable_libraries(void)
 
        if (found_public_plpython_handler)
                pg_log(PG_FATAL,
-                  "Remove the problem functions from the old cluster to continue.\n");
-       
-       totaltups++;    /* reserve for pg_upgrade_support */
+                "Remove the problem functions from the old cluster to continue.\n");
+
+       totaltups++;                            /* reserve for pg_upgrade_support */
 
        /* Allocate what's certainly enough space */
        os_info.libraries = (char **) pg_malloc(totaltups * sizeof(char *));
@@ -293,17 +293,17 @@ check_loadable_libraries(void)
                PGresult   *res;
 
                /*
-                *      In Postgres 9.0, Python 3 support was added, and to do that, a
-                *      plpython2u language was created with library name plpython2.so
-                *      as a symbolic link to plpython.so.  In Postgres 9.1, only the
-                *      plpython2.so library was created, and both plpythonu and
-                *      plpython2u pointing to it.  For this reason, any reference to
-                *      library name "plpython" in an old PG <= 9.1 cluster must look
-                *      for "plpython2" in the new cluster.
+                * In Postgres 9.0, Python 3 support was added, and to do that, a
+                * plpython2u language was created with library name plpython2.so as a
+                * symbolic link to plpython.so.  In Postgres 9.1, only the
+                * plpython2.so library was created, and both plpythonu and plpython2u
+                * pointing to it.      For this reason, any reference to library name
+                * "plpython" in an old PG <= 9.1 cluster must look for "plpython2" in
+                * the new cluster.
                 *
-                *      For this case, we could check pg_pltemplate, but that only works
-                *      for languages, and does not help with function shared objects,
-                *      so we just do a general fix.
+                * For this case, we could check pg_pltemplate, but that only works
+                * for languages, and does not help with function shared objects, so
+                * we just do a general fix.
                 */
                if (GET_MAJOR_VERSION(old_cluster.major_version) < 901 &&
                        strcmp(lib, "$libdir/plpython") == 0)
@@ -325,7 +325,7 @@ check_loadable_libraries(void)
                        /* exit and report missing support library with special message */
                        if (strcmp(lib, PG_UPGRADE_SUPPORT) == 0)
                                pg_log(PG_FATAL,
-                                  "The pg_upgrade_support module must be created and installed in the new cluster.\n");
+                                          "The pg_upgrade_support module must be created and installed in the new cluster.\n");
 
                        if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
                                pg_log(PG_FATAL, "Could not open file \"%s\": %s\n",
index 5b2b9eb28c18a126b15d9df65d2b17d1d39c7ab6..74b13e782da28e17788f7b44061269cb6acf17fc 100644 (file)
@@ -57,12 +57,12 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db,
                                   old_db->db_name, old_rel->reloid, new_rel->reloid);
 
                /*
-                * TOAST table names initially match the heap pg_class oid.
-                * In pre-8.4, TOAST table names change during CLUSTER; in pre-9.0,
-                * TOAST table names change during ALTER TABLE ALTER COLUMN SET TYPE.
-                * In >= 9.0, TOAST relation names always use heap table oids, hence
-                * we cannot check relation names when upgrading from pre-9.0.
-                * Clusters upgraded to 9.0 will get matching TOAST names.
+                * TOAST table names initially match the heap pg_class oid. In
+                * pre-8.4, TOAST table names change during CLUSTER; in pre-9.0, TOAST
+                * table names change during ALTER TABLE ALTER COLUMN SET TYPE. In >=
+                * 9.0, TOAST relation names always use heap table oids, hence we
+                * cannot check relation names when upgrading from pre-9.0. Clusters
+                * upgraded to 9.0 will get matching TOAST names.
                 */
                if (strcmp(old_rel->nspname, new_rel->nspname) != 0 ||
                        ((GET_MAJOR_VERSION(old_cluster.major_version) >= 900 ||
@@ -194,16 +194,16 @@ get_db_infos(ClusterInfo *cluster)
        char            query[QUERY_ALLOC];
 
        snprintf(query, sizeof(query),
-                       "SELECT d.oid, d.datname, %s "
-                       "FROM pg_catalog.pg_database d "
-                       " LEFT OUTER JOIN pg_catalog.pg_tablespace t "
-                       " ON d.dattablespace = t.oid "
-                       "WHERE d.datallowconn = true "
+                        "SELECT d.oid, d.datname, %s "
+                        "FROM pg_catalog.pg_database d "
+                        " LEFT OUTER JOIN pg_catalog.pg_tablespace t "
+                        " ON d.dattablespace = t.oid "
+                        "WHERE d.datallowconn = true "
        /* we don't preserve pg_database.oid so we sort by name */
-                       "ORDER BY 2",
+                        "ORDER BY 2",
        /* 9.2 removed the spclocation column */
-                       (GET_MAJOR_VERSION(cluster->major_version) <= 901) ?
-                       "t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid) AS spclocation");
+                        (GET_MAJOR_VERSION(cluster->major_version) <= 901) ?
+                        "t.spclocation" : "pg_catalog.pg_tablespace_location(t.oid) AS spclocation");
 
        res = executeQueryOrDie(conn, "%s", query);
 
@@ -276,7 +276,7 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
                         "  LEFT OUTER JOIN pg_catalog.pg_tablespace t "
                         "         ON c.reltablespace = t.oid "
                         "WHERE relkind IN ('r','t', 'i'%s) AND "
-                        /* exclude possible orphaned temp tables */
+       /* exclude possible orphaned temp tables */
                         "  ((n.nspname !~ '^pg_temp_' AND "
                         "    n.nspname !~ '^pg_toast_temp_' AND "
                         "    n.nspname NOT IN ('pg_catalog', 'information_schema', 'binary_upgrade') AND "
index 66a70cac8ac983243a9d8fd6eb391e6af90326eb..ccf00434d3b610238900fa372360a8dcdcbe70b4 100644 (file)
@@ -56,10 +56,10 @@ parseCommandLine(int argc, char *argv[])
        int                     option;                 /* Command line option */
        int                     optindex = 0;   /* used by getopt_long */
        int                     os_user_effective_id;
-       FILE            *fp;
-       char            **filename;
+       FILE       *fp;
+       char      **filename;
        time_t          run_time = time(NULL);
-       
+
        user_opts.transfer_mode = TRANSFER_MODE_COPY;
 
        os_info.progname = get_progname(argv[0]);
@@ -138,11 +138,11 @@ parseCommandLine(int argc, char *argv[])
                                new_cluster.pgopts = pg_strdup(optarg);
                                break;
 
-                       /*
-                        * Someday, the port number option could be removed and
-                        * passed using -o/-O, but that requires postmaster -C
-                        * to be supported on all old/new versions.
-                        */
+                               /*
+                                * Someday, the port number option could be removed and passed
+                                * using -o/-O, but that requires postmaster -C to be
+                                * supported on all old/new versions.
+                                */
                        case 'p':
                                if ((old_cluster.port = atoi(optarg)) <= 0)
                                {
@@ -196,21 +196,21 @@ parseCommandLine(int argc, char *argv[])
                /* Start with newline because we might be appending to a file. */
                fprintf(fp, "\n"
                "-----------------------------------------------------------------\n"
-               "  pg_upgrade run on %s"
-               "-----------------------------------------------------------------\n\n",
-               ctime(&run_time));
+                               "  pg_upgrade run on %s"
+                               "-----------------------------------------------------------------\n\n",
+                               ctime(&run_time));
                fclose(fp);
        }
 
        /* Get values from env if not already set */
        check_required_directory(&old_cluster.bindir, "PGBINOLD", "-b",
-                                                       "old cluster binaries reside");
+                                                        "old cluster binaries reside");
        check_required_directory(&new_cluster.bindir, "PGBINNEW", "-B",
-                                                       "new cluster binaries reside");
+                                                        "new cluster binaries reside");
        check_required_directory(&old_cluster.pgdata, "PGDATAOLD", "-d",
-                                                       "old cluster data resides");
+                                                        "old cluster data resides");
        check_required_directory(&new_cluster.pgdata, "PGDATANEW", "-D",
-                                                       "new cluster data resides");
+                                                        "new cluster data resides");
 }
 
 
@@ -285,7 +285,7 @@ or\n"), old_cluster.port, new_cluster.port, os_info.user);
  */
 static void
 check_required_directory(char **dirpath, char *envVarName,
-                                               char *cmdLineOption, char *description)
+                                                char *cmdLineOption, char *description)
 {
        if (*dirpath == NULL || strlen(*dirpath) == 0)
        {
@@ -322,8 +322,10 @@ void
 adjust_data_dir(ClusterInfo *cluster)
 {
        char            filename[MAXPGPATH];
-       char            cmd[MAXPGPATH], cmd_output[MAX_STRING];
-       FILE       *fp, *output;
+       char            cmd[MAXPGPATH],
+                               cmd_output[MAX_STRING];
+       FILE       *fp,
+                          *output;
 
        /* If there is no postgresql.conf, it can't be a config-only dir */
        snprintf(filename, sizeof(filename), "%s/postgresql.conf", cluster->pgconfig);
@@ -345,10 +347,9 @@ adjust_data_dir(ClusterInfo *cluster)
                                CLUSTER_NAME(cluster));
 
        /*
-        * We don't have a data directory yet, so we can't check the PG
-        * version, so this might fail --- only works for PG 9.2+.   If this
-        * fails, pg_upgrade will fail anyway because the data files will not
-        * be found.
+        * We don't have a data directory yet, so we can't check the PG version,
+        * so this might fail --- only works for PG 9.2+.       If this fails,
+        * pg_upgrade will fail anyway because the data files will not be found.
         */
        snprintf(cmd, sizeof(cmd), "\"%s/postmaster\" -D \"%s\" -C data_directory",
                         cluster->bindir, cluster->pgconfig);
@@ -356,7 +357,7 @@ adjust_data_dir(ClusterInfo *cluster)
        if ((output = popen(cmd, "r")) == NULL ||
                fgets(cmd_output, sizeof(cmd_output), output) == NULL)
                pg_log(PG_FATAL, "Could not get data directory using %s: %s\n",
-               cmd, getErrorText(errno));
+                          cmd, getErrorText(errno));
 
        pclose(output);
 
index 465ecdd6b324275293044646b9fcf49a6d180600..3537fc2bd05bbd9b9c7dfda6028826eb376485a9 100644 (file)
@@ -55,7 +55,7 @@ ClusterInfo old_cluster,
                        new_cluster;
 OSInfo         os_info;
 
-char *output_files[] = {
+char      *output_files[] = {
        SERVER_LOG_FILE,
 #ifdef WIN32
        /* unique file for pg_ctl start */
@@ -122,11 +122,10 @@ main(int argc, char **argv)
        stop_postmaster(false);
 
        /*
-        *      Most failures happen in create_new_objects(), which has
-        *      completed at this point.  We do this here because it is just
-        *      before linking, which will link the old and new cluster data
-        *      files, preventing the old cluster from being safely started
-        *      once the new cluster is started.
+        * Most failures happen in create_new_objects(), which has completed at
+        * this point.  We do this here because it is just before linking, which
+        * will link the old and new cluster data files, preventing the old
+        * cluster from being safely started once the new cluster is started.
         */
        if (user_opts.transfer_mode == TRANSFER_MODE_LINK)
                disable_old_cluster();
@@ -215,8 +214,8 @@ prepare_new_cluster(void)
        exec_prog(true, true, UTILITY_LOG_FILE,
                          SYSTEMQUOTE "\"%s/vacuumdb\" --port %d --username \"%s\" "
                          "--all --analyze %s >> \"%s\" 2>&1" SYSTEMQUOTE,
-         new_cluster.bindir, new_cluster.port, os_info.user,
-         log_opts.verbose ? "--verbose" : "", UTILITY_LOG_FILE);
+                         new_cluster.bindir, new_cluster.port, os_info.user,
+                         log_opts.verbose ? "--verbose" : "", UTILITY_LOG_FILE);
        check_ok();
 
        /*
@@ -229,8 +228,8 @@ prepare_new_cluster(void)
        exec_prog(true, true, UTILITY_LOG_FILE,
                          SYSTEMQUOTE "\"%s/vacuumdb\" --port %d --username \"%s\" "
                          "--all --freeze %s >> \"%s\" 2>&1" SYSTEMQUOTE,
-         new_cluster.bindir, new_cluster.port, os_info.user,
-         log_opts.verbose ? "--verbose" : "", UTILITY_LOG_FILE);
+                         new_cluster.bindir, new_cluster.port, os_info.user,
+                         log_opts.verbose ? "--verbose" : "", UTILITY_LOG_FILE);
        check_ok();
 
        get_pg_database_relfilenode(&new_cluster);
@@ -252,8 +251,8 @@ prepare_new_databases(void)
 
        /*
         * Install support functions in the global-object restore database to
-        * preserve pg_authid.oid.  pg_dumpall uses 'template0' as its template
-        * database so objects we add into 'template1' are not propogated.  They
+        * preserve pg_authid.oid.      pg_dumpall uses 'template0' as its template
+        * database so objects we add into 'template1' are not propogated.      They
         * are removed on pg_upgrade exit.
         */
        install_support_functions_in_new_db("template1");
@@ -267,7 +266,7 @@ prepare_new_databases(void)
        exec_prog(true, true, RESTORE_LOG_FILE,
                          SYSTEMQUOTE "\"%s/psql\" --echo-queries "
                          "--set ON_ERROR_STOP=on "
-                         /* --no-psqlrc prevents AUTOCOMMIT=off */
+       /* --no-psqlrc prevents AUTOCOMMIT=off */
                          "--no-psqlrc --port %d --username \"%s\" "
                          "-f \"%s\" --dbname template1 >> \"%s\" 2>&1" SYSTEMQUOTE,
                          new_cluster.bindir, new_cluster.port, os_info.user,
@@ -453,13 +452,13 @@ set_frozenxids(void)
 static void
 cleanup(void)
 {
-       
+
        fclose(log_opts.internal);
 
        /* Remove dump and log files? */
        if (!log_opts.retain)
        {
-               char            **filename;
+               char      **filename;
 
                for (filename = output_files; *filename != NULL; filename++)
                        unlink(*filename);
index 26aa7bb1d24732f6a1528c662dd4de71dd566e1d..d12590ac6ba294d4d76551b6d5f7726fbcde17ef 100644 (file)
@@ -75,7 +75,7 @@ extern char *output_files[];
 #define RM_CMD                         "rm -f"
 #define RMDIR_CMD                      "rm -rf"
 #define SCRIPT_EXT                     "sh"
-#define        ECHO_QUOTE      "'"
+#define ECHO_QUOTE     "'"
 #else
 #define pg_copy_file           CopyFile
 #define pg_mv_file                     pgrename
@@ -85,7 +85,7 @@ extern char *output_files[];
 #define RMDIR_CMD                      "RMDIR /s/q"
 #define SCRIPT_EXT                     "bat"
 #define EXE_EXT                                ".exe"
-#define        ECHO_QUOTE      ""
+#define ECHO_QUOTE     ""
 #endif
 
 #define CLUSTER_NAME(cluster)  ((cluster) == &old_cluster ? "old" : \
@@ -98,7 +98,7 @@ extern char *output_files[];
 /* postmaster/postgres -b (binary_upgrade) flag added during PG 9.1 development */
 #define BINARY_UPGRADE_SERVER_FLAG_CAT_VER 201104251
 /*
- *     Visibility map changed with this 9.2 commit,
+ *     Visibility map changed with this 9.2 commit,
  *     8f9fe6edce358f7904e0db119416b4d1080a83aa; pick later catalog version.
  */
 #define VISIBILITY_MAP_CRASHSAFE_CAT_VER 201107031
@@ -114,7 +114,7 @@ typedef struct
        Oid                     reloid;                 /* relation oid */
        Oid                     relfilenode;    /* relation relfile node */
        /* relation tablespace path, or "" for the cluster default */
-       char            tablespace[MAXPGPATH];  
+       char            tablespace[MAXPGPATH];
 } RelInfo;
 
 typedef struct
@@ -222,9 +222,11 @@ typedef struct
        ControlData controldata;        /* pg_control information */
        DbInfoArr       dbarr;                  /* dbinfos array */
        char       *pgdata;                     /* pathname for cluster's $PGDATA directory */
-       char       *pgconfig;           /* pathname for cluster's config file directory */
+       char       *pgconfig;           /* pathname for cluster's config file
+                                                                * directory */
        char       *bindir;                     /* pathname for cluster's executable directory */
-       char       *pgopts;                     /* options to pass to the server, like pg_ctl -o */
+       char       *pgopts;                     /* options to pass to the server, like pg_ctl
+                                                                * -o */
        unsigned short port;            /* port number where postmaster is waiting */
        uint32          major_version;  /* PG_VERSION of cluster */
        char            major_version_str[64];  /* string PG_VERSION of cluster */
@@ -291,8 +293,8 @@ void check_old_cluster(bool live_check,
 void           check_new_cluster(void);
 void           report_clusters_compatible(void);
 void           issue_warnings(char *sequence_script_file_name);
-void           output_completion_banner(char *analyze_script_file_name,
-                                                                        char *deletion_script_file_name);
+void output_completion_banner(char *analyze_script_file_name,
+                                                char *deletion_script_file_name);
 void           check_cluster_versions(void);
 void           check_cluster_compatibility(bool live_check);
 void           create_script_for_old_cluster_deletion(char **deletion_script_file_name);
@@ -314,9 +316,10 @@ void               split_old_dump(void);
 
 /* exec.c */
 
-int exec_prog(bool throw_error, bool is_priv,
-       const char *log_file, const char *cmd, ...)
-       __attribute__((format(PG_PRINTF_ATTRIBUTE, 4, 5)));
+int
+exec_prog(bool throw_error, bool is_priv,
+                 const char *log_file, const char *cmd,...)
+__attribute__((format(PG_PRINTF_ATTRIBUTE, 4, 5)));
 void           verify_directories(void);
 bool           is_server_running(const char *datadir);
 
@@ -353,14 +356,14 @@ const char *setupPageConverter(pageCnvCtx **result);
 typedef void *pageCnvCtx;
 #endif
 
-int load_directory(const char *dirname, struct dirent ***namelist);
+int                    load_directory(const char *dirname, struct dirent *** namelist);
 const char *copyAndUpdateFile(pageCnvCtx *pageConverter, const char *src,
                                  const char *dst, bool force);
 const char *linkAndUpdateFile(pageCnvCtx *pageConverter, const char *src,
                                  const char *dst);
 
 void           check_hard_link(void);
-FILE      *fopen_priv(const char *path, const char *mode);
+FILE      *fopen_priv(const char *path, const char *mode);
 
 /* function.c */
 
@@ -399,8 +402,9 @@ void                init_tablespaces(void);
 /* server.c */
 
 PGconn    *connectToServer(ClusterInfo *cluster, const char *db_name);
-PGresult   *executeQueryOrDie(PGconn *conn, const char *fmt, ...)
-       __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
+PGresult *
+executeQueryOrDie(PGconn *conn, const char *fmt,...)
+__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
 
 void           start_postmaster(ClusterInfo *cluster);
 void           stop_postmaster(bool fast);
@@ -413,12 +417,15 @@ void              check_pghost_envvar(void);
 char      *quote_identifier(const char *s);
 int                    get_user_info(char **user_name);
 void           check_ok(void);
-void           report_status(eLogType type, const char *fmt, ...)
-       __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
-void           pg_log(eLogType type, char *fmt, ...)
-       __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
-void           prep_status(const char *fmt, ...)
-       __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
+void
+report_status(eLogType type, const char *fmt,...)
+__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
+void
+pg_log(eLogType type, char *fmt,...)
+__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
+void
+prep_status(const char *fmt,...)
+__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
 void           check_ok(void);
 char      *pg_strdup(const char *s);
 void      *pg_malloc(int size);
index 45d6c5415bf15a92a16f9ae9d3938408a7c40b47..3509585de7b568af921d09443d077d7ed3b3bf47 100644 (file)
@@ -34,26 +34,28 @@ const char *
 transfer_all_new_dbs(DbInfoArr *old_db_arr,
                                   DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata)
 {
-       int                     old_dbnum, new_dbnum;
+       int                     old_dbnum,
+                               new_dbnum;
        const char *msg = NULL;
 
        prep_status("%s user relation files\n",
-               user_opts.transfer_mode == TRANSFER_MODE_LINK ? "Linking" : "Copying");
+         user_opts.transfer_mode == TRANSFER_MODE_LINK ? "Linking" : "Copying");
 
        /* Scan the old cluster databases and transfer their files */
        for (old_dbnum = new_dbnum = 0;
                 old_dbnum < old_db_arr->ndbs;
                 old_dbnum++, new_dbnum++)
        {
-               DbInfo     *old_db = &old_db_arr->dbs[old_dbnum], *new_db = NULL;
+               DbInfo     *old_db = &old_db_arr->dbs[old_dbnum],
+                                  *new_db = NULL;
                FileNameMap *mappings;
                int                     n_maps;
                pageCnvCtx *pageConverter = NULL;
 
                /*
-                *      Advance past any databases that exist in the new cluster
-                *      but not in the old, e.g. "postgres".  (The user might
-                *      have removed the 'postgres' database from the old cluster.)
+                * Advance past any databases that exist in the new cluster but not in
+                * the old, e.g. "postgres".  (The user might have removed the
+                * 'postgres' database from the old cluster.)
                 */
                for (; new_dbnum < new_db_arr->ndbs; new_dbnum++)
                {
@@ -83,8 +85,8 @@ transfer_all_new_dbs(DbInfoArr *old_db_arr,
                }
        }
 
-       prep_status(" ");                       /* in case nothing printed; pass a space so gcc
-                                                                * doesn't complain about empty format
+       prep_status(" ");                       /* in case nothing printed; pass a space so
+                                                                * gcc doesn't complain about empty format
                                                                 * string */
        check_ok();
 
@@ -137,14 +139,14 @@ transfer_single_new_db(pageCnvCtx *pageConverter,
        int                     mapnum;
        int                     fileno;
        bool            vm_crashsafe_change = false;
-       
+
        old_dir[0] = '\0';
 
        /* Do not copy non-crashsafe vm files for binaries that assume crashsafety */
        if (old_cluster.controldata.cat_ver < VISIBILITY_MAP_CRASHSAFE_CAT_VER &&
                new_cluster.controldata.cat_ver >= VISIBILITY_MAP_CRASHSAFE_CAT_VER)
                vm_crashsafe_change = true;
-       
+
        for (mapnum = 0; mapnum < size; mapnum++)
        {
                char            old_file[MAXPGPATH];
@@ -190,8 +192,8 @@ transfer_single_new_db(pageCnvCtx *pageConverter,
 
                        for (fileno = 0; fileno < numFiles; fileno++)
                        {
-                               char *vm_offset = strstr(namelist[fileno]->d_name, "_vm");
-                               bool is_vm_file = false;
+                               char       *vm_offset = strstr(namelist[fileno]->d_name, "_vm");
+                               bool            is_vm_file = false;
 
                                /* Is a visibility map file? (name ends with _vm) */
                                if (vm_offset && strlen(vm_offset) == strlen("_vm"))
index f557453df2d92e0ff946588eacaa1fae5962ca1e..f83d6fa866cc87e142a11a2289ff938c8c2f1ab6 100644 (file)
@@ -161,7 +161,7 @@ start_postmaster(ClusterInfo *cluster)
        snprintf(cmd, sizeof(cmd),
                         SYSTEMQUOTE "\"%s/pg_ctl\" -w -l \"%s\" -D \"%s\" "
                         "-o \"-p %d %s %s\" start >> \"%s\" 2>&1" SYSTEMQUOTE,
-                        cluster->bindir, SERVER_LOG_FILE, cluster->pgconfig, cluster->port,
+                 cluster->bindir, SERVER_LOG_FILE, cluster->pgconfig, cluster->port,
                         (cluster->controldata.cat_ver >=
                          BINARY_UPGRADE_SERVER_FLAG_CAT_VER) ? "-b" :
                         "-c autovacuum=off -c autovacuum_freeze_max_age=2000000000",
@@ -172,11 +172,11 @@ start_postmaster(ClusterInfo *cluster)
         * it might supply a reason for the failure.
         */
        pg_ctl_return = exec_prog(false, true,
-                                       /* pass both file names if the differ */
-                                       (strcmp(SERVER_LOG_FILE, SERVER_START_LOG_FILE) == 0) ?
-                                               SERVER_LOG_FILE :
-                                               SERVER_LOG_FILE " or " SERVER_START_LOG_FILE,
-                                       "%s", cmd);
+       /* pass both file names if the differ */
+                                         (strcmp(SERVER_LOG_FILE, SERVER_START_LOG_FILE) == 0) ?
+                                                         SERVER_LOG_FILE :
+                                                         SERVER_LOG_FILE " or " SERVER_START_LOG_FILE,
+                                                         "%s", cmd);
 
        /* Check to see if we can connect to the server; if not, report it. */
        if ((conn = get_db_conn(cluster, "template1")) == NULL ||
@@ -211,14 +211,14 @@ stop_postmaster(bool fast)
        else if (os_info.running_cluster == &new_cluster)
                cluster = &new_cluster;
        else
-               return;         /* no cluster running */
+               return;                                 /* no cluster running */
 
        snprintf(cmd, sizeof(cmd),
                         SYSTEMQUOTE "\"%s/pg_ctl\" -w -D \"%s\" -o \"%s\" "
                         "%s stop >> \"%s\" 2>&1" SYSTEMQUOTE,
                         cluster->bindir, cluster->pgconfig,
                         cluster->pgopts ? cluster->pgopts : "",
-                       fast ? "-m fast" : "", SERVER_STOP_LOG_FILE);
+                        fast ? "-m fast" : "", SERVER_STOP_LOG_FILE);
 
        exec_prog(fast ? false : true, true, SERVER_STOP_LOG_FILE, "%s", cmd);
 
index 6b61f4bac11cc320b106ea65adc7152016be4820..b783b6251e6e248f01953fcd0ade964044505363 100644 (file)
@@ -52,8 +52,8 @@ get_tablespace_paths(void)
                         "WHERE spcname != 'pg_default' AND "
                         "              spcname != 'pg_global'",
        /* 9.2 removed the spclocation column */
-                       (GET_MAJOR_VERSION(old_cluster.major_version) <= 901) ?
-                       "spclocation" : "pg_catalog.pg_tablespace_location(oid) AS spclocation");
+                        (GET_MAJOR_VERSION(old_cluster.major_version) <= 901) ?
+       "spclocation" : "pg_catalog.pg_tablespace_location(oid) AS spclocation");
 
        res = executeQueryOrDie(conn, "%s", query);
 
index 542425c7c9eff3d761f7839904fcfab7ac90478f..b681c0984e6e9d0bee6c0a561907d6efbbc3a1a4 100644 (file)
@@ -60,10 +60,10 @@ old_8_3_check_for_name_data_type_usage(ClusterInfo *cluster)
                                                                "               NOT a.attisdropped AND "
                                                                "               a.atttypid = 'pg_catalog.name'::pg_catalog.regtype AND "
                                                                "               c.relnamespace = n.oid AND "
-                                                                /* exclude possible orphaned temp tables */
+               /* exclude possible orphaned temp tables */
                                                                "               n.nspname !~ '^pg_temp_' AND "
-                                                               "               n.nspname !~ '^pg_toast_temp_' AND "
-                                                               "               n.nspname NOT IN ('pg_catalog', 'information_schema')");
+                                                "              n.nspname !~ '^pg_toast_temp_' AND "
+                                                               "               n.nspname NOT IN ('pg_catalog', 'information_schema')");
 
                ntups = PQntuples(res);
                i_nspname = PQfnumber(res, "nspname");
@@ -98,9 +98,9 @@ old_8_3_check_for_name_data_type_usage(ClusterInfo *cluster)
                pg_log(PG_REPORT, "fatal\n");
                pg_log(PG_FATAL,
                           "Your installation contains the \"name\" data type in user tables.  This\n"
-                          "data type changed its internal alignment between your old and new\n"
+               "data type changed its internal alignment between your old and new\n"
                           "clusters so this cluster cannot currently be upgraded.  You can remove\n"
-                          "the problem tables and restart the upgrade.  A list of the problem\n"
+               "the problem tables and restart the upgrade.  A list of the problem\n"
                           "columns is in the file:\n"
                           "    %s\n\n", output_path);
        }
@@ -150,10 +150,10 @@ old_8_3_check_for_tsquery_usage(ClusterInfo *cluster)
                                                                "               NOT a.attisdropped AND "
                                                                "               a.atttypid = 'pg_catalog.tsquery'::pg_catalog.regtype AND "
                                                                "               c.relnamespace = n.oid AND "
-                                                                /* exclude possible orphaned temp tables */
+               /* exclude possible orphaned temp tables */
                                                                "               n.nspname !~ '^pg_temp_' AND "
-                                                               "               n.nspname !~ '^pg_toast_temp_' AND "
-                                                               "               n.nspname NOT IN ('pg_catalog', 'information_schema')");
+                                                "              n.nspname !~ '^pg_toast_temp_' AND "
+                                                               "               n.nspname NOT IN ('pg_catalog', 'information_schema')");
 
                ntups = PQntuples(res);
                i_nspname = PQfnumber(res, "nspname");
@@ -189,7 +189,7 @@ old_8_3_check_for_tsquery_usage(ClusterInfo *cluster)
                pg_log(PG_FATAL,
                           "Your installation contains the \"tsquery\" data type.    This data type\n"
                           "added a new internal field between your old and new clusters so this\n"
-                          "cluster cannot currently be upgraded.  You can remove the problem\n"
+               "cluster cannot currently be upgraded.  You can remove the problem\n"
                           "columns and restart the upgrade.  A list of the problem columns is in the\n"
                           "file:\n"
                           "    %s\n\n", output_path);
@@ -328,10 +328,10 @@ old_8_3_rebuild_tsvector_tables(ClusterInfo *cluster, bool check_mode)
                                                                "               NOT a.attisdropped AND "
                                                                "               a.atttypid = 'pg_catalog.tsvector'::pg_catalog.regtype AND "
                                                                "               c.relnamespace = n.oid AND "
-                                                                /* exclude possible orphaned temp tables */
+               /* exclude possible orphaned temp tables */
                                                                "               n.nspname !~ '^pg_temp_' AND "
-                                                               "               n.nspname !~ '^pg_toast_temp_' AND "
-                                                               "               n.nspname NOT IN ('pg_catalog', 'information_schema')");
+                                                "              n.nspname !~ '^pg_toast_temp_' AND "
+                                                               "               n.nspname NOT IN ('pg_catalog', 'information_schema')");
 
 /*
  *     This macro is used below to avoid reindexing indexes already rebuilt
@@ -527,7 +527,7 @@ old_8_3_invalidate_hash_gin_indexes(ClusterInfo *cluster, bool check_mode)
                                   "must be reindexed with the REINDEX command.  The file:\n"
                                   "    %s\n"
                                   "when executed by psql by the database superuser will recreate all invalid\n"
-                                  "indexes; until then, none of these indexes will be used.\n\n",
+                         "indexes; until then, none of these indexes will be used.\n\n",
                                   output_path);
        }
        else
@@ -648,10 +648,10 @@ old_8_3_invalidate_bpchar_pattern_ops_indexes(ClusterInfo *cluster,
                        pg_log(PG_WARNING, "\n"
                                   "Your installation contains indexes using \"bpchar_pattern_ops\".  These\n"
                                   "indexes have different internal formats between your old and new clusters\n"
-                                  "so they must be reindexed with the REINDEX command.  The file:\n"
+                       "so they must be reindexed with the REINDEX command.  The file:\n"
                                   "    %s\n"
                                   "when executed by psql by the database superuser will recreate all invalid\n"
-                                  "indexes; until then, none of these indexes will be used.\n\n",
+                         "indexes; until then, none of these indexes will be used.\n\n",
                                   output_path);
        }
        else
@@ -699,10 +699,10 @@ old_8_3_create_sequence_script(ClusterInfo *cluster)
                                                                "               pg_catalog.pg_namespace n "
                                                                "WHERE  c.relkind = 'S' AND "
                                                                "               c.relnamespace = n.oid AND "
-                                                                /* exclude possible orphaned temp tables */
+               /* exclude possible orphaned temp tables */
                                                                "               n.nspname !~ '^pg_temp_' AND "
-                                                               "               n.nspname !~ '^pg_toast_temp_' AND "
-                                                               "               n.nspname NOT IN ('pg_catalog', 'information_schema')");
+                                                "              n.nspname !~ '^pg_toast_temp_' AND "
+                                                               "               n.nspname NOT IN ('pg_catalog', 'information_schema')");
 
                ntups = PQntuples(res);
                i_nspname = PQfnumber(res, "nspname");
index b0e699187b5c99fd331f3d34348b663173718ab4..25fb15a847155db6f852ae82343bc98b1d5d0b64 100644 (file)
@@ -66,7 +66,7 @@
 typedef struct win32_pthread *pthread_t;
 typedef int pthread_attr_t;
 
-static int     pthread_create(pthread_t *thread, pthread_attr_t * attr, void *(*start_routine) (void *), void *arg);
+static int     pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
 static int     pthread_join(pthread_t th, void **thread_return);
 #elif defined(ENABLE_THREAD_SAFETY)
 /* Use platform-dependent pthread capability */
@@ -84,7 +84,7 @@ static int    pthread_join(pthread_t th, void **thread_return);
 typedef struct fork_pthread *pthread_t;
 typedef int pthread_attr_t;
 
-static int     pthread_create(pthread_t *thread, pthread_attr_t * attr, void *(*start_routine) (void *), void *arg);
+static int     pthread_create(pthread_t *thread, pthread_attr_t *attr, void *(*start_routine) (void *), void *arg);
 static int     pthread_join(pthread_t th, void **thread_return);
 #endif
 
@@ -198,7 +198,7 @@ typedef struct
        instr_time      start_time;             /* thread start time */
        instr_time *exec_elapsed;       /* time spent executing cmds (per Command) */
        int                *exec_count;         /* number of cmd executions (per Command) */
-       unsigned short random_state[3]; /* separate randomness for each thread */
+       unsigned short random_state[3];         /* separate randomness for each thread */
 } TState;
 
 #define INVALID_THREAD         ((pthread_t) 0)
@@ -1075,7 +1075,7 @@ top:
 
                        /*
                         * getrand() neeeds to be able to subtract max from min and add
-                        * one the result without overflowing.  Since we know max > min,
+                        * one the result without overflowing.  Since we know max > min,
                         * we can detect overflow just by checking for a negative result.
                         * But we must check both that the subtraction doesn't overflow,
                         * and that adding one to the result doesn't overflow either.
@@ -1267,10 +1267,11 @@ init(void)
         * versions.  Since pgbench has never pretended to be fully TPC-B
         * compliant anyway, we stick with the historical behavior.
         */
-       struct ddlinfo {
-               char *table;
-               char *cols;
-               int declare_fillfactor;
+       struct ddlinfo
+       {
+               char       *table;
+               char       *cols;
+               int                     declare_fillfactor;
        };
        struct ddlinfo DDLs[] = {
                {
@@ -1321,15 +1322,16 @@ init(void)
                /* Construct new create table statement. */
                opts[0] = '\0';
                if (ddl->declare_fillfactor)
-                       snprintf(opts+strlen(opts), 256-strlen(opts),
-                               " with (fillfactor=%d)", fillfactor);
+                       snprintf(opts + strlen(opts), 256 - strlen(opts),
+                                        " with (fillfactor=%d)", fillfactor);
                if (tablespace != NULL)
                {
-                       char *escape_tablespace;
+                       char       *escape_tablespace;
+
                        escape_tablespace = PQescapeIdentifier(con, tablespace,
                                                                                                   strlen(tablespace));
-                       snprintf(opts+strlen(opts), 256-strlen(opts),
-                               " tablespace %s", escape_tablespace);
+                       snprintf(opts + strlen(opts), 256 - strlen(opts),
+                                        " tablespace %s", escape_tablespace);
                        PQfreemem(escape_tablespace);
                }
                snprintf(buffer, 256, "create%s table %s(%s)%s",
@@ -1404,17 +1406,18 @@ init(void)
        fprintf(stderr, "set primary key...\n");
        for (i = 0; i < lengthof(DDLAFTERs); i++)
        {
-               char    buffer[256];
+               char            buffer[256];
 
                strncpy(buffer, DDLAFTERs[i], 256);
 
                if (index_tablespace != NULL)
                {
-                       char *escape_tablespace;
+                       char       *escape_tablespace;
+
                        escape_tablespace = PQescapeIdentifier(con, index_tablespace,
                                                                                                   strlen(index_tablespace));
-                       snprintf(buffer+strlen(buffer), 256-strlen(buffer),
-                               " using index tablespace %s", escape_tablespace);
+                       snprintf(buffer + strlen(buffer), 256 - strlen(buffer),
+                                        " using index tablespace %s", escape_tablespace);
                        PQfreemem(escape_tablespace);
                }
 
@@ -1861,10 +1864,10 @@ main(int argc, char **argv)
        int                     i;
 
        static struct option long_options[] = {
-                       {"index-tablespace", required_argument, NULL, 3},
-                       {"tablespace", required_argument, NULL, 2},
-                       {"unlogged-tables", no_argument, &unlogged_tables, 1},
-                       {NULL, 0, NULL, 0}
+               {"index-tablespace", required_argument, NULL, 3},
+               {"tablespace", required_argument, NULL, 2},
+               {"unlogged-tables", no_argument, &unlogged_tables, 1},
+               {NULL, 0, NULL, 0}
        };
 
 #ifdef HAVE_GETRLIMIT
@@ -2065,10 +2068,10 @@ main(int argc, char **argv)
                        case 0:
                                /* This covers long options which take no argument. */
                                break;
-                       case 2:                                                 /* tablespace */
+                       case 2:                         /* tablespace */
                                tablespace = optarg;
                                break;
-                       case 3:                                                 /* index-tablespace */
+                       case 3:                         /* index-tablespace */
                                index_tablespace = optarg;
                                break;
                        default:
@@ -2571,7 +2574,7 @@ typedef struct fork_pthread
 
 static int
 pthread_create(pthread_t *thread,
-                          pthread_attr_t * attr,
+                          pthread_attr_t *attr,
                           void *(*start_routine) (void *),
                           void *arg)
 {
@@ -2687,7 +2690,7 @@ win32_pthread_run(void *arg)
 
 static int
 pthread_create(pthread_t *thread,
-                          pthread_attr_t * attr,
+                          pthread_attr_t *attr,
                           void *(*start_routine) (void *),
                           void *arg)
 {
index 6c7a2b329e487cd9d5c164f6ca5302ad8c558524..2a5cd70208a90a8cbd047a77aca7e30ed6bd5ea2 100644 (file)
@@ -34,8 +34,8 @@ char *
 px_crypt_md5(const char *pw, const char *salt, char *passwd, unsigned dstlen)
 {
        static char *magic = "$1$"; /* This string is magic for this algorithm.
-                                                                * Having it this way, we can get better
-                                                                * later on */
+                                                                * Having it this way, we can get better later
+                                                                * on */
        static char *p;
        static const char *sp,
                           *ep;
index 610b7fad789a83d655c38e21431d5c1d49c57e3c..80e8624460ddea1549fd868645d3a221137f2990 100644 (file)
@@ -204,8 +204,9 @@ const char *px_resolve_alias(const PX_Alias *aliases, const char *name);
 void           px_set_debug_handler(void (*handler) (const char *));
 
 #ifdef PX_DEBUG
-void           px_debug(const char *fmt, ...)
-       __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
+void
+px_debug(const char *fmt,...)
+__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2)));
 #else
 #define px_debug(...)
 #endif
index 9f2ec1f210840a45852a8b3dbd7d9892c3b1c4c8..d4fc8a0fd609f359c1cfba928f140519c99a17da 100644 (file)
@@ -95,7 +95,7 @@ pgstatindex(PG_FUNCTION_ARGS)
        BlockNumber nblocks;
        BlockNumber blkno;
        BTIndexStat indexStat;
-       BufferAccessStrategy bstrategy = GetAccessStrategy(BAS_BULKREAD);
+       BufferAccessStrategy bstrategy = GetAccessStrategy(BAS_BULKREAD);
 
        if (!superuser())
                ereport(ERROR,
@@ -160,7 +160,7 @@ pgstatindex(PG_FUNCTION_ARGS)
                CHECK_FOR_INTERRUPTS();
 
                /* Read and lock buffer */
-               buffer = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy);
+               buffer = ReadBufferExtended(rel, MAIN_FORKNUM, blkno, RBM_NORMAL, bstrategy);
                LockBuffer(buffer, BUFFER_LOCK_SHARE);
 
                page = BufferGetPage(buffer);
index c9be8c92e4bf2aecdd53e2e60683cb9a8cd5b524..2b62b7850639d4bbaff7ddb4473ea0d671ba06e5 100644 (file)
@@ -62,7 +62,7 @@ typedef struct pgstattuple_type
 } pgstattuple_type;
 
 typedef void (*pgstat_page) (pgstattuple_type *, Relation, BlockNumber,
-                                                        BufferAccessStrategy);
+                                                                                BufferAccessStrategy);
 
 static Datum build_pgstattuple_type(pgstattuple_type *stat,
                                           FunctionCallInfo fcinfo);
index 0c395c42a3cb0643c6d5f99808c84a625468e316..5a4246752a3d358a0483f5f1f1f5615bad221ce7 100644 (file)
@@ -32,19 +32,19 @@ void
 sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
 {
        Relation        rel;
-       ScanKeyData     skey;
-       SysScanDesc     sscan;
+       ScanKeyData skey;
+       SysScanDesc sscan;
        HeapTuple       tuple;
        char       *tcontext;
        char       *ncontext;
        char            audit_name[NAMEDATALEN + 20];
-       ObjectAddress           object;
-       Form_pg_database        datForm;
+       ObjectAddress object;
+       Form_pg_database datForm;
 
        /*
-        * Oid of the source database is not saved in pg_database catalog,
-        * so we collect its identifier using contextual information.
-        * If NULL, its default is "template1" according to createdb().
+        * Oid of the source database is not saved in pg_database catalog, so we
+        * collect its identifier using contextual information. If NULL, its
+        * default is "template1" according to createdb().
         */
        if (!dtemplate)
                dtemplate = "template1";
@@ -56,6 +56,7 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
        tcontext = sepgsql_get_label(object.classId,
                                                                 object.objectId,
                                                                 object.objectSubId);
+
        /*
         * check db_database:{getattr} permission
         */
@@ -67,11 +68,11 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
                                                                  true);
 
        /*
-        * Compute a default security label of the newly created database
-        * based on a pair of security label of client and source database.
+        * Compute a default security label of the newly created database based on
+        * a pair of security label of client and source database.
         *
-        * XXX - uncoming version of libselinux supports to take object
-        * name to handle special treatment on default security label.
+        * XXX - uncoming version of libselinux supports to take object name to
+        * handle special treatment on default security label.
         */
        rel = heap_open(DatabaseRelationId, AccessShareLock);
 
@@ -91,6 +92,7 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
        ncontext = sepgsql_compute_create(sepgsql_get_client_label(),
                                                                          tcontext,
                                                                          SEPG_CLASS_DB_DATABASE);
+
        /*
         * check db_database:{create} permission
         */
@@ -126,8 +128,8 @@ sepgsql_database_post_create(Oid databaseId, const char *dtemplate)
 void
 sepgsql_database_drop(Oid databaseId)
 {
-       ObjectAddress   object;
-       char               *audit_name;
+       ObjectAddress object;
+       char       *audit_name;
 
        /*
         * check db_database:{drop} permission
@@ -153,8 +155,8 @@ sepgsql_database_drop(Oid databaseId)
 void
 sepgsql_database_relabel(Oid databaseId, const char *seclabel)
 {
-       ObjectAddress   object;
-       char               *audit_name;
+       ObjectAddress object;
+       char       *audit_name;
 
        object.classId = DatabaseRelationId;
        object.objectId = databaseId;
@@ -170,6 +172,7 @@ sepgsql_database_relabel(Oid databaseId, const char *seclabel)
                                                        SEPG_DB_DATABASE__RELABELFROM,
                                                        audit_name,
                                                        true);
+
        /*
         * check db_database:{relabelto} permission
         */
index 17aa41cf4e7eea43ccf033aa49d21a00d9fc153f..47a108741746f99f9a542051f13f68812a43e585 100644 (file)
@@ -150,7 +150,7 @@ check_relation_privileges(Oid relOid,
                                                  uint32 required,
                                                  bool abort)
 {
-       ObjectAddress   object;
+       ObjectAddress object;
        char       *audit_name;
        Bitmapset  *columns;
        int                     index;
index ffa078677c8490ab0973991726be6afc79c288ba..914519109c84bb1053bb3eef9fde06d92a79904d 100644 (file)
@@ -52,9 +52,9 @@ typedef struct
         * command. Elsewhere (including the case of default) NULL.
         */
        const char *createdb_dtemplate;
-} sepgsql_context_info_t;
+}      sepgsql_context_info_t;
 
-static sepgsql_context_info_t  sepgsql_context_info;
+static sepgsql_context_info_t sepgsql_context_info;
 
 /*
  * GUC: sepgsql.permissive = (on|off)
@@ -101,7 +101,7 @@ sepgsql_object_access(ObjectAccessType access,
                        {
                                case DatabaseRelationId:
                                        sepgsql_database_post_create(objectId,
-                                                               sepgsql_context_info.createdb_dtemplate);
+                                                                       sepgsql_context_info.createdb_dtemplate);
                                        break;
 
                                case NamespaceRelationId:
@@ -115,9 +115,8 @@ sepgsql_object_access(ObjectAccessType access,
                                                 * All cases we want to apply permission checks on
                                                 * creation of a new relation are invocation of the
                                                 * heap_create_with_catalog via DefineRelation or
-                                                * OpenIntoRel.
-                                                * Elsewhere, we need neither assignment of security
-                                                * label nor permission checks.
+                                                * OpenIntoRel. Elsewhere, we need neither assignment
+                                                * of security label nor permission checks.
                                                 */
                                                switch (sepgsql_context_info.cmdtype)
                                                {
@@ -150,12 +149,12 @@ sepgsql_object_access(ObjectAccessType access,
 
                case OAT_DROP:
                        {
-                               ObjectAccessDrop *drop_arg = (ObjectAccessDrop *)arg;
+                               ObjectAccessDrop *drop_arg = (ObjectAccessDrop *) arg;
 
                                /*
-                                * No need to apply permission checks on object deletion
-                                * due to internal cleanups; such as removal of temporary
-                                * database object on session closed.
+                                * No need to apply permission checks on object deletion due
+                                * to internal cleanups; such as removal of temporary database
+                                * object on session closed.
                                 */
                                if ((drop_arg->dropflags & PERFORM_DELETION_INTERNAL) != 0)
                                        break;
@@ -219,13 +218,13 @@ sepgsql_exec_check_perms(List *rangeTabls, bool abort)
 /*
  * sepgsql_executor_start
  *
- * It saves contextual information during ExecutorStart to distinguish 
+ * It saves contextual information during ExecutorStart to distinguish
  * a case with/without permission checks later.
  */
 static void
 sepgsql_executor_start(QueryDesc *queryDesc, int eflags)
 {
-       sepgsql_context_info_t  saved_context_info = sepgsql_context_info;
+       sepgsql_context_info_t saved_context_info = sepgsql_context_info;
 
        PG_TRY();
        {
@@ -270,28 +269,29 @@ sepgsql_utility_command(Node *parsetree,
                                                DestReceiver *dest,
                                                char *completionTag)
 {
-       sepgsql_context_info_t  saved_context_info = sepgsql_context_info;
-       ListCell           *cell;
+       sepgsql_context_info_t saved_context_info = sepgsql_context_info;
+       ListCell   *cell;
 
        PG_TRY();
        {
                /*
                 * Check command tag to avoid nefarious operations, and save the
-                * current contextual information to determine whether we should
-                * apply permission checks here, or not.
+                * current contextual information to determine whether we should apply
+                * permission checks here, or not.
                 */
                sepgsql_context_info.cmdtype = nodeTag(parsetree);
 
                switch (nodeTag(parsetree))
                {
                        case T_CreatedbStmt:
+
                                /*
                                 * We hope to reference name of the source database, but it
                                 * does not appear in system catalog. So, we save it here.
                                 */
-                               foreach (cell, ((CreatedbStmt *) parsetree)->options)
+                               foreach(cell, ((CreatedbStmt *) parsetree)->options)
                                {
-                                       DefElem    *defel = (DefElem *) lfirst(cell);
+                                       DefElem    *defel = (DefElem *) lfirst(cell);
 
                                        if (strcmp(defel->defname, "template") == 0)
                                        {
@@ -303,6 +303,7 @@ sepgsql_utility_command(Node *parsetree,
                                break;
 
                        case T_LoadStmt:
+
                                /*
                                 * We reject LOAD command across the board on enforcing mode,
                                 * because a binary module can arbitrarily override hooks.
@@ -315,6 +316,7 @@ sepgsql_utility_command(Node *parsetree,
                                }
                                break;
                        default:
+
                                /*
                                 * Right now we don't check any other utility commands,
                                 * because it needs more detailed information to make access
index 85f4efe072ad552e6f93e0b3da6139bc6cf3502d..23577b5844db3d37e3a6aaddbaf39cd6b980b140 100644 (file)
@@ -58,17 +58,18 @@ static fmgr_hook_type next_fmgr_hook = NULL;
  * we use the list client_label_pending of pending_label to keep track of which
  * labels were set during the (sub-)transactions.
  */
-static char *client_label_peer         = NULL; /* set by getpeercon(3) */
-static List *client_label_pending      = NIL;  /* pending list being set by
-                                                                                        * sepgsql_setcon() */
-static char *client_label_committed    = NULL; /* set by sepgsql_setcon(),
-                                                                                        * and already committed */
-static char *client_label_func         = NULL; /* set by trusted procedure */
-
-typedef struct {
-       SubTransactionId        subid;
-       char                       *label;
-} pending_label;
+static char *client_label_peer = NULL; /* set by getpeercon(3) */
+static List *client_label_pending = NIL;               /* pending list being set by
+                                                                                                * sepgsql_setcon() */
+static char *client_label_committed = NULL;            /* set by sepgsql_setcon(),
+                                                                                                * and already committed */
+static char *client_label_func = NULL; /* set by trusted procedure */
+
+typedef struct
+{
+       SubTransactionId subid;
+       char       *label;
+}      pending_label;
 
 /*
  * sepgsql_get_client_label
@@ -87,7 +88,7 @@ sepgsql_get_client_label(void)
        /* uncommitted sepgsql_setcon() value */
        if (client_label_pending)
        {
-               pending_label  *plabel = llast(client_label_pending);
+               pending_label *plabel = llast(client_label_pending);
 
                if (plabel->label)
                        return plabel->label;
@@ -104,16 +105,16 @@ sepgsql_get_client_label(void)
  * sepgsql_set_client_label
  *
  * This routine tries to switch the current security label of the client, and
- * checks related permissions.  The supplied new label shall be added to the
+ * checks related permissions. The supplied new label shall be added to the
  * client_label_pending list, then saved at transaction-commit time to ensure
  * transaction-awareness.
  */
 static void
 sepgsql_set_client_label(const char *new_label)
 {
-       const char         *tcontext;
-       MemoryContext   oldcxt;
-       pending_label  *plabel;
+       const char *tcontext;
+       MemoryContext oldcxt;
+       pending_label *plabel;
 
        /* Reset to the initial client label, if NULL */
        if (!new_label)
@@ -140,9 +141,10 @@ sepgsql_set_client_label(const char *new_label)
                                                                  SEPG_PROCESS__DYNTRANSITION,
                                                                  NULL,
                                                                  true);
+
        /*
-        * Append the supplied new_label on the pending list until
-        * the current transaction is committed.
+        * Append the supplied new_label on the pending list until the current
+        * transaction is committed.
         */
        oldcxt = MemoryContextSwitchTo(CurTransactionContext);
 
@@ -158,7 +160,7 @@ sepgsql_set_client_label(const char *new_label)
 /*
  * sepgsql_xact_callback
  *
- * A callback routine of transaction commit/abort/prepare.  Commmit or abort
+ * A callback routine of transaction commit/abort/prepare.     Commmit or abort
  * changes in the client_label_pending list.
  */
 static void
@@ -168,8 +170,8 @@ sepgsql_xact_callback(XactEvent event, void *arg)
        {
                if (client_label_pending != NIL)
                {
-                       pending_label  *plabel = llast(client_label_pending);
-                       char               *new_label;
+                       pending_label *plabel = llast(client_label_pending);
+                       char       *new_label;
 
                        if (plabel->label)
                                new_label = MemoryContextStrdup(TopMemoryContext,
@@ -181,10 +183,11 @@ sepgsql_xact_callback(XactEvent event, void *arg)
                                pfree(client_label_committed);
 
                        client_label_committed = new_label;
+
                        /*
-                        * XXX - Note that items of client_label_pending are allocated
-                        * on CurTransactionContext, thus, all acquired memory region
-                        * shall be released implicitly.
+                        * XXX - Note that items of client_label_pending are allocated on
+                        * CurTransactionContext, thus, all acquired memory region shall
+                        * be released implicitly.
                         */
                        client_label_pending = NIL;
                }
@@ -212,7 +215,8 @@ sepgsql_subxact_callback(SubXactEvent event, SubTransactionId mySubid,
                prev = NULL;
                for (cell = list_head(client_label_pending); cell; cell = next)
                {
-                       pending_label  *plabel = lfirst(cell);
+                       pending_label *plabel = lfirst(cell);
+
                        next = lnext(cell);
 
                        if (plabel->subid == mySubid)
@@ -272,7 +276,7 @@ sepgsql_client_auth(Port *port, int status)
 static bool
 sepgsql_needs_fmgr_hook(Oid functionId)
 {
-       ObjectAddress   object;
+       ObjectAddress object;
 
        if (next_needs_fmgr_hook &&
                (*next_needs_fmgr_hook) (functionId))
@@ -340,8 +344,8 @@ sepgsql_fmgr_hook(FmgrHookEventType event,
 
                                /*
                                 * process:transition permission between old and new label,
-                                * when user tries to switch security label of the client
-                                * on execution of trusted procedure.
+                                * when user tries to switch security label of the client on
+                                * execution of trusted procedure.
                                 */
                                if (stack->new_label)
                                        sepgsql_avc_check_perms_label(stack->new_label,
index 1efbc906c6b63ef6ec837ca292e120b06a374118..b68314d87851de0dea8cab5f501e9c33e9dcdbb1 100644 (file)
@@ -42,9 +42,9 @@ sepgsql_proc_post_create(Oid functionId)
        char       *tcontext;
        char       *ncontext;
        int                     i;
-       StringInfoData  audit_name;
-       ObjectAddress   object;
-       Form_pg_proc    proForm;
+       StringInfoData audit_name;
+       ObjectAddress object;
+       Form_pg_proc proForm;
 
        /*
         * Fetch namespace of the new procedure. Because pg_proc entry is not
@@ -77,6 +77,7 @@ sepgsql_proc_post_create(Oid functionId)
                                                        SEPG_DB_SCHEMA__ADD_NAME,
                                                        getObjectDescription(&object),
                                                        true);
+
        /*
         * XXX - db_language:{implement} also should be checked here
         */
@@ -97,9 +98,10 @@ sepgsql_proc_post_create(Oid functionId)
         */
        initStringInfo(&audit_name);
        appendStringInfo(&audit_name, "function %s(", NameStr(proForm->proname));
-       for (i=0; i < proForm->pronargs; i++)
+       for (i = 0; i < proForm->pronargs; i++)
        {
-               Oid             typeoid = proForm->proargtypes.values[i];
+               Oid                     typeoid = proForm->proargtypes.values[i];
+
                if (i > 0)
                        appendStringInfoChar(&audit_name, ',');
                appendStringInfoString(&audit_name, format_type_be(typeoid));
@@ -111,6 +113,7 @@ sepgsql_proc_post_create(Oid functionId)
                                                                  SEPG_DB_PROCEDURE__CREATE,
                                                                  audit_name.data,
                                                                  true);
+
        /*
         * Assign the default security label on a new procedure
         */
@@ -138,8 +141,8 @@ sepgsql_proc_post_create(Oid functionId)
 void
 sepgsql_proc_drop(Oid functionId)
 {
-       ObjectAddress   object;
-       char               *audit_name;
+       ObjectAddress object;
+       char       *audit_name;
 
        /*
         * check db_schema:{remove_name} permission
@@ -156,19 +159,19 @@ sepgsql_proc_drop(Oid functionId)
                                                        true);
        pfree(audit_name);
 
-    /*
-     * check db_procedure:{drop} permission
-     */
+       /*
+        * check db_procedure:{drop} permission
+        */
        object.classId = ProcedureRelationId;
        object.objectId = functionId;
        object.objectSubId = 0;
        audit_name = getObjectDescription(&object);
 
-    sepgsql_avc_check_perms(&object,
-                            SEPG_CLASS_DB_PROCEDURE,
-                            SEPG_DB_PROCEDURE__DROP,
-                            audit_name,
-                            true);
+       sepgsql_avc_check_perms(&object,
+                                                       SEPG_CLASS_DB_PROCEDURE,
+                                                       SEPG_DB_PROCEDURE__DROP,
+                                                       audit_name,
+                                                       true);
        pfree(audit_name);
 }
 
@@ -181,8 +184,8 @@ sepgsql_proc_drop(Oid functionId)
 void
 sepgsql_proc_relabel(Oid functionId, const char *seclabel)
 {
-       ObjectAddress   object;
-       char               *audit_name;
+       ObjectAddress object;
+       char       *audit_name;
 
        object.classId = ProcedureRelationId;
        object.objectId = functionId;
@@ -198,6 +201,7 @@ sepgsql_proc_relabel(Oid functionId, const char *seclabel)
                                                        SEPG_DB_PROCEDURE__RELABELFROM,
                                                        audit_name,
                                                        true);
+
        /*
         * check db_procedure:{relabelto} permission
         */
index 259be492684ffb14ae127861b333bd5f790ad936..e759a7d98ee686ded63385ce4475a127817bd44a 100644 (file)
@@ -44,9 +44,9 @@ sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum)
        char       *scontext;
        char       *tcontext;
        char       *ncontext;
-       char            audit_name[2*NAMEDATALEN + 20];
+       char            audit_name[2 * NAMEDATALEN + 20];
        ObjectAddress object;
-       Form_pg_attribute       attForm;
+       Form_pg_attribute attForm;
 
        /*
         * Only attributes within regular relation have individual security
@@ -84,6 +84,7 @@ sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum)
        tcontext = sepgsql_get_label(RelationRelationId, relOid, 0);
        ncontext = sepgsql_compute_create(scontext, tcontext,
                                                                          SEPG_CLASS_DB_COLUMN);
+
        /*
         * check db_column:{create} permission
         */
@@ -118,8 +119,8 @@ sepgsql_attribute_post_create(Oid relOid, AttrNumber attnum)
 void
 sepgsql_attribute_drop(Oid relOid, AttrNumber attnum)
 {
-       ObjectAddress   object;
-       char               *audit_name;
+       ObjectAddress object;
+       char       *audit_name;
 
        if (get_rel_relkind(relOid) != RELKIND_RELATION)
                return;
@@ -151,7 +152,7 @@ sepgsql_attribute_relabel(Oid relOid, AttrNumber attnum,
                                                  const char *seclabel)
 {
        ObjectAddress object;
-       char             *audit_name;
+       char       *audit_name;
 
        if (get_rel_relkind(relOid) != RELKIND_RELATION)
                ereport(ERROR,
@@ -172,6 +173,7 @@ sepgsql_attribute_relabel(Oid relOid, AttrNumber attnum,
                                                        SEPG_DB_COLUMN__RELABELFROM,
                                                        audit_name,
                                                        true);
+
        /*
         * check db_column:{relabelto} permission
         */
@@ -203,7 +205,7 @@ sepgsql_relation_post_create(Oid relOid)
        char       *tcontext;           /* schema */
        char       *rcontext;           /* relation */
        char       *ccontext;           /* column */
-       char            audit_name[2*NAMEDATALEN + 20];
+       char            audit_name[2 * NAMEDATALEN + 20];
 
        /*
         * Fetch catalog record of the new relation. Because pg_class entry is not
@@ -254,6 +256,7 @@ sepgsql_relation_post_create(Oid relOid)
                                                        SEPG_DB_SCHEMA__ADD_NAME,
                                                        getObjectDescription(&object),
                                                        true);
+
        /*
         * Compute a default security label when we create a new relation object
         * under the specified namespace.
@@ -273,6 +276,7 @@ sepgsql_relation_post_create(Oid relOid)
                                                                  SEPG_DB_DATABASE__CREATE,
                                                                  audit_name,
                                                                  true);
+
        /*
         * Assign the default security label on the new relation
         */
@@ -288,10 +292,10 @@ sepgsql_relation_post_create(Oid relOid)
        if (classForm->relkind == RELKIND_RELATION)
        {
                Relation        arel;
-               ScanKeyData     akey;
-               SysScanDesc     ascan;
+               ScanKeyData akey;
+               SysScanDesc ascan;
                HeapTuple       atup;
-               Form_pg_attribute       attForm;
+               Form_pg_attribute attForm;
 
                arel = heap_open(AttributeRelationId, AccessShareLock);
 
@@ -315,6 +319,7 @@ sepgsql_relation_post_create(Oid relOid)
                        ccontext = sepgsql_compute_create(scontext,
                                                                                          rcontext,
                                                                                          SEPG_CLASS_DB_COLUMN);
+
                        /*
                         * check db_column:{create} permission
                         */
@@ -348,10 +353,10 @@ out:
 void
 sepgsql_relation_drop(Oid relOid)
 {
-       ObjectAddress   object;
-       char               *audit_name;
-       uint16_t                tclass = 0;
-       char                    relkind;
+       ObjectAddress object;
+       char       *audit_name;
+       uint16_t        tclass = 0;
+       char            relkind;
 
        relkind = get_rel_relkind(relOid);
        if (relkind == RELKIND_RELATION)
@@ -398,13 +403,13 @@ sepgsql_relation_drop(Oid relOid)
         */
        if (relkind == RELKIND_RELATION)
        {
-               Form_pg_attribute       attForm;
+               Form_pg_attribute attForm;
                CatCList   *attrList;
                HeapTuple       atttup;
                int                     i;
 
                attrList = SearchSysCacheList1(ATTNUM, ObjectIdGetDatum(relOid));
-               for (i=0; i < attrList->n_members; i++)
+               for (i = 0; i < attrList->n_members; i++)
                {
                        atttup = &attrList->members[i]->tuple;
                        attForm = (Form_pg_attribute) GETSTRUCT(atttup);
@@ -436,7 +441,7 @@ sepgsql_relation_drop(Oid relOid)
 void
 sepgsql_relation_relabel(Oid relOid, const char *seclabel)
 {
-       ObjectAddress   object;
+       ObjectAddress object;
        char       *audit_name;
        char            relkind;
        uint16_t        tclass = 0;
@@ -468,6 +473,7 @@ sepgsql_relation_relabel(Oid relOid, const char *seclabel)
                                                        SEPG_DB_TABLE__RELABELFROM,
                                                        audit_name,
                                                        true);
+
        /*
         * check db_xxx:{relabelto} permission
         */
index 31d60efe18514daa722af0080240f0210360e781..230449dc4b3558d9c1d67cd2111afa0e846e7496 100644 (file)
@@ -35,22 +35,22 @@ void
 sepgsql_schema_post_create(Oid namespaceId)
 {
        Relation        rel;
-       ScanKeyData     skey;
-       SysScanDesc     sscan;
+       ScanKeyData skey;
+       SysScanDesc sscan;
        HeapTuple       tuple;
        char       *tcontext;
        char       *ncontext;
        char            audit_name[NAMEDATALEN + 20];
-       ObjectAddress           object;
-       Form_pg_namespace       nspForm;
+       ObjectAddress object;
+       Form_pg_namespace nspForm;
 
        /*
         * Compute a default security label when we create a new schema object
         * under the working database.
         *
-        * XXX - uncoming version of libselinux supports to take object
-        * name to handle special treatment on default security label;
-        * such as special label on "pg_temp" schema.
+        * XXX - uncoming version of libselinux supports to take object name to
+        * handle special treatment on default security label; such as special
+        * label on "pg_temp" schema.
         */
        rel = heap_open(NamespaceRelationId, AccessShareLock);
 
@@ -71,6 +71,7 @@ sepgsql_schema_post_create(Oid namespaceId)
        ncontext = sepgsql_compute_create(sepgsql_get_client_label(),
                                                                          tcontext,
                                                                          SEPG_CLASS_DB_SCHEMA);
+
        /*
         * check db_schema:{create}
         */
@@ -104,8 +105,8 @@ sepgsql_schema_post_create(Oid namespaceId)
 void
 sepgsql_schema_drop(Oid namespaceId)
 {
-       ObjectAddress   object;
-       char               *audit_name;
+       ObjectAddress object;
+       char       *audit_name;
 
        /*
         * check db_schema:{drop} permission
@@ -116,7 +117,7 @@ sepgsql_schema_drop(Oid namespaceId)
        audit_name = getObjectDescription(&object);
 
        sepgsql_avc_check_perms(&object,
-                            SEPG_CLASS_DB_SCHEMA,
+                                                       SEPG_CLASS_DB_SCHEMA,
                                                        SEPG_DB_SCHEMA__DROP,
                                                        audit_name,
                                                        true);
@@ -132,8 +133,8 @@ sepgsql_schema_drop(Oid namespaceId)
 void
 sepgsql_schema_relabel(Oid namespaceId, const char *seclabel)
 {
-       ObjectAddress   object;
-       char               *audit_name;
+       ObjectAddress object;
+       char       *audit_name;
 
        object.classId = NamespaceRelationId;
        object.objectId = namespaceId;
@@ -149,6 +150,7 @@ sepgsql_schema_relabel(Oid namespaceId, const char *seclabel)
                                                        SEPG_DB_SCHEMA__RELABELFROM,
                                                        audit_name,
                                                        true);
+
        /*
         * check db_schema:{relabelto} permission
         */
index 708d4ee6563c82dd90d8da8b0e67b3f82baa49f0..479b136909bca0b49c8a8e67dbc4ae87867e56be 100644 (file)
@@ -248,20 +248,21 @@ extern bool sepgsql_check_perms(const char *scontext,
                                        uint32 required,
                                        const char *audit_name,
                                        bool abort);
+
 /*
  * uavc.c
  */
 #define SEPGSQL_AVC_NOAUDIT                    ((void *)(-1))
 extern bool sepgsql_avc_check_perms_label(const char *tcontext,
-                                                                                 uint16 tclass,
-                                                                                 uint32 required,
-                                                                                 const char *audit_name,
-                                                                                 bool abort);
+                                                         uint16 tclass,
+                                                         uint32 required,
+                                                         const char *audit_name,
+                                                         bool abort);
 extern bool sepgsql_avc_check_perms(const ObjectAddress *tobject,
-                                                                       uint16 tclass,
-                                                                       uint32 required,
-                                                                       const char *audit_name,
-                                                                       bool abort);
+                                               uint16 tclass,
+                                               uint32 required,
+                                               const char *audit_name,
+                                               bool abort);
 extern char *sepgsql_avc_trusted_proc(Oid functionId);
 extern void sepgsql_avc_init(void);
 
@@ -269,7 +270,7 @@ extern void sepgsql_avc_init(void);
  * label.c
  */
 extern char *sepgsql_get_client_label(void);
-extern void  sepgsql_init_client_label(void);
+extern void sepgsql_init_client_label(void);
 extern char *sepgsql_get_label(Oid relOid, Oid objOid, int32 subId);
 
 extern void sepgsql_object_relabel(const ObjectAddress *object,
@@ -290,7 +291,7 @@ extern bool sepgsql_dml_privileges(List *rangeTabls, bool abort);
  * database.c
  */
 extern void sepgsql_database_post_create(Oid databaseId,
-                                                                                const char *dtemplate);
+                                                        const char *dtemplate);
 extern void sepgsql_database_drop(Oid databaseId);
 extern void sepgsql_database_relabel(Oid databaseId, const char *seclabel);
 
index 905f87dfc8dabc5f5eb40a66f619ae2982ea6c24..9641a17d79e84aec4fe8c521516e3d2d7662a595 100644 (file)
  */
 typedef struct
 {
-       uint32          hash;           /* hash value of this cache entry */
-       char       *scontext;   /* security context of the subject */
-       char       *tcontext;   /* security context of the target */
-       uint16          tclass;         /* object class of the target */
+       uint32          hash;                   /* hash value of this cache entry */
+       char       *scontext;           /* security context of the subject */
+       char       *tcontext;           /* security context of the target */
+       uint16          tclass;                 /* object class of the target */
 
-       uint32          allowed;        /* permissions to be allowed */
-       uint32          auditallow;     /* permissions to be audited on allowed */
-       uint32          auditdeny;      /* permissions to be audited on denied */
+       uint32          allowed;                /* permissions to be allowed */
+       uint32          auditallow;             /* permissions to be audited on allowed */
+       uint32          auditdeny;              /* permissions to be audited on denied */
 
-       bool            permissive;     /* true, if permissive rule */
-       bool            hot_cache;      /* true, if recently referenced */
+       bool            permissive;             /* true, if permissive rule */
+       bool            hot_cache;              /* true, if recently referenced */
        bool            tcontext_is_valid;
-                                                       /* true, if tcontext is valid */
-       char       *ncontext;   /* temporary scontext on execution of trusted
-                                                        * procedure, or NULL elsewhere */
-} avc_cache;
+       /* true, if tcontext is valid */
+       char       *ncontext;           /* temporary scontext on execution of trusted
+                                                                * procedure, or NULL elsewhere */
+}      avc_cache;
 
 /*
  * Declaration of static variables
@@ -54,12 +54,12 @@ typedef struct
 #define AVC_NUM_RECLAIM                16
 #define AVC_DEF_THRESHOLD      384
 
-static MemoryContext   avc_mem_cxt;
-static List       *avc_slots[AVC_NUM_SLOTS];   /* avc's hash buckets */
-static int             avc_num_caches; /* number of caches currently used */
-static int             avc_lru_hint;   /* index of the buckets to be reclaimed next */
-static int             avc_threshold;  /* threshold to launch cache-reclaiming  */
-static char       *avc_unlabeled;      /* system 'unlabeled' label */
+static MemoryContext avc_mem_cxt;
+static List *avc_slots[AVC_NUM_SLOTS]; /* avc's hash buckets */
+static int     avc_num_caches;         /* number of caches currently used */
+static int     avc_lru_hint;           /* index of the buckets to be reclaimed next */
+static int     avc_threshold;          /* threshold to launch cache-reclaiming  */
+static char *avc_unlabeled;            /* system 'unlabeled' label */
 
 /*
  * Hash function
@@ -67,8 +67,8 @@ static char      *avc_unlabeled;      /* system 'unlabeled' label */
 static uint32
 sepgsql_avc_hash(const char *scontext, const char *tcontext, uint16 tclass)
 {
-       return hash_any((const unsigned char *)scontext, strlen(scontext))
-               ^ hash_any((const unsigned char *)tcontext, strlen(tcontext))
+       return hash_any((const unsigned char *) scontext, strlen(scontext))
+               ^ hash_any((const unsigned char *) tcontext, strlen(tcontext))
                ^ tclass;
 }
 
@@ -88,7 +88,7 @@ sepgsql_avc_reset(void)
 
 /*
  * Reclaim caches recently unreferenced
- */    
+ */
 static void
 sepgsql_avc_reclaim(void)
 {
@@ -142,15 +142,15 @@ sepgsql_avc_reclaim(void)
  * Access control decisions must be atomic, but multiple system calls may
  * be required to make a decision; thus, when referencing the access vector
  * cache, we must loop until we complete without an intervening cache flush
- * event.  In practice, looping even once should be very rare.  Callers should
+ * event.  In practice, looping even once should be very rare. Callers should
  * do something like this:
  *
- *   sepgsql_avc_check_valid();
- *   do {
- *           :
- *       <reference to uavc>
- *           :
- *   } while (!sepgsql_avc_check_valid())
+ *      sepgsql_avc_check_valid();
+ *      do {
+ *                      :
+ *              <reference to uavc>
+ *                      :
+ *      } while (!sepgsql_avc_check_valid())
  *
  * -------------------------------------------------------------------------
  */
@@ -169,7 +169,7 @@ sepgsql_avc_check_valid(void)
 /*
  * sepgsql_avc_unlabeled
  *
- * Returns an alternative label to be applied when no label or an invalid 
+ * Returns an alternative label to be applied when no label or an invalid
  * label would otherwise be assigned.
  */
 static char *
@@ -177,12 +177,12 @@ sepgsql_avc_unlabeled(void)
 {
        if (!avc_unlabeled)
        {
-               security_context_t      unlabeled;
+               security_context_t unlabeled;
 
                if (security_get_initial_context_raw("unlabeled", &unlabeled) < 0)
                        ereport(ERROR,
-                    (errcode(ERRCODE_INTERNAL_ERROR),
-                     errmsg("SELinux: failed to get initial security label: %m")));
+                                       (errcode(ERRCODE_INTERNAL_ERROR),
+                          errmsg("SELinux: failed to get initial security label: %m")));
                PG_TRY();
                {
                        avc_unlabeled = MemoryContextStrdup(avc_mem_cxt, unlabeled);
@@ -200,7 +200,7 @@ sepgsql_avc_unlabeled(void)
 }
 
 /*
- * sepgsql_avc_compute 
+ * sepgsql_avc_compute
  *
  * A fallback path, when cache mishit. It asks SELinux its access control
  * decision for the supplied pair of security context and object class.
@@ -208,24 +208,24 @@ sepgsql_avc_unlabeled(void)
 static avc_cache *
 sepgsql_avc_compute(const char *scontext, const char *tcontext, uint16 tclass)
 {
-       char               *ucontext = NULL;
-       char               *ncontext = NULL;
-       MemoryContext   oldctx;
-       avc_cache          *cache;
-       uint32                  hash;
-       int                             index;
-       struct av_decision      avd;
+       char       *ucontext = NULL;
+       char       *ncontext = NULL;
+       MemoryContext oldctx;
+       avc_cache  *cache;
+       uint32          hash;
+       int                     index;
+       struct av_decision avd;
 
        hash = sepgsql_avc_hash(scontext, tcontext, tclass);
        index = hash % AVC_NUM_SLOTS;
 
        /*
-        * Validation check of the supplied security context.
-        * Because it always invoke system-call, frequent check should be avoided.
-        * Unless security policy is reloaded, validation status shall be kept, so
-        * we also cache whether the supplied security context was valid, or not.
+        * Validation check of the supplied security context. Because it always
+        * invoke system-call, frequent check should be avoided. Unless security
+        * policy is reloaded, validation status shall be kept, so we also cache
+        * whether the supplied security context was valid, or not.
         */
-       if (security_check_context_raw((security_context_t)tcontext) != 0)
+       if (security_check_context_raw((security_context_t) tcontext) != 0)
                ucontext = sepgsql_avc_unlabeled();
 
        /*
@@ -237,15 +237,14 @@ sepgsql_avc_compute(const char *scontext, const char *tcontext, uint16 tclass)
                sepgsql_compute_avd(scontext, ucontext, tclass, &avd);
 
        /*
-        * It also caches a security label to be switched when a client
-        * labeled as 'scontext' executes a procedure labeled as 'tcontext',
-        * not only access control decision on the procedure.
-        * The security label to be switched shall be computed uniquely on
-        * a pair of 'scontext' and 'tcontext', thus, it is reasonable to
-        * cache the new label on avc, and enables to reduce unnecessary
-        * system calls.
-        * It shall be referenced at sepgsql_needs_fmgr_hook to check whether
-        * the supplied function is a trusted procedure, or not.
+        * It also caches a security label to be switched when a client labeled as
+        * 'scontext' executes a procedure labeled as 'tcontext', not only access
+        * control decision on the procedure. The security label to be switched
+        * shall be computed uniquely on a pair of 'scontext' and 'tcontext',
+        * thus, it is reasonable to cache the new label on avc, and enables to
+        * reduce unnecessary system calls. It shall be referenced at
+        * sepgsql_needs_fmgr_hook to check whether the supplied function is a
+        * trusted procedure, or not.
         */
        if (tclass == SEPG_CLASS_DB_PROCEDURE)
        {
@@ -269,7 +268,7 @@ sepgsql_avc_compute(const char *scontext, const char *tcontext, uint16 tclass)
 
        cache = palloc0(sizeof(avc_cache));
 
-       cache->hash     = hash;
+       cache->hash = hash;
        cache->scontext = pstrdup(scontext);
        cache->tcontext = pstrdup(tcontext);
        cache->tclass = tclass;
@@ -314,7 +313,7 @@ sepgsql_avc_lookup(const char *scontext, const char *tcontext, uint16 tclass)
        hash = sepgsql_avc_hash(scontext, tcontext, tclass);
        index = hash % AVC_NUM_SLOTS;
 
-       foreach (cell, avc_slots[index])
+       foreach(cell, avc_slots[index])
        {
                cache = lfirst(cell);
 
@@ -348,14 +347,15 @@ sepgsql_avc_check_perms_label(const char *tcontext,
                                                          uint16 tclass, uint32 required,
                                                          const char *audit_name, bool abort)
 {
-       char *scontext = sepgsql_get_client_label();
+       char       *scontext = sepgsql_get_client_label();
        avc_cache  *cache;
        uint32          denied;
        uint32          audited;
        bool            result;
 
        sepgsql_avc_check_valid();
-       do {
+       do
+       {
                result = true;
 
                /*
@@ -377,16 +377,16 @@ sepgsql_avc_check_perms_label(const char *tcontext,
                        audited = (denied ? (denied & ~0) : (required & ~0));
                else
                        audited = denied ? (denied & cache->auditdeny)
-                                                        : (required & cache->auditallow);
+                               : (required & cache->auditallow);
 
                if (denied)
                {
                        /*
                         * In permissive mode or permissive domain, violated permissions
                         * shall be audited to the log files at once, and then implicitly
-                        * allowed to avoid a flood of access denied logs, because
-                        * the purpose of permissive mode/domain is to collect a violation
-                        * log that will make it possible to fix up the security policy.
+                        * allowed to avoid a flood of access denied logs, because the
+                        * purpose of permissive mode/domain is to collect a violation log
+                        * that will make it possible to fix up the security policy.
                         */
                        if (!sepgsql_getenforce() || cache->permissive)
                                cache->allowed |= required;
@@ -397,10 +397,10 @@ sepgsql_avc_check_perms_label(const char *tcontext,
 
        /*
         * In the case when we have something auditable actions here,
-        * sepgsql_audit_log shall be called with text representation of
-        * security labels for both of subject and object.
-        * It records this access violation, so DBA will be able to find
-        * out unexpected security problems later.
+        * sepgsql_audit_log shall be called with text representation of security
+        * labels for both of subject and object. It records this access
+        * violation, so DBA will be able to find out unexpected security problems
+        * later.
         */
        if (audited != 0 &&
                audit_name != SEPGSQL_AVC_NOAUDIT &&
@@ -428,8 +428,8 @@ sepgsql_avc_check_perms(const ObjectAddress *tobject,
                                                uint16 tclass, uint32 required,
                                                const char *audit_name, bool abort)
 {
-       char   *tcontext = GetSecurityLabel(tobject, SEPGSQL_LABEL_TAG);
-       bool    rc;
+       char       *tcontext = GetSecurityLabel(tobject, SEPGSQL_LABEL_TAG);
+       bool            rc;
 
        rc = sepgsql_avc_check_perms_label(tcontext,
                                                                           tclass, required,
@@ -450,10 +450,10 @@ sepgsql_avc_check_perms(const ObjectAddress *tobject,
 char *
 sepgsql_avc_trusted_proc(Oid functionId)
 {
-       char               *scontext = sepgsql_get_client_label();
-       char               *tcontext;
-       ObjectAddress   tobject;
-       avc_cache          *cache;
+       char       *scontext = sepgsql_get_client_label();
+       char       *tcontext;
+       ObjectAddress tobject;
+       avc_cache  *cache;
 
        tobject.classId = ProcedureRelationId;
        tobject.objectId = functionId;
@@ -461,7 +461,8 @@ sepgsql_avc_trusted_proc(Oid functionId)
        tcontext = GetSecurityLabel(&tobject, SEPGSQL_LABEL_TAG);
 
        sepgsql_avc_check_valid();
-       do {
+       do
+       {
                if (tcontext)
                        cache = sepgsql_avc_lookup(scontext, tcontext,
                                                                           SEPG_CLASS_DB_PROCEDURE);
@@ -492,7 +493,7 @@ sepgsql_avc_exit(int code, Datum arg)
 void
 sepgsql_avc_init(void)
 {
-       int     rc;
+       int                     rc;
 
        /*
         * All the avc stuff shall be allocated on avc_mem_cxt
@@ -508,12 +509,11 @@ sepgsql_avc_init(void)
        avc_threshold = AVC_DEF_THRESHOLD;
 
        /*
-        * SELinux allows to mmap(2) its kernel status page in read-only mode
-        * to inform userspace applications its status updating (such as
-        * policy reloading) without system-call invocations.
-        * This feature is only supported in Linux-2.6.38 or later, however,
-        * libselinux provides a fallback mode to know its status using
-        * netlink sockets.
+        * SELinux allows to mmap(2) its kernel status page in read-only mode to
+        * inform userspace applications its status updating (such as policy
+        * reloading) without system-call invocations. This feature is only
+        * supported in Linux-2.6.38 or later, however, libselinux provides a
+        * fallback mode to know its status using netlink sockets.
         */
        rc = selinux_status_open(1);
        if (rc < 0)
index 39a0160587461a31f0ba01c5e7e26fc9ad14fbbf..8dc565a190775f5ba94ae11e684de4da027debfe 100644 (file)
@@ -536,8 +536,7 @@ check_foreign_key(PG_FUNCTION_ARGS)
 
                        /*
                         * Remember that SPI_prepare places plan in current memory context
-                        * - so, we have to save plan in Top memory context for later
-                        * use.
+                        * - so, we have to save plan in Top memory context for later use.
                         */
                        if (SPI_keepplan(pplan))
                                /* internal error */
index 641a8c3425d97614a14a547f91651f0bbd83eae8..958a496b24abb85a82cb1f738d02db1ac9ff22b6 100644 (file)
@@ -69,7 +69,7 @@ vacuumlo(const char *database, const struct _param * param)
        int                     i;
        static char *password = NULL;
        bool            new_pass;
-       bool        success = true;
+       bool            success = true;
 
        /* Note: password can be carried over from a previous call */
        if (param->pg_prompt == TRI_YES && password == NULL)
@@ -261,8 +261,8 @@ vacuumlo(const char *database, const struct _param * param)
         * We don't want to run each delete as an individual transaction, because
         * the commit overhead would be high.  However, since 9.0 the backend will
         * acquire a lock per deleted LO, so deleting too many LOs per transaction
-        * risks running out of room in the shared-memory lock table.
-        * Accordingly, we delete up to transaction_limit LOs per transaction.
+        * risks running out of room in the shared-memory lock table. Accordingly,
+        * we delete up to transaction_limit LOs per transaction.
         */
        res = PQexec(conn, "begin");
        if (PQresultStatus(res) != PGRES_COMMAND_OK)
@@ -459,8 +459,8 @@ main(int argc, char **argv)
                                if (param.transaction_limit < 0)
                                {
                                        fprintf(stderr,
-                               "%s: transaction limit must not be negative (0 disables)\n",
-                                               progname);
+                                                       "%s: transaction limit must not be negative (0 disables)\n",
+                                                       progname);
                                        exit(1);
                                }
                                break;
index 2ddee59fcb779ba3677affdbe16eb8e7fc3ff204..660d25c3490bc6bfda8298e3d474186fe7e14492 100644 (file)
@@ -702,126 +702,126 @@ xpath_table(PG_FUNCTION_ARGS)
 
        PG_TRY();
        {
-       /* For each row i.e. document returned from SPI */
-       for (i = 0; i < proc; i++)
-       {
-               char       *pkey;
-               char       *xmldoc;
-               xmlXPathContextPtr ctxt;
-               xmlXPathObjectPtr res;
-               xmlChar    *resstr;
-               xmlXPathCompExprPtr comppath;
-
-               /* Extract the row data as C Strings */
-               spi_tuple = tuptable->vals[i];
-               pkey = SPI_getvalue(spi_tuple, spi_tupdesc, 1);
-               xmldoc = SPI_getvalue(spi_tuple, spi_tupdesc, 2);
-
-               /*
-                * Clear the values array, so that not-well-formed documents return
-                * NULL in all columns.  Note that this also means that spare columns
-                * will be NULL.
-                */
-               for (j = 0; j < ret_tupdesc->natts; j++)
-                       values[j] = NULL;
-
-               /* Insert primary key */
-               values[0] = pkey;
-
-               /* Parse the document */
-               if (xmldoc)
-                       doctree = xmlParseMemory(xmldoc, strlen(xmldoc));
-               else    /* treat NULL as not well-formed */
-                       doctree = NULL;
-
-               if (doctree == NULL)
+               /* For each row i.e. document returned from SPI */
+               for (i = 0; i < proc; i++)
                {
-                       /* not well-formed, so output all-NULL tuple */
-                       ret_tuple = BuildTupleFromCStrings(attinmeta, values);
-                       tuplestore_puttuple(tupstore, ret_tuple);
-                       heap_freetuple(ret_tuple);
-               }
-               else
-               {
-                       /* New loop here - we have to deal with nodeset results */
-                       rownr = 0;
-
-                       do
+                       char       *pkey;
+                       char       *xmldoc;
+                       xmlXPathContextPtr ctxt;
+                       xmlXPathObjectPtr res;
+                       xmlChar    *resstr;
+                       xmlXPathCompExprPtr comppath;
+
+                       /* Extract the row data as C Strings */
+                       spi_tuple = tuptable->vals[i];
+                       pkey = SPI_getvalue(spi_tuple, spi_tupdesc, 1);
+                       xmldoc = SPI_getvalue(spi_tuple, spi_tupdesc, 2);
+
+                       /*
+                        * Clear the values array, so that not-well-formed documents
+                        * return NULL in all columns.  Note that this also means that
+                        * spare columns will be NULL.
+                        */
+                       for (j = 0; j < ret_tupdesc->natts; j++)
+                               values[j] = NULL;
+
+                       /* Insert primary key */
+                       values[0] = pkey;
+
+                       /* Parse the document */
+                       if (xmldoc)
+                               doctree = xmlParseMemory(xmldoc, strlen(xmldoc));
+                       else    /* treat NULL as not well-formed */
+                               doctree = NULL;
+
+                       if (doctree == NULL)
                        {
-                               /* Now evaluate the set of xpaths. */
-                               had_values = false;
-                               for (j = 0; j < numpaths; j++)
+                               /* not well-formed, so output all-NULL tuple */
+                               ret_tuple = BuildTupleFromCStrings(attinmeta, values);
+                               tuplestore_puttuple(tupstore, ret_tuple);
+                               heap_freetuple(ret_tuple);
+                       }
+                       else
+                       {
+                               /* New loop here - we have to deal with nodeset results */
+                               rownr = 0;
+
+                               do
                                {
-                                       ctxt = xmlXPathNewContext(doctree);
-                                       ctxt->node = xmlDocGetRootElement(doctree);
+                                       /* Now evaluate the set of xpaths. */
+                                       had_values = false;
+                                       for (j = 0; j < numpaths; j++)
+                                       {
+                                               ctxt = xmlXPathNewContext(doctree);
+                                               ctxt->node = xmlDocGetRootElement(doctree);
 
-                                       /* compile the path */
-                                       comppath = xmlXPathCompile(xpaths[j]);
-                                       if (comppath == NULL)
-                                               xml_ereport(xmlerrcxt, ERROR,
-                                                                       ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
-                                                                       "XPath Syntax Error");
+                                               /* compile the path */
+                                               comppath = xmlXPathCompile(xpaths[j]);
+                                               if (comppath == NULL)
+                                                       xml_ereport(xmlerrcxt, ERROR,
+                                                                               ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
+                                                                               "XPath Syntax Error");
 
-                                       /* Now evaluate the path expression. */
-                                       res = xmlXPathCompiledEval(comppath, ctxt);
-                                       xmlXPathFreeCompExpr(comppath);
+                                               /* Now evaluate the path expression. */
+                                               res = xmlXPathCompiledEval(comppath, ctxt);
+                                               xmlXPathFreeCompExpr(comppath);
 
-                                       if (res != NULL)
-                                       {
-                                               switch (res->type)
+                                               if (res != NULL)
                                                {
-                                                       case XPATH_NODESET:
-                                                               /* We see if this nodeset has enough nodes */
-                                                               if (res->nodesetval != NULL &&
-                                                                       rownr < res->nodesetval->nodeNr)
-                                                               {
-                                                                       resstr = xmlXPathCastNodeToString(res->nodesetval->nodeTab[rownr]);
-                                                                       had_values = true;
-                                                               }
-                                                               else
-                                                                       resstr = NULL;
-
-                                                               break;
-
-                                                       case XPATH_STRING:
-                                                               resstr = xmlStrdup(res->stringval);
-                                                               break;
-
-                                                       default:
-                                                               elog(NOTICE, "unsupported XQuery result: %d", res->type);
-                                                               resstr = xmlStrdup((const xmlChar *) "<unsupported/>");
+                                                       switch (res->type)
+                                                       {
+                                                               case XPATH_NODESET:
+                                                                       /* We see if this nodeset has enough nodes */
+                                                                       if (res->nodesetval != NULL &&
+                                                                               rownr < res->nodesetval->nodeNr)
+                                                                       {
+                                                                               resstr = xmlXPathCastNodeToString(res->nodesetval->nodeTab[rownr]);
+                                                                               had_values = true;
+                                                                       }
+                                                                       else
+                                                                               resstr = NULL;
+
+                                                                       break;
+
+                                                               case XPATH_STRING:
+                                                                       resstr = xmlStrdup(res->stringval);
+                                                                       break;
+
+                                                               default:
+                                                                       elog(NOTICE, "unsupported XQuery result: %d", res->type);
+                                                                       resstr = xmlStrdup((const xmlChar *) "<unsupported/>");
+                                                       }
+
+                                                       /*
+                                                        * Insert this into the appropriate column in the
+                                                        * result tuple.
+                                                        */
+                                                       values[j + 1] = (char *) resstr;
                                                }
-
-                                               /*
-                                                * Insert this into the appropriate column in the
-                                                * result tuple.
-                                                */
-                                               values[j + 1] = (char *) resstr;
+                                               xmlXPathFreeContext(ctxt);
                                        }
-                                       xmlXPathFreeContext(ctxt);
-                               }
 
-                               /* Now add the tuple to the output, if there is one. */
-                               if (had_values)
-                               {
-                                       ret_tuple = BuildTupleFromCStrings(attinmeta, values);
-                                       tuplestore_puttuple(tupstore, ret_tuple);
-                                       heap_freetuple(ret_tuple);
-                               }
+                                       /* Now add the tuple to the output, if there is one. */
+                                       if (had_values)
+                                       {
+                                               ret_tuple = BuildTupleFromCStrings(attinmeta, values);
+                                               tuplestore_puttuple(tupstore, ret_tuple);
+                                               heap_freetuple(ret_tuple);
+                                       }
 
-                               rownr++;
-                       } while (had_values);
-               }
+                                       rownr++;
+                               } while (had_values);
+                       }
 
-               if (doctree != NULL)
-                       xmlFreeDoc(doctree);
-               doctree = NULL;
+                       if (doctree != NULL)
+                               xmlFreeDoc(doctree);
+                       doctree = NULL;
 
-               if (pkey)
-                       pfree(pkey);
-               if (xmldoc)
-                       pfree(xmldoc);
-       }
+                       if (pkey)
+                               pfree(pkey);
+                       if (xmldoc)
+                               pfree(xmldoc);
+               }
        }
        PG_CATCH();
        {
index ba1171a041fadb9d605ef78b879bf43946c10b02..a93931d2618ccecaefa3a71fe752fcd52132302c 100644 (file)
@@ -85,40 +85,40 @@ xslt_process(PG_FUNCTION_ARGS)
        {
                /* Check to see if document is a file or a literal */
 
-       if (VARDATA(doct)[0] == '<')
-               doctree = xmlParseMemory((char *) VARDATA(doct), VARSIZE(doct) - VARHDRSZ);
-       else
-               doctree = xmlParseFile(text_to_cstring(doct));
-
-       if (doctree == NULL)
-               xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
-                                       "error parsing XML document");
+               if (VARDATA(doct)[0] == '<')
+                       doctree = xmlParseMemory((char *) VARDATA(doct), VARSIZE(doct) - VARHDRSZ);
+               else
+                       doctree = xmlParseFile(text_to_cstring(doct));
 
-       /* Same for stylesheet */
-       if (VARDATA(ssheet)[0] == '<')
-       {
-               ssdoc = xmlParseMemory((char *) VARDATA(ssheet),
-                                                          VARSIZE(ssheet) - VARHDRSZ);
-               if (ssdoc == NULL)
+               if (doctree == NULL)
                        xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
-                                               "error parsing stylesheet as XML document");
+                                               "error parsing XML document");
 
-               stylesheet = xsltParseStylesheetDoc(ssdoc);
-       }
-       else
-               stylesheet = xsltParseStylesheetFile((xmlChar *) text_to_cstring(ssheet));
+               /* Same for stylesheet */
+               if (VARDATA(ssheet)[0] == '<')
+               {
+                       ssdoc = xmlParseMemory((char *) VARDATA(ssheet),
+                                                                  VARSIZE(ssheet) - VARHDRSZ);
+                       if (ssdoc == NULL)
+                               xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
+                                                       "error parsing stylesheet as XML document");
+
+                       stylesheet = xsltParseStylesheetDoc(ssdoc);
+               }
+               else
+                       stylesheet = xsltParseStylesheetFile((xmlChar *) text_to_cstring(ssheet));
 
-       if (stylesheet == NULL)
-               xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
-                                       "failed to parse stylesheet");
+               if (stylesheet == NULL)
+                       xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
+                                               "failed to parse stylesheet");
 
-       restree = xsltApplyStylesheet(stylesheet, doctree, params);
+               restree = xsltApplyStylesheet(stylesheet, doctree, params);
 
-       if (restree == NULL)
-               xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
-                                       "failed to apply stylesheet");
+               if (restree == NULL)
+                       xml_ereport(xmlerrcxt, ERROR, ERRCODE_EXTERNAL_ROUTINE_EXCEPTION,
+                                               "failed to apply stylesheet");
 
-       resstat = xsltSaveResultToString(&resstr, &reslen, restree, stylesheet);
+               resstat = xsltSaveResultToString(&resstr, &reslen, restree, stylesheet);
        }
        PG_CATCH();
        {
index 1efaaee1a8636745f6b55ab375dd027bd5b72abf..783590ea55e59134bbd22cbf17bd4712e4e543a4 100644 (file)
@@ -27,7 +27,7 @@
 /* non-export function prototypes */
 static void gistfixsplit(GISTInsertState *state, GISTSTATE *giststate);
 static bool gistinserttuple(GISTInsertState *state, GISTInsertStack *stack,
-                               GISTSTATE *giststate, IndexTuple tuple, OffsetNumber oldoffnum);
+                        GISTSTATE *giststate, IndexTuple tuple, OffsetNumber oldoffnum);
 static bool gistinserttuples(GISTInsertState *state, GISTInsertStack *stack,
                                 GISTSTATE *giststate,
                                 IndexTuple *tuples, int ntup, OffsetNumber oldoffnum,
@@ -781,8 +781,8 @@ gistFindPath(Relation r, BlockNumber child, OffsetNumber *downlinkoffnum)
                {
                        /*
                         * Page was split while we looked elsewhere. We didn't see the
-                        * downlink to the right page when we scanned the parent, so
-                        * add it to the queue now.
+                        * downlink to the right page when we scanned the parent, so add
+                        * it to the queue now.
                         *
                         * Put the right page ahead of the queue, so that we visit it
                         * next. That's important, because if this is the lowest internal
@@ -829,7 +829,7 @@ gistFindPath(Relation r, BlockNumber child, OffsetNumber *downlinkoffnum)
 
        elog(ERROR, "failed to re-find parent of a page in index \"%s\", block %u",
                 RelationGetRelationName(r), child);
-       return NULL; /* keep compiler quiet */
+       return NULL;                            /* keep compiler quiet */
 }
 
 /*
@@ -1046,7 +1046,7 @@ gistfixsplit(GISTInsertState *state, GISTSTATE *giststate)
  */
 static bool
 gistinserttuple(GISTInsertState *state, GISTInsertStack *stack,
-                               GISTSTATE *giststate, IndexTuple tuple, OffsetNumber oldoffnum)
+                         GISTSTATE *giststate, IndexTuple tuple, OffsetNumber oldoffnum)
 {
        return gistinserttuples(state, stack, giststate, &tuple, 1, oldoffnum,
                                                        InvalidBuffer, InvalidBuffer, false, false);
@@ -1308,7 +1308,7 @@ initGISTstate(Relation index)
        giststate = (GISTSTATE *) palloc(sizeof(GISTSTATE));
 
        giststate->scanCxt = scanCxt;
-       giststate->tempCxt = scanCxt;   /* caller must change this if needed */
+       giststate->tempCxt = scanCxt;           /* caller must change this if needed */
        giststate->tupdesc = index->rd_att;
 
        for (i = 0; i < index->rd_att->natts; i++)
index 712e59ac908c9c7fc5e1702eab27cea2bc1170d8..8caf4856763b68828c6f23d9c1df6c9a60f8e7b5 100644 (file)
@@ -48,7 +48,7 @@ typedef enum
                                                                 * before switching to the buffering build
                                                                 * mode */
        GIST_BUFFERING_ACTIVE           /* in buffering build mode */
-}      GistBufferingMode;
+} GistBufferingMode;
 
 /* Working state for gistbuild and its callback */
 typedef struct
@@ -263,7 +263,7 @@ gistValidateBufferingOption(char *value)
                ereport(ERROR,
                                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
                                 errmsg("invalid value for \"buffering\" option"),
-                          errdetail("Valid values are \"on\", \"off\", and \"auto\".")));
+                         errdetail("Valid values are \"on\", \"off\", and \"auto\".")));
        }
 }
 
@@ -567,7 +567,7 @@ gistProcessItup(GISTBuildState *buildstate, IndexTuple itup,
        BlockNumber childblkno;
        Buffer          buffer;
        bool            result = false;
-       BlockNumber     blkno;
+       BlockNumber blkno;
        int                     level;
        OffsetNumber downlinkoffnum = InvalidOffsetNumber;
        BlockNumber parentblkno = InvalidBlockNumber;
@@ -623,7 +623,7 @@ gistProcessItup(GISTBuildState *buildstate, IndexTuple itup,
                {
                        gistbufferinginserttuples(buildstate, buffer, level,
                                                                          &newtup, 1, childoffnum,
-                                                                         InvalidBlockNumber, InvalidOffsetNumber);
+                                                                       InvalidBlockNumber, InvalidOffsetNumber);
                        /* gistbufferinginserttuples() released the buffer */
                }
                else
@@ -716,26 +716,26 @@ gistbufferinginserttuples(GISTBuildState *buildstate, Buffer buffer, int level,
 
                /*
                 * All the downlinks on the old root page are now on one of the child
-                * pages. Visit all the new child pages to memorize the parents of
-                * the grandchildren.
+                * pages. Visit all the new child pages to memorize the parents of the
+                * grandchildren.
                 */
                if (gfbb->rootlevel > 1)
                {
                        maxoff = PageGetMaxOffsetNumber(page);
                        for (off = FirstOffsetNumber; off <= maxoff; off++)
                        {
-                               ItemId iid = PageGetItemId(page, off);
-                               IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
+                               ItemId          iid = PageGetItemId(page, off);
+                               IndexTuple      idxtuple = (IndexTuple) PageGetItem(page, iid);
                                BlockNumber childblkno = ItemPointerGetBlockNumber(&(idxtuple->t_tid));
-                               Buffer childbuf = ReadBuffer(buildstate->indexrel, childblkno);
+                               Buffer          childbuf = ReadBuffer(buildstate->indexrel, childblkno);
 
                                LockBuffer(childbuf, GIST_SHARE);
                                gistMemorizeAllDownlinks(buildstate, childbuf);
                                UnlockReleaseBuffer(childbuf);
 
                                /*
-                                * Also remember that the parent of the new child page is
-                                * the root block.
+                                * Also remember that the parent of the new child page is the
+                                * root block.
                                 */
                                gistMemorizeParent(buildstate, childblkno, GIST_ROOT_BLKNO);
                        }
@@ -789,8 +789,8 @@ gistbufferinginserttuples(GISTBuildState *buildstate, Buffer buffer, int level,
                         * Remember the parent of each new child page in our parent map.
                         * This assumes that the downlinks fit on the parent page. If the
                         * parent page is split, too, when we recurse up to insert the
-                        * downlinks, the recursive gistbufferinginserttuples() call
-                        * will update the map again.
+                        * downlinks, the recursive gistbufferinginserttuples() call will
+                        * update the map again.
                         */
                        if (level > 0)
                                gistMemorizeParent(buildstate,
@@ -879,8 +879,9 @@ gistBufferingFindCorrectParent(GISTBuildState *buildstate,
        if (parent == *parentblkno && *parentblkno != InvalidBlockNumber &&
                *downlinkoffnum != InvalidOffsetNumber && *downlinkoffnum <= maxoff)
        {
-               ItemId iid = PageGetItemId(page, *downlinkoffnum);
-               IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
+               ItemId          iid = PageGetItemId(page, *downlinkoffnum);
+               IndexTuple      idxtuple = (IndexTuple) PageGetItem(page, iid);
+
                if (ItemPointerGetBlockNumber(&(idxtuple->t_tid)) == childblkno)
                {
                        /* Still there */
@@ -889,16 +890,17 @@ gistBufferingFindCorrectParent(GISTBuildState *buildstate,
        }
 
        /*
-        * Downlink was not at the offset where it used to be. Scan the page
-        * to find it. During normal gist insertions, it might've moved to another
-        * page, to the right, but during a buffering build, we keep track of
-        * the parent of each page in the lookup table so we should always know
-        * what page it's on.
+        * Downlink was not at the offset where it used to be. Scan the page to
+        * find it. During normal gist insertions, it might've moved to another
+        * page, to the right, but during a buffering build, we keep track of the
+        * parent of each page in the lookup table so we should always know what
+        * page it's on.
         */
        for (off = FirstOffsetNumber; off <= maxoff; off = OffsetNumberNext(off))
        {
-               ItemId iid = PageGetItemId(page, off);
-               IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
+               ItemId          iid = PageGetItemId(page, off);
+               IndexTuple      idxtuple = (IndexTuple) PageGetItem(page, iid);
+
                if (ItemPointerGetBlockNumber(&(idxtuple->t_tid)) == childblkno)
                {
                        /* yes!!, found it */
@@ -908,7 +910,7 @@ gistBufferingFindCorrectParent(GISTBuildState *buildstate,
        }
 
        elog(ERROR, "failed to re-find parent for block %u", childblkno);
-       return InvalidBuffer; /* keep compiler quiet */
+       return InvalidBuffer;           /* keep compiler quiet */
 }
 
 /*
@@ -1129,7 +1131,7 @@ gistGetMaxLevel(Relation index)
 
 typedef struct
 {
-       BlockNumber childblkno; /* hash key */
+       BlockNumber childblkno;         /* hash key */
        BlockNumber parentblkno;
 } ParentMapEntry;
 
@@ -1156,9 +1158,9 @@ gistMemorizeParent(GISTBuildState *buildstate, BlockNumber child, BlockNumber pa
        bool            found;
 
        entry = (ParentMapEntry *) hash_search(buildstate->parentMap,
-                                                                                 (const void *) &child,
-                                                                                 HASH_ENTER,
-                                                                                 &found);
+                                                                                  (const void *) &child,
+                                                                                  HASH_ENTER,
+                                                                                  &found);
        entry->parentblkno = parent;
 }
 
@@ -1171,16 +1173,17 @@ gistMemorizeAllDownlinks(GISTBuildState *buildstate, Buffer parentbuf)
        OffsetNumber maxoff;
        OffsetNumber off;
        BlockNumber parentblkno = BufferGetBlockNumber(parentbuf);
-       Page page = BufferGetPage(parentbuf);
+       Page            page = BufferGetPage(parentbuf);
 
        Assert(!GistPageIsLeaf(page));
 
        maxoff = PageGetMaxOffsetNumber(page);
        for (off = FirstOffsetNumber; off <= maxoff; off++)
        {
-               ItemId iid = PageGetItemId(page, off);
-               IndexTuple idxtuple = (IndexTuple) PageGetItem(page, iid);
+               ItemId          iid = PageGetItemId(page, off);
+               IndexTuple      idxtuple = (IndexTuple) PageGetItem(page, iid);
                BlockNumber childblkno = ItemPointerGetBlockNumber(&(idxtuple->t_tid));
+
                gistMemorizeParent(buildstate, childblkno, parentblkno);
        }
 }
@@ -1193,9 +1196,9 @@ gistGetParent(GISTBuildState *buildstate, BlockNumber child)
 
        /* Find node buffer in hash table */
        entry = (ParentMapEntry *) hash_search(buildstate->parentMap,
-                                                                                 (const void *) &child,
-                                                                                 HASH_FIND,
-                                                                                 &found);
+                                                                                  (const void *) &child,
+                                                                                  HASH_FIND,
+                                                                                  &found);
        if (!found)
                elog(ERROR, "could not find parent of block %d in lookup table", child);
 
index 3feca263a7621ded24ba818c315067832d355bee..39aec856f9270a4751ef3e97ee469e5c14efbf5f 100644 (file)
@@ -528,7 +528,7 @@ typedef struct
        bool            isnull[INDEX_MAX_KEYS];
        GISTPageSplitInfo *splitinfo;
        GISTNodeBuffer *nodeBuffer;
-}      RelocationBufferInfo;
+} RelocationBufferInfo;
 
 /*
  * At page split, distribute tuples from the buffer of the split page to
index d97c64ede3f9326fb507824556c05686070f3e57..09e911d0981ef335baf99812b56fc3b468958a14 100644 (file)
@@ -244,7 +244,7 @@ typedef struct
        int                     index;
        /* Delta between penalties of entry insertion into different groups */
        double          delta;
-}      CommonEntry;
+} CommonEntry;
 
 /*
  * Context for g_box_consider_split. Contains information about currently
@@ -267,7 +267,7 @@ typedef struct
        int                     dim;                    /* axis of this split */
        double          range;                  /* width of general MBR projection to the
                                                                 * selected axis */
-}      ConsiderSplitContext;
+} ConsiderSplitContext;
 
 /*
  * Interval represents projection of box to axis.
@@ -276,7 +276,7 @@ typedef struct
 {
        double          lower,
                                upper;
-}      SplitInterval;
+} SplitInterval;
 
 /*
  * Interval comparison function by lower bound of the interval;
index bf139de8246f40b6373bcc49ad184573522dbb2c..c9fc9ba97f9bd3c78f6fe748e1c0a044474f5265 100644 (file)
@@ -124,7 +124,7 @@ gistbeginscan(PG_FUNCTION_ARGS)
        so->giststate = giststate;
        giststate->tempCxt = createTempGistContext();
        so->queue = NULL;
-       so->queueCxt = giststate->scanCxt;                      /* see gistrescan */
+       so->queueCxt = giststate->scanCxt;      /* see gistrescan */
 
        /* workspaces with size dependent on numberOfOrderBys: */
        so->tmpTreeItem = palloc(GSTIHDRSZ + sizeof(double) * scan->numberOfOrderBys);
index 2ec69a60d454818d0cf005d9fde66a8f88145d22..739fc597ceb70613fc72790ec1eae7ec6c54cf99 100644 (file)
@@ -581,8 +581,7 @@ gistSplitByKey(Relation r, Page page, IndexTuple *itup, int len, GISTSTATE *gist
                        if (v->spl_equiv == NULL)
                        {
                                /*
-                                * simple case: left and right keys for attno column are
-                                * equal
+                                * simple case: left and right keys for attno column are equal
                                 */
                                gistSplitByKey(r, page, itup, len, giststate, v, entryvec, attno + 1);
                        }
index 96dabdb48ae689d3fc551cb6a3d854f3c5e7e8a1..bbea5e4eaca275bd37be2a056921e050ff4f6901 100644 (file)
@@ -391,7 +391,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf,
        uint32          ovflbitno;
        int32           bitmappage,
                                bitmapbit;
-       Bucket          bucket PG_USED_FOR_ASSERTS_ONLY;
+       Bucket bucket PG_USED_FOR_ASSERTS_ONLY;
 
        /* Get information from the doomed page */
        _hash_checkpage(rel, ovflbuf, LH_OVERFLOW_PAGE);
index 2d81383ae8ad467427c6161694517856df26d055..9519e73e54c6c6315015a648ef0f958527315c80 100644 (file)
@@ -223,9 +223,9 @@ heapgetpage(HeapScanDesc scan, BlockNumber page)
        }
 
        /*
-        * Be sure to check for interrupts at least once per page.  Checks at
-        * higher code levels won't be able to stop a seqscan that encounters
-        * many pages' worth of consecutive dead tuples.
+        * Be sure to check for interrupts at least once per page.      Checks at
+        * higher code levels won't be able to stop a seqscan that encounters many
+        * pages' worth of consecutive dead tuples.
         */
        CHECK_FOR_INTERRUPTS();
 
@@ -997,8 +997,8 @@ relation_openrv(const RangeVar *relation, LOCKMODE lockmode)
  *
  *             Same as relation_openrv, but with an additional missing_ok argument
  *             allowing a NULL return rather than an error if the relation is not
- *      found.  (Note that some other causes, such as permissions problems,
- *      will still result in an ereport.)
+ *             found.  (Note that some other causes, such as permissions problems,
+ *             will still result in an ereport.)
  * ----------------
  */
 Relation
@@ -1105,7 +1105,7 @@ heap_openrv(const RangeVar *relation, LOCKMODE lockmode)
  *             by a RangeVar node
  *
  *             As above, but optionally return NULL instead of failing for
- *      relation-not-found.
+ *             relation-not-found.
  * ----------------
  */
 Relation
@@ -1588,10 +1588,10 @@ heap_hot_search_buffer(ItemPointer tid, Relation relation, Buffer buffer,
 
                /*
                 * When first_call is true (and thus, skip is initially false) we'll
-                * return the first tuple we find.  But on later passes, heapTuple
+                * return the first tuple we find.      But on later passes, heapTuple
                 * will initially be pointing to the tuple we returned last time.
-                * Returning it again would be incorrect (and would loop forever),
-                * so we skip it and return the next match we find.
+                * Returning it again would be incorrect (and would loop forever), so
+                * we skip it and return the next match we find.
                 */
                if (!skip)
                {
@@ -1651,7 +1651,7 @@ heap_hot_search(ItemPointer tid, Relation relation, Snapshot snapshot,
 {
        bool            result;
        Buffer          buffer;
-       HeapTupleData   heapTuple;
+       HeapTupleData heapTuple;
 
        buffer = ReadBuffer(relation, ItemPointerGetBlockNumber(tid));
        LockBuffer(buffer, BUFFER_LOCK_SHARE);
@@ -1885,14 +1885,14 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
        heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
 
        /*
-        * We're about to do the actual insert -- but check for conflict first,
-        * to avoid possibly having to roll back work we've just done.
+        * We're about to do the actual insert -- but check for conflict first, to
+        * avoid possibly having to roll back work we've just done.
         *
-        * For a heap insert, we only need to check for table-level SSI locks.
-        * Our new tuple can't possibly conflict with existing tuple locks, and
-        * heap page locks are only consolidated versions of tuple locks; they do
-        * not lock "gaps" as index page locks do.  So we don't need to identify
-        * buffer before making the call.
+        * For a heap insert, we only need to check for table-level SSI locks. Our
+        * new tuple can't possibly conflict with existing tuple locks, and heap
+        * page locks are only consolidated versions of tuple locks; they do not
+        * lock "gaps" as index page locks do.  So we don't need to identify a
+        * buffer before making the call.
         */
        CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
 
@@ -2123,11 +2123,11 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
         * We're about to do the actual inserts -- but check for conflict first,
         * to avoid possibly having to roll back work we've just done.
         *
-        * For a heap insert, we only need to check for table-level SSI locks.
-        * Our new tuple can't possibly conflict with existing tuple locks, and
-        * heap page locks are only consolidated versions of tuple locks; they do
-        * not lock "gaps" as index page locks do.  So we don't need to identify
-        * buffer before making the call.
+        * For a heap insert, we only need to check for table-level SSI locks. Our
+        * new tuple can't possibly conflict with existing tuple locks, and heap
+        * page locks are only consolidated versions of tuple locks; they do not
+        * lock "gaps" as index page locks do.  So we don't need to identify a
+        * buffer before making the call.
         */
        CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
 
@@ -2137,12 +2137,11 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
                Buffer          buffer;
                Buffer          vmbuffer = InvalidBuffer;
                bool            all_visible_cleared = false;
-               int nthispage;
+               int                     nthispage;
 
                /*
-                * Find buffer where at least the next tuple will fit.  If the page
-                * is all-visible, this will also pin the requisite visibility map
-                * page.
+                * Find buffer where at least the next tuple will fit.  If the page is
+                * all-visible, this will also pin the requisite visibility map page.
                 */
                buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len,
                                                                                   InvalidBuffer, options, bistate,
@@ -2358,7 +2357,7 @@ heap_delete(Relation relation, ItemPointer tid,
        ItemId          lp;
        HeapTupleData tp;
        Page            page;
-       BlockNumber     block;
+       BlockNumber block;
        Buffer          buffer;
        Buffer          vmbuffer = InvalidBuffer;
        bool            have_tuple_lock = false;
@@ -2372,10 +2371,10 @@ heap_delete(Relation relation, ItemPointer tid,
        page = BufferGetPage(buffer);
 
        /*
-        * Before locking the buffer, pin the visibility map page if it appears
-        * to be necessary.  Since we haven't got the lock yet, someone else might
-        * be in the middle of changing this, so we'll need to recheck after
-        * we have the lock.
+        * Before locking the buffer, pin the visibility map page if it appears to
+        * be necessary.  Since we haven't got the lock yet, someone else might be
+        * in the middle of changing this, so we'll need to recheck after we have
+        * the lock.
         */
        if (PageIsAllVisible(page))
                visibilitymap_pin(relation, block, &vmbuffer);
@@ -2717,7 +2716,7 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
        HeapTupleData oldtup;
        HeapTuple       heaptup;
        Page            page;
-       BlockNumber     block;
+       BlockNumber block;
        Buffer          buffer,
                                newbuf,
                                vmbuffer = InvalidBuffer,
@@ -2753,10 +2752,10 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
        page = BufferGetPage(buffer);
 
        /*
-        * Before locking the buffer, pin the visibility map page if it appears
-        * to be necessary.  Since we haven't got the lock yet, someone else might
-        * be in the middle of changing this, so we'll need to recheck after
-        * we have the lock.
+        * Before locking the buffer, pin the visibility map page if it appears to
+        * be necessary.  Since we haven't got the lock yet, someone else might be
+        * in the middle of changing this, so we'll need to recheck after we have
+        * the lock.
         */
        if (PageIsAllVisible(page))
                visibilitymap_pin(relation, block, &vmbuffer);
@@ -2900,11 +2899,11 @@ l2:
 
        /*
         * If we didn't pin the visibility map page and the page has become all
-        * visible while we were busy locking the buffer, or during some subsequent
-        * window during which we had it unlocked, we'll have to unlock and
-        * re-lock, to avoid holding the buffer lock across an I/O.  That's a bit
-        * unfortunate, esepecially since we'll now have to recheck whether the
-        * tuple has been locked or updated under us, but hopefully it won't
+        * visible while we were busy locking the buffer, or during some
+        * subsequent window during which we had it unlocked, we'll have to unlock
+        * and re-lock, to avoid holding the buffer lock across an I/O.  That's a
+        * bit unfortunate, esepecially since we'll now have to recheck whether
+        * the tuple has been locked or updated under us, but hopefully it won't
         * happen very often.
         */
        if (vmbuffer == InvalidBuffer && PageIsAllVisible(page))
@@ -3196,11 +3195,11 @@ l2:
 
        /*
         * Mark old tuple for invalidation from system caches at next command
-        * boundary, and mark the new tuple for invalidation in case we abort.
-        * We have to do this before releasing the buffer because oldtup is in
-        * the buffer.  (heaptup is all in local memory, but it's necessary to
-        * process both tuple versions in one call to inval.c so we can avoid
-        * redundant sinval messages.)
+        * boundary, and mark the new tuple for invalidation in case we abort. We
+        * have to do this before releasing the buffer because oldtup is in the
+        * buffer.      (heaptup is all in local memory, but it's necessary to process
+        * both tuple versions in one call to inval.c so we can avoid redundant
+        * sinval messages.)
         */
        CacheInvalidateHeapTuple(relation, &oldtup, heaptup);
 
@@ -4069,7 +4068,7 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid)
  */
 bool
 heap_tuple_needs_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid,
-                                 Buffer buf)
+                                               Buffer buf)
 {
        TransactionId xid;
 
@@ -4368,9 +4367,9 @@ log_heap_freeze(Relation reln, Buffer buffer,
 }
 
 /*
- * Perform XLogInsert for a heap-visible operation.     'block' is the block
+ * Perform XLogInsert for a heap-visible operation.  'block' is the block
  * being marked all-visible, and vm_buffer is the buffer containing the
- * corresponding visibility map block.  Both should have already been modified
+ * corresponding visibility map block. Both should have already been modified
  * and dirtied.
  */
 XLogRecPtr
@@ -4705,7 +4704,7 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
        Page            page;
 
        /*
-        * Read the heap page, if it still exists.  If the heap file has been
+        * Read the heap page, if it still exists.      If the heap file has been
         * dropped or truncated later in recovery, this might fail.  In that case,
         * there's no point in doing anything further, since the visibility map
         * will have to be cleared out at the same time.
@@ -4731,17 +4730,16 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
        LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
 
        /*
-        * We don't bump the LSN of the heap page when setting the visibility
-        * map bit, because that would generate an unworkable volume of
-        * full-page writes.  This exposes us to torn page hazards, but since
-        * we're not inspecting the existing page contents in any way, we
-        * don't care.
+        * We don't bump the LSN of the heap page when setting the visibility map
+        * bit, because that would generate an unworkable volume of full-page
+        * writes.      This exposes us to torn page hazards, but since we're not
+        * inspecting the existing page contents in any way, we don't care.
         *
-        * However, all operations that clear the visibility map bit *do* bump
-        * the LSN, and those operations will only be replayed if the XLOG LSN
-        * follows the page LSN.  Thus, if the page LSN has advanced past our
-        * XLOG record's LSN, we mustn't mark the page all-visible, because
-        * the subsequent update won't be replayed to clear the flag.
+        * However, all operations that clear the visibility map bit *do* bump the
+        * LSN, and those operations will only be replayed if the XLOG LSN follows
+        * the page LSN.  Thus, if the page LSN has advanced past our XLOG
+        * record's LSN, we mustn't mark the page all-visible, because the
+        * subsequent update won't be replayed to clear the flag.
         */
        if (!XLByteLE(lsn, PageGetLSN(page)))
        {
@@ -4772,10 +4770,10 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
                 * Don't set the bit if replay has already passed this point.
                 *
                 * It might be safe to do this unconditionally; if replay has past
-                * this point, we'll replay at least as far this time as we did before,
-                * and if this bit needs to be cleared, the record responsible for
-                * doing so should be again replayed, and clear it.  For right now,
-                * out of an abundance of conservatism, we use the same test here
+                * this point, we'll replay at least as far this time as we did
+                * before, and if this bit needs to be cleared, the record responsible
+                * for doing so should be again replayed, and clear it.  For right
+                * now, out of an abundance of conservatism, we use the same test here
                 * we did for the heap page; if this results in a dropped bit, no real
                 * harm is done; and the next VACUUM will fix it.
                 */
@@ -5183,7 +5181,7 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool hot_update)
        if (xlrec->all_visible_cleared)
        {
                Relation        reln = CreateFakeRelcacheEntry(xlrec->target.node);
-               BlockNumber     block = ItemPointerGetBlockNumber(&xlrec->target.tid);
+               BlockNumber block = ItemPointerGetBlockNumber(&xlrec->target.tid);
                Buffer          vmbuffer = InvalidBuffer;
 
                visibilitymap_pin(reln, block, &vmbuffer);
@@ -5267,7 +5265,7 @@ newt:;
        if (xlrec->new_all_visible_cleared)
        {
                Relation        reln = CreateFakeRelcacheEntry(xlrec->target.node);
-               BlockNumber     block = ItemPointerGetBlockNumber(&xlrec->newtid);
+               BlockNumber block = ItemPointerGetBlockNumber(&xlrec->newtid);
                Buffer          vmbuffer = InvalidBuffer;
 
                visibilitymap_pin(reln, block, &vmbuffer);
@@ -5690,7 +5688,7 @@ heap2_desc(StringInfo buf, uint8 xl_info, char *rec)
                else
                        appendStringInfo(buf, "multi-insert: ");
                appendStringInfo(buf, "rel %u/%u/%u; blk %u; %d tuples",
-                                                xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode,
+                               xlrec->node.spcNode, xlrec->node.dbNode, xlrec->node.relNode,
                                                 xlrec->blkno, xlrec->ntuples);
        }
        else
index 30ef1bf7e04efc8571a618515e234ccfdf4766bf..19a34923c7a74ddbf09d073ffe09f7cb5893cf80 100644 (file)
@@ -109,8 +109,8 @@ GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2,
                                         BlockNumber block1, BlockNumber block2,
                                         Buffer *vmbuffer1, Buffer *vmbuffer2)
 {
-       bool    need_to_pin_buffer1;
-       bool    need_to_pin_buffer2;
+       bool            need_to_pin_buffer1;
+       bool            need_to_pin_buffer2;
 
        Assert(BufferIsValid(buffer1));
        Assert(buffer2 == InvalidBuffer || buffer1 <= buffer2);
@@ -145,7 +145,7 @@ GetVisibilityMapPins(Relation relation, Buffer buffer1, Buffer buffer2,
                /*
                 * If there are two buffers involved and we pinned just one of them,
                 * it's possible that the second one became all-visible while we were
-                * busy pinning the first one.  If it looks like that's a possible
+                * busy pinning the first one.  If it looks like that's a possible
                 * scenario, we'll need to make a second pass through this loop.
                 */
                if (buffer2 == InvalidBuffer || buffer1 == buffer2
@@ -302,11 +302,11 @@ RelationGetBufferForTuple(Relation relation, Size len,
                 * block if one was given, taking suitable care with lock ordering and
                 * the possibility they are the same block.
                 *
-                * If the page-level all-visible flag is set, caller will need to clear
-                * both that and the corresponding visibility map bit.  However, by the
-                * time we return, we'll have x-locked the buffer, and we don't want to
-                * do any I/O while in that state.  So we check the bit here before
-                * taking the lock, and pin the page if it appears necessary.
+                * If the page-level all-visible flag is set, caller will need to
+                * clear both that and the corresponding visibility map bit.  However,
+                * by the time we return, we'll have x-locked the buffer, and we don't
+                * want to do any I/O while in that state.      So we check the bit here
+                * before taking the lock, and pin the page if it appears necessary.
                 * Checking without the lock creates a risk of getting the wrong
                 * answer, so we'll have to recheck after acquiring the lock.
                 */
@@ -347,23 +347,24 @@ RelationGetBufferForTuple(Relation relation, Size len,
 
                /*
                 * We now have the target page (and the other buffer, if any) pinned
-                * and locked.  However, since our initial PageIsAllVisible checks
-                * were performed before acquiring the lock, the results might now
-                * be out of date, either for the selected victim buffer, or for the
-                * other buffer passed by the caller.  In that case, we'll need to give
-                * up our locks, go get the pin(s) we failed to get earlier, and
+                * and locked.  However, since our initial PageIsAllVisible checks
+                * were performed before acquiring the lock, the results might now be
+                * out of date, either for the selected victim buffer, or for the
+                * other buffer passed by the caller.  In that case, we'll need to
+                * give up our locks, go get the pin(s) we failed to get earlier, and
                 * re-lock.  That's pretty painful, but hopefully shouldn't happen
                 * often.
                 *
-                * Note that there's a small possibility that we didn't pin the
-                * page above but still have the correct page pinned anyway, either
-                * because we've already made a previous pass through this loop, or
-                * because caller passed us the right page anyway.
+                * Note that there's a small possibility that we didn't pin the page
+                * above but still have the correct page pinned anyway, either because
+                * we've already made a previous pass through this loop, or because
+                * caller passed us the right page anyway.
                 *
                 * Note also that it's possible that by the time we get the pin and
                 * retake the buffer locks, the visibility map bit will have been
-                * cleared by some other backend anyway.  In that case, we'll have done
-                * a bit of extra work for no gain, but there's no real harm done.
+                * cleared by some other backend anyway.  In that case, we'll have
+                * done a bit of extra work for no gain, but there's no real harm
+                * done.
                 */
                if (otherBuffer == InvalidBuffer || buffer <= otherBuffer)
                        GetVisibilityMapPins(relation, buffer, otherBuffer,
index 28b5a20ae706156eb80a898fd106f58d0e6d2c27..050f048a9b0458d9c7469d4ec30210c90a090abe 100644 (file)
@@ -75,7 +75,7 @@ do { \
 
 static void toast_delete_datum(Relation rel, Datum value);
 static Datum toast_save_datum(Relation rel, Datum value,
-                                struct varlena *oldexternal, int options);
+                                struct varlena * oldexternal, int options);
 static bool toastrel_valueid_exists(Relation toastrel, Oid valueid);
 static bool toastid_valueid_exists(Oid toastrelid, Oid valueid);
 static struct varlena *toast_fetch_datum(struct varlena * attr);
@@ -1233,7 +1233,7 @@ toast_compress_datum(Datum value)
  */
 static Datum
 toast_save_datum(Relation rel, Datum value,
-                                struct varlena *oldexternal, int options)
+                                struct varlena * oldexternal, int options)
 {
        Relation        toastrel;
        Relation        toastidx;
@@ -1353,7 +1353,7 @@ toast_save_datum(Relation rel, Datum value,
                                 * those versions could easily reference the same toast value.
                                 * When we copy the second or later version of such a row,
                                 * reusing the OID will mean we select an OID that's already
-                                * in the new toast table.  Check for that, and if so, just
+                                * in the new toast table.      Check for that, and if so, just
                                 * fall through without writing the data again.
                                 *
                                 * While annoying and ugly-looking, this is a good thing
index 9152c7d151150573fbdbd5bb8d7a8125da18adf6..eb5625906f12904b3248b9b0364998f03a625318 100644 (file)
@@ -16,7 +16,7 @@
  *             visibilitymap_pin_ok - check whether correct map page is already pinned
  *             visibilitymap_set        - set a bit in a previously pinned page
  *             visibilitymap_test       - test if a bit is set
- *             visibilitymap_count      - count number of bits set in visibility map
+ *             visibilitymap_count  - count number of bits set in visibility map
  *             visibilitymap_truncate  - truncate the visibility map
  *
  * NOTES
@@ -27,7 +27,7 @@
  * the sense that we make sure that whenever a bit is set, we know the
  * condition is true, but if a bit is not set, it might or might not be true.
  *
- * Clearing a visibility map bit is not separately WAL-logged.  The callers
+ * Clearing a visibility map bit is not separately WAL-logged. The callers
  * must make sure that whenever a bit is cleared, the bit is cleared on WAL
  * replay of the updating operation as well.
  *
@@ -36,9 +36,9 @@
  * it may still be the case that every tuple on the page is visible to all
  * transactions; we just don't know that for certain.  The difficulty is that
  * there are two bits which are typically set together: the PD_ALL_VISIBLE bit
- * on the page itself, and the visibility map bit.  If a crash occurs after the
+ * on the page itself, and the visibility map bit.     If a crash occurs after the
  * visibility map page makes it to disk and before the updated heap page makes
- * it to disk, redo must set the bit on the heap page.  Otherwise, the next
+ * it to disk, redo must set the bit on the heap page. Otherwise, the next
  * insert, update, or delete on the heap page will fail to realize that the
  * visibility map bit must be cleared, possibly causing index-only scans to
  * return wrong answers.
  * the buffer lock over any I/O that may be required to read in the visibility
  * map page.  To avoid this, we examine the heap page before locking it;
  * if the page-level PD_ALL_VISIBLE bit is set, we pin the visibility map
- * bit.  Then, we lock the buffer.  But this creates a race condition: there
+ * bit.  Then, we lock the buffer.     But this creates a race condition: there
  * is a possibility that in the time it takes to lock the buffer, the
  * PD_ALL_VISIBLE bit gets set.  If that happens, we have to unlock the
- * buffer, pin the visibility map page, and relock the buffer.  This shouldn't
+ * buffer, pin the visibility map page, and relock the buffer. This shouldn't
  * happen often, because only VACUUM currently sets visibility map bits,
  * and the race will only occur if VACUUM processes a given page at almost
  * exactly the same time that someone tries to further modify it.
@@ -227,9 +227,9 @@ visibilitymap_pin_ok(BlockNumber heapBlk, Buffer buf)
  *     visibilitymap_set - set a bit on a previously pinned page
  *
  * recptr is the LSN of the XLOG record we're replaying, if we're in recovery,
- * or InvalidXLogRecPtr in normal running.  The page LSN is advanced to the
+ * or InvalidXLogRecPtr in normal running.     The page LSN is advanced to the
  * one provided; in normal running, we generate a new XLOG record and set the
- * page LSN to that value.  cutoff_xid is the largest xmin on the page being
+ * page LSN to that value.     cutoff_xid is the largest xmin on the page being
  * marked all-visible; it is needed for Hot Standby, and can be
  * InvalidTransactionId if the page contains no tuples.
  *
@@ -295,10 +295,10 @@ visibilitymap_set(Relation rel, BlockNumber heapBlk, XLogRecPtr recptr,
  * releasing *buf after it's done testing and setting bits.
  *
  * NOTE: This function is typically called without a lock on the heap page,
- * so somebody else could change the bit just after we look at it.  In fact,
+ * so somebody else could change the bit just after we look at it.     In fact,
  * since we don't lock the visibility map page either, it's even possible that
  * someone else could have changed the bit just before we look at it, but yet
- * we might see the old value.  It is the caller's responsibility to deal with
+ * we might see the old value. It is the caller's responsibility to deal with
  * all concurrency issues!
  */
 bool
@@ -344,7 +344,7 @@ visibilitymap_test(Relation rel, BlockNumber heapBlk, Buffer *buf)
 }
 
 /*
- *     visibilitymap_count      - count number of bits set in visibility map
+ *     visibilitymap_count  - count number of bits set in visibility map
  *
  * Note: we ignore the possibility of race conditions when the table is being
  * extended concurrently with the call.  New pages added to the table aren't
@@ -356,16 +356,16 @@ visibilitymap_count(Relation rel)
        BlockNumber result = 0;
        BlockNumber mapBlock;
 
-       for (mapBlock = 0; ; mapBlock++)
+       for (mapBlock = 0;; mapBlock++)
        {
                Buffer          mapBuffer;
                unsigned char *map;
                int                     i;
 
                /*
-                * Read till we fall off the end of the map.  We assume that any
-                * extra bytes in the last page are zeroed, so we don't bother
-                * excluding them from the count.
+                * Read till we fall off the end of the map.  We assume that any extra
+                * bytes in the last page are zeroed, so we don't bother excluding
+                * them from the count.
                 */
                mapBuffer = vm_readbuf(rel, mapBlock, false);
                if (!BufferIsValid(mapBuffer))
@@ -496,11 +496,11 @@ vm_readbuf(Relation rel, BlockNumber blkno, bool extend)
        Buffer          buf;
 
        /*
-        * We might not have opened the relation at the smgr level yet, or we might
-        * have been forced to close it by a sinval message.  The code below won't
-        * necessarily notice relation extension immediately when extend = false,
-        * so we rely on sinval messages to ensure that our ideas about the size of
-        * the map aren't too far out of date.
+        * We might not have opened the relation at the smgr level yet, or we
+        * might have been forced to close it by a sinval message.      The code below
+        * won't necessarily notice relation extension immediately when extend =
+        * false, so we rely on sinval messages to ensure that our ideas about the
+        * size of the map aren't too far out of date.
         */
        RelationOpenSmgr(rel);
 
index d54b669bf359d5f005d8b5131fe8e91ffa04dd82..26fd9b6e11438eb67b1cb234a9cb2ebbb540cc97 100644 (file)
@@ -93,7 +93,7 @@ RelationGetIndexScan(Relation indexRelation, int nkeys, int norderbys)
        else
                scan->orderByData = NULL;
 
-       scan->xs_want_itup = false;                     /* may be set later */
+       scan->xs_want_itup = false; /* may be set later */
 
        /*
         * During recovery we ignore killed tuples and don't bother to kill them
index 16ac4e1b9ffcae0a7d902297a1288a82f02295fb..d64df319c5cac37ac71fd3cf436c6f635cd4bb1e 100644 (file)
@@ -435,7 +435,7 @@ index_restrpos(IndexScanDesc scan)
 ItemPointer
 index_getnext_tid(IndexScanDesc scan, ScanDirection direction)
 {
-       FmgrInfo        *procedure;
+       FmgrInfo   *procedure;
        bool            found;
 
        SCAN_CHECKS;
@@ -495,7 +495,7 @@ index_getnext_tid(IndexScanDesc scan, ScanDirection direction)
 HeapTuple
 index_fetch_heap(IndexScanDesc scan)
 {
-       ItemPointer     tid = &scan->xs_ctup.t_self;
+       ItemPointer tid = &scan->xs_ctup.t_self;
        bool            all_dead = false;
        bool            got_heap_tuple;
 
@@ -530,8 +530,8 @@ index_fetch_heap(IndexScanDesc scan)
        if (got_heap_tuple)
        {
                /*
-                * Only in a non-MVCC snapshot can more than one member of the
-                * HOT chain be visible.
+                * Only in a non-MVCC snapshot can more than one member of the HOT
+                * chain be visible.
                 */
                scan->xs_continue_hot = !IsMVCCSnapshot(scan->xs_snapshot);
                pgstat_count_heap_fetch(scan->indexRelation);
@@ -544,7 +544,7 @@ index_fetch_heap(IndexScanDesc scan)
        /*
         * If we scanned a whole HOT chain and found only dead tuples, tell index
         * AM to kill its entry for that TID (this will take effect in the next
-        * amgettuple call, in index_getnext_tid).  We do not do this when in
+        * amgettuple call, in index_getnext_tid).      We do not do this when in
         * recovery because it may violate MVCC to do so.  See comments in
         * RelationGetIndexScan().
         */
index fedde934a389921e5c01deb672de79f22d9dd05b..d610bef79830cb35bd5cc89c48eabd6aa7433d4c 100644 (file)
@@ -82,7 +82,7 @@ btint2fastcmp(Datum x, Datum y, SortSupport ssup)
 Datum
 btint2sortsupport(PG_FUNCTION_ARGS)
 {
-       SortSupport     ssup = (SortSupport) PG_GETARG_POINTER(0);
+       SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
 
        ssup->comparator = btint2fastcmp;
        PG_RETURN_VOID();
@@ -119,7 +119,7 @@ btint4fastcmp(Datum x, Datum y, SortSupport ssup)
 Datum
 btint4sortsupport(PG_FUNCTION_ARGS)
 {
-       SortSupport     ssup = (SortSupport) PG_GETARG_POINTER(0);
+       SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
 
        ssup->comparator = btint4fastcmp;
        PG_RETURN_VOID();
@@ -156,7 +156,7 @@ btint8fastcmp(Datum x, Datum y, SortSupport ssup)
 Datum
 btint8sortsupport(PG_FUNCTION_ARGS)
 {
-       SortSupport     ssup = (SortSupport) PG_GETARG_POINTER(0);
+       SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
 
        ssup->comparator = btint8fastcmp;
        PG_RETURN_VOID();
@@ -277,7 +277,7 @@ btoidfastcmp(Datum x, Datum y, SortSupport ssup)
 Datum
 btoidsortsupport(PG_FUNCTION_ARGS)
 {
-       SortSupport     ssup = (SortSupport) PG_GETARG_POINTER(0);
+       SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
 
        ssup->comparator = btoidfastcmp;
        PG_RETURN_VOID();
@@ -338,7 +338,7 @@ btnamefastcmp(Datum x, Datum y, SortSupport ssup)
 Datum
 btnamesortsupport(PG_FUNCTION_ARGS)
 {
-       SortSupport     ssup = (SortSupport) PG_GETARG_POINTER(0);
+       SortSupport ssup = (SortSupport) PG_GETARG_POINTER(0);
 
        ssup->comparator = btnamefastcmp;
        PG_RETURN_VOID();
index e6dec618c7781b7a454b899954be345593864f79..016ce2283c2655ec3e25178ebe6d40ad7bf9c02a 100644 (file)
@@ -1362,7 +1362,7 @@ _bt_pagedel(Relation rel, Buffer buf, BTStack stack)
         * we're in VACUUM and would not otherwise have an XID.  Having already
         * updated links to the target, ReadNewTransactionId() suffices as an
         * upper bound.  Any scan having retained a now-stale link is advertising
-        * in its PGXACT an xmin less than or equal to the value we read here.  It
+        * in its PGXACT an xmin less than or equal to the value we read here.  It
         * will continue to do so, holding back RecentGlobalXmin, for the duration
         * of that scan.
         */
index 184fc3bb79b234a5423a06af253cbe836ab4c14d..41d06edb15487130104a19457b96dacd7ccc0331 100644 (file)
@@ -433,7 +433,7 @@ btbeginscan(PG_FUNCTION_ARGS)
 
        /*
         * We don't know yet whether the scan will be index-only, so we do not
-        * allocate the tuple workspace arrays until btrescan.  However, we set up
+        * allocate the tuple workspace arrays until btrescan.  However, we set up
         * scan->xs_itupdesc whether we'll need it or not, since that's so cheap.
         */
        so->currTuples = so->markTuples = NULL;
@@ -478,7 +478,7 @@ btrescan(PG_FUNCTION_ARGS)
 
        /*
         * Allocate tuple workspace arrays, if needed for an index-only scan and
-        * not already done in a previous rescan call.  To save on palloc
+        * not already done in a previous rescan call.  To save on palloc
         * overhead, both workspaces are allocated as one palloc block; only this
         * function and btendscan know that.
         *
index b701c3f819cb0dd76b9ef75fadd6086796198b37..e0c952368b9c1c72712f04334326a9c9de40ec68 100644 (file)
@@ -564,11 +564,11 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
                                        ScanKeyEntryInitialize(chosen,
                                                                                   (SK_SEARCHNOTNULL | SK_ISNULL |
                                                                                        (impliesNN->sk_flags &
-                                                                                        (SK_BT_DESC | SK_BT_NULLS_FIRST))),
+                                                                                 (SK_BT_DESC | SK_BT_NULLS_FIRST))),
                                                                                   curattr,
-                                                                                  ((impliesNN->sk_flags & SK_BT_NULLS_FIRST) ?
-                                                                                       BTGreaterStrategyNumber :
-                                                                                       BTLessStrategyNumber),
+                                                                ((impliesNN->sk_flags & SK_BT_NULLS_FIRST) ?
+                                                                 BTGreaterStrategyNumber :
+                                                                 BTLessStrategyNumber),
                                                                                   InvalidOid,
                                                                                   InvalidOid,
                                                                                   InvalidOid,
index f79ce552b6206005b4a24ab92ad06183d603c146..33ad8915f5a7982e6a7cef3216f5bffeb1e90894 100644 (file)
@@ -37,10 +37,10 @@ typedef struct BTSortArrayContext
 static Datum _bt_find_extreme_element(IndexScanDesc scan, ScanKey skey,
                                                 StrategyNumber strat,
                                                 Datum *elems, int nelems);
-static int     _bt_sort_array_elements(IndexScanDesc scan, ScanKey skey,
+static int _bt_sort_array_elements(IndexScanDesc scan, ScanKey skey,
                                                bool reverse,
                                                Datum *elems, int nelems);
-static int _bt_compare_array_elements(const void *a, const void *b, void *arg);
+static int     _bt_compare_array_elements(const void *a, const void *b, void *arg);
 static bool _bt_compare_scankey_args(IndexScanDesc scan, ScanKey op,
                                                 ScanKey leftarg, ScanKey rightarg,
                                                 bool *result);
@@ -227,8 +227,8 @@ _bt_preprocess_array_keys(IndexScanDesc scan)
        }
 
        /*
-        * Make a scan-lifespan context to hold array-associated data, or reset
-        * it if we already have one from a previous rescan cycle.
+        * Make a scan-lifespan context to hold array-associated data, or reset it
+        * if we already have one from a previous rescan cycle.
         */
        if (so->arrayContext == NULL)
                so->arrayContext = AllocSetContextCreate(CurrentMemoryContext,
@@ -269,7 +269,7 @@ _bt_preprocess_array_keys(IndexScanDesc scan)
                        continue;
 
                /*
-                * First, deconstruct the array into elements.  Anything allocated
+                * First, deconstruct the array into elements.  Anything allocated
                 * here (including a possibly detoasted array value) is in the
                 * workspace context.
                 */
@@ -283,7 +283,7 @@ _bt_preprocess_array_keys(IndexScanDesc scan)
                                                  &elem_values, &elem_nulls, &num_elems);
 
                /*
-                * Compress out any null elements.  We can ignore them since we assume
+                * Compress out any null elements.      We can ignore them since we assume
                 * all btree operators are strict.
                 */
                num_nonnulls = 0;
@@ -338,7 +338,7 @@ _bt_preprocess_array_keys(IndexScanDesc scan)
                 * successive primitive indexscans produce data in index order.
                 */
                num_elems = _bt_sort_array_elements(scan, cur,
-                                                                                       (indoption[cur->sk_attno - 1] & INDOPTION_DESC) != 0,
+                                               (indoption[cur->sk_attno - 1] & INDOPTION_DESC) != 0,
                                                                                        elem_values, num_nonnulls);
 
                /*
@@ -387,9 +387,10 @@ _bt_find_extreme_element(IndexScanDesc scan, ScanKey skey,
        /*
         * Look up the appropriate comparison operator in the opfamily.
         *
-        * Note: it's possible that this would fail, if the opfamily is incomplete,
-        * but it seems quite unlikely that an opfamily would omit non-cross-type
-        * comparison operators for any datatype that it supports at all.
+        * Note: it's possible that this would fail, if the opfamily is
+        * incomplete, but it seems quite unlikely that an opfamily would omit
+        * non-cross-type comparison operators for any datatype that it supports
+        * at all.
         */
        cmp_op = get_opfamily_member(rel->rd_opfamily[skey->sk_attno - 1],
                                                                 elemtype,
@@ -455,9 +456,10 @@ _bt_sort_array_elements(IndexScanDesc scan, ScanKey skey,
        /*
         * Look up the appropriate comparison function in the opfamily.
         *
-        * Note: it's possible that this would fail, if the opfamily is incomplete,
-        * but it seems quite unlikely that an opfamily would omit non-cross-type
-        * support functions for any datatype that it supports at all.
+        * Note: it's possible that this would fail, if the opfamily is
+        * incomplete, but it seems quite unlikely that an opfamily would omit
+        * non-cross-type support functions for any datatype that it supports at
+        * all.
         */
        cmp_proc = get_opfamily_proc(rel->rd_opfamily[skey->sk_attno - 1],
                                                                 elemtype,
@@ -515,7 +517,7 @@ _bt_compare_array_elements(const void *a, const void *b, void *arg)
  * _bt_start_array_keys() -- Initialize array keys at start of a scan
  *
  * Set up the cur_elem counters and fill in the first sk_argument value for
- * each array scankey.  We can't do this until we know the scan direction.
+ * each array scankey. We can't do this until we know the scan direction.
  */
 void
 _bt_start_array_keys(IndexScanDesc scan, ScanDirection dir)
@@ -609,8 +611,8 @@ _bt_advance_array_keys(IndexScanDesc scan, ScanDirection dir)
  * so that the index sorts in the desired direction.
  *
  * One key purpose of this routine is to discover which scan keys must be
- * satisfied to continue the scan.  It also attempts to eliminate redundant
- * keys and detect contradictory keys.  (If the index opfamily provides
+ * satisfied to continue the scan.     It also attempts to eliminate redundant
+ * keys and detect contradictory keys. (If the index opfamily provides
  * incomplete sets of cross-type operators, we may fail to detect redundant
  * or contradictory keys, but we can survive that.)
  *
@@ -676,7 +678,7 @@ _bt_advance_array_keys(IndexScanDesc scan, ScanDirection dir)
  * Note: the reason we have to copy the preprocessed scan keys into private
  * storage is that we are modifying the array based on comparisons of the
  * key argument values, which could change on a rescan or after moving to
- * new elements of array keys.  Therefore we can't overwrite the source data.
+ * new elements of array keys. Therefore we can't overwrite the source data.
  */
 void
 _bt_preprocess_keys(IndexScanDesc scan)
@@ -781,8 +783,8 @@ _bt_preprocess_keys(IndexScanDesc scan)
                         * set qual_ok to false and abandon further processing.
                         *
                         * We also have to deal with the case of "key IS NULL", which is
-                        * unsatisfiable in combination with any other index condition.
-                        * By the time we get here, that's been classified as an equality
+                        * unsatisfiable in combination with any other index condition. By
+                        * the time we get here, that's been classified as an equality
                         * check, and we've rejected any combination of it with a regular
                         * equality condition; but not with other types of conditions.
                         */
@@ -1421,12 +1423,12 @@ _bt_checkkeys(IndexScanDesc scan,
                                /*
                                 * Since NULLs are sorted before non-NULLs, we know we have
                                 * reached the lower limit of the range of values for this
-                                * index attr.  On a backward scan, we can stop if this qual
+                                * index attr.  On a backward scan, we can stop if this qual
                                 * is one of the "must match" subset.  We can stop regardless
                                 * of whether the qual is > or <, so long as it's required,
-                                * because it's not possible for any future tuples to pass.
-                                * On a forward scan, however, we must keep going, because we
-                                * may have initially positioned to the start of the index.
+                                * because it's not possible for any future tuples to pass. On
+                                * a forward scan, however, we must keep going, because we may
+                                * have initially positioned to the start of the index.
                                 */
                                if ((key->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
                                        ScanDirectionIsBackward(dir))
@@ -1437,11 +1439,11 @@ _bt_checkkeys(IndexScanDesc scan,
                                /*
                                 * Since NULLs are sorted after non-NULLs, we know we have
                                 * reached the upper limit of the range of values for this
-                                * index attr.  On a forward scan, we can stop if this qual is
-                                * one of the "must match" subset.  We can stop regardless of
+                                * index attr.  On a forward scan, we can stop if this qual is
+                                * one of the "must match" subset.      We can stop regardless of
                                 * whether the qual is > or <, so long as it's required,
-                                * because it's not possible for any future tuples to pass.
-                                * On a backward scan, however, we must keep going, because we
+                                * because it's not possible for any future tuples to pass. On
+                                * a backward scan, however, we must keep going, because we
                                 * may have initially positioned to the end of the index.
                                 */
                                if ((key->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
@@ -1532,12 +1534,12 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
                                /*
                                 * Since NULLs are sorted before non-NULLs, we know we have
                                 * reached the lower limit of the range of values for this
-                                * index attr.  On a backward scan, we can stop if this qual
+                                * index attr.  On a backward scan, we can stop if this qual
                                 * is one of the "must match" subset.  We can stop regardless
                                 * of whether the qual is > or <, so long as it's required,
-                                * because it's not possible for any future tuples to pass.
-                                * On a forward scan, however, we must keep going, because we
-                                * may have initially positioned to the start of the index.
+                                * because it's not possible for any future tuples to pass. On
+                                * a forward scan, however, we must keep going, because we may
+                                * have initially positioned to the start of the index.
                                 */
                                if ((subkey->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
                                        ScanDirectionIsBackward(dir))
@@ -1548,11 +1550,11 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
                                /*
                                 * Since NULLs are sorted after non-NULLs, we know we have
                                 * reached the upper limit of the range of values for this
-                                * index attr.  On a forward scan, we can stop if this qual is
-                                * one of the "must match" subset.  We can stop regardless of
+                                * index attr.  On a forward scan, we can stop if this qual is
+                                * one of the "must match" subset.      We can stop regardless of
                                 * whether the qual is > or <, so long as it's required,
-                                * because it's not possible for any future tuples to pass.
-                                * On a backward scan, however, we must keep going, because we
+                                * because it's not possible for any future tuples to pass. On
+                                * a backward scan, however, we must keep going, because we
                                 * may have initially positioned to the end of the index.
                                 */
                                if ((subkey->sk_flags & (SK_BT_REQFWD | SK_BT_REQBKWD)) &&
index 98a7bea742ea87bb81611586df035a8d6c169ac2..b3f8f6a231372fb1f4b6e2c1d42f124b2ca215cb 100644 (file)
@@ -24,7 +24,7 @@
 /*
  * SPPageDesc tracks all info about a page we are inserting into.  In some
  * situations it actually identifies a tuple, or even a specific node within
- * an inner tuple.  But any of the fields can be invalid.  If the buffer
+ * an inner tuple.     But any of the fields can be invalid.  If the buffer
  * field is valid, it implies we hold pin and exclusive lock on that buffer.
  * page pointer should be valid exactly when buffer is.
  */
@@ -129,8 +129,8 @@ spgPageIndexMultiDelete(SpGistState *state, Page page,
                                                int firststate, int reststate,
                                                BlockNumber blkno, OffsetNumber offnum)
 {
-       OffsetNumber    firstItem;
-       OffsetNumber   *sortednos;
+       OffsetNumber firstItem;
+       OffsetNumber *sortednos;
        SpGistDeadTuple tuple = NULL;
        int                     i;
 
@@ -155,8 +155,8 @@ spgPageIndexMultiDelete(SpGistState *state, Page page,
 
        for (i = 0; i < nitems; i++)
        {
-               OffsetNumber    itemno = sortednos[i];
-               int                             tupstate;
+               OffsetNumber itemno = sortednos[i];
+               int                     tupstate;
 
                tupstate = (itemno == firstItem) ? firststate : reststate;
                if (tuple == NULL || tuple->tupstate != tupstate)
@@ -200,7 +200,7 @@ saveNodeLink(Relation index, SPPageDesc *parent,
  */
 static void
 addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple,
-                        SPPageDesc *current, SPPageDesc *parent, bool isNulls, bool isNew)
+                  SPPageDesc *current, SPPageDesc *parent, bool isNulls, bool isNew)
 {
        XLogRecData rdata[4];
        spgxlogAddLeaf xlrec;
@@ -230,7 +230,7 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple,
                /* Tuple is not part of a chain */
                leafTuple->nextOffset = InvalidOffsetNumber;
                current->offnum = SpGistPageAddNewItem(state, current->page,
-                                                                                          (Item) leafTuple, leafTuple->size,
+                                                                                  (Item) leafTuple, leafTuple->size,
                                                                                           NULL, false);
 
                xlrec.offnumLeaf = current->offnum;
@@ -250,9 +250,9 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple,
        else
        {
                /*
-                * Tuple must be inserted into existing chain.  We mustn't change
-                * the chain's head address, but we don't need to chase the entire
-                * chain to put the tuple at the end; we can insert it second.
+                * Tuple must be inserted into existing chain.  We mustn't change the
+                * chain's head address, but we don't need to chase the entire chain
+                * to put the tuple at the end; we can insert it second.
                 *
                 * Also, it's possible that the "chain" consists only of a DEAD tuple,
                 * in which case we should replace the DEAD tuple in-place.
@@ -261,7 +261,7 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple,
                OffsetNumber offnum;
 
                head = (SpGistLeafTuple) PageGetItem(current->page,
-                                                                                        PageGetItemId(current->page, current->offnum));
+                                                         PageGetItemId(current->page, current->offnum));
                if (head->tupstate == SPGIST_LIVE)
                {
                        leafTuple->nextOffset = head->nextOffset;
@@ -274,7 +274,7 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple,
                         * and set new second element
                         */
                        head = (SpGistLeafTuple) PageGetItem(current->page,
-                                                                                        PageGetItemId(current->page, current->offnum));
+                                                         PageGetItemId(current->page, current->offnum));
                        head->nextOffset = offnum;
 
                        xlrec.offnumLeaf = offnum;
@@ -483,7 +483,7 @@ moveLeafs(Relation index, SpGistState *state,
                for (i = 0; i < nDelete; i++)
                {
                        it = (SpGistLeafTuple) PageGetItem(current->page,
-                                                                                          PageGetItemId(current->page, toDelete[i]));
+                                                                 PageGetItemId(current->page, toDelete[i]));
                        Assert(it->tupstate == SPGIST_LIVE);
 
                        /*
@@ -516,12 +516,12 @@ moveLeafs(Relation index, SpGistState *state,
        leafptr += newLeafTuple->size;
 
        /*
-        * Now delete the old tuples, leaving a redirection pointer behind for
-        * the first one, unless we're doing an index build; in which case there
-        * can't be any concurrent scan so we need not provide a redirect.
+        * Now delete the old tuples, leaving a redirection pointer behind for the
+        * first one, unless we're doing an index build; in which case there can't
+        * be any concurrent scan so we need not provide a redirect.
         */
        spgPageIndexMultiDelete(state, current->page, toDelete, nDelete,
-                                                       state->isBuild ? SPGIST_PLACEHOLDER : SPGIST_REDIRECT,
+                                          state->isBuild ? SPGIST_PLACEHOLDER : SPGIST_REDIRECT,
                                                        SPGIST_PLACEHOLDER,
                                                        nblkno, r);
 
@@ -575,7 +575,7 @@ setRedirectionTuple(SPPageDesc *current, OffsetNumber position,
        SpGistDeadTuple dt;
 
        dt = (SpGistDeadTuple) PageGetItem(current->page,
-                                                                          PageGetItemId(current->page, position));
+                                                                        PageGetItemId(current->page, position));
        Assert(dt->tupstate == SPGIST_REDIRECT);
        Assert(ItemPointerGetBlockNumber(&dt->pointer) == SPGIST_METAPAGE_BLKNO);
        ItemPointerSet(&dt->pointer, blkno, offnum);
@@ -640,7 +640,7 @@ checkAllTheSame(spgPickSplitIn *in, spgPickSplitOut *out, bool tooBig,
        /* The opclass may not use node labels, but if it does, duplicate 'em */
        if (out->nodeLabels)
        {
-               Datum   theLabel = out->nodeLabels[theNode];
+               Datum           theLabel = out->nodeLabels[theNode];
 
                out->nodeLabels = (Datum *) palloc(sizeof(Datum) * out->nNodes);
                for (i = 0; i < out->nNodes; i++)
@@ -754,8 +754,8 @@ doPickSplit(Relation index, SpGistState *state,
        {
                /*
                 * We are splitting the root (which up to now is also a leaf page).
-                * Its tuples are not linked, so scan sequentially to get them all.
-                * We ignore the original value of current->offnum.
+                * Its tuples are not linked, so scan sequentially to get them all. We
+                * ignore the original value of current->offnum.
                 */
                for (i = FirstOffsetNumber; i <= max; i++)
                {
@@ -773,7 +773,7 @@ doPickSplit(Relation index, SpGistState *state,
                                /* we will delete the tuple altogether, so count full space */
                                spaceToDelete += it->size + sizeof(ItemIdData);
                        }
-                       else                            /* tuples on root should be live */
+                       else    /* tuples on root should be live */
                                elog(ERROR, "unexpected SPGiST tuple state: %d", it->tupstate);
                }
        }
@@ -820,7 +820,7 @@ doPickSplit(Relation index, SpGistState *state,
         * We may not actually insert new tuple because another picksplit may be
         * necessary due to too large value, but we will try to allocate enough
         * space to include it; and in any case it has to be included in the input
-        * for the picksplit function.  So don't increment nToInsert yet.
+        * for the picksplit function.  So don't increment nToInsert yet.
         */
        in.datums[in.nTuples] = SGLTDATUM(newLeafTuple, state);
        heapPtrs[in.nTuples] = newLeafTuple->heapPtr;
@@ -878,7 +878,7 @@ doPickSplit(Relation index, SpGistState *state,
        /*
         * Check to see if the picksplit function failed to separate the values,
         * ie, it put them all into the same child node.  If so, select allTheSame
-        * mode and create a random split instead.  See comments for
+        * mode and create a random split instead.      See comments for
         * checkAllTheSame as to why we need to know if the new leaf tuples could
         * fit on one page.
         */
@@ -924,8 +924,8 @@ doPickSplit(Relation index, SpGistState *state,
        innerTuple->allTheSame = allTheSame;
 
        /*
-        * Update nodes[] array to point into the newly formed innerTuple, so
-        * that we can adjust their downlinks below.
+        * Update nodes[] array to point into the newly formed innerTuple, so that
+        * we can adjust their downlinks below.
         */
        SGITITERATE(innerTuple, i, node)
        {
@@ -944,13 +944,13 @@ doPickSplit(Relation index, SpGistState *state,
        }
 
        /*
-        * To perform the split, we must insert a new inner tuple, which can't
-        * go on a leaf page; and unless we are splitting the root page, we
-        * must then update the parent tuple's downlink to point to the inner
-        * tuple.  If there is room, we'll put the new inner tuple on the same
-        * page as the parent tuple, otherwise we need another non-leaf buffer.
-        * But if the parent page is the root, we can't add the new inner tuple
-        * there, because the root page must have only one inner tuple.
+        * To perform the split, we must insert a new inner tuple, which can't go
+        * on a leaf page; and unless we are splitting the root page, we must then
+        * update the parent tuple's downlink to point to the inner tuple.  If
+        * there is room, we'll put the new inner tuple on the same page as the
+        * parent tuple, otherwise we need another non-leaf buffer. But if the
+        * parent page is the root, we can't add the new inner tuple there,
+        * because the root page must have only one inner tuple.
         */
        xlrec.initInner = false;
        if (parent->buffer != InvalidBuffer &&
@@ -965,9 +965,9 @@ doPickSplit(Relation index, SpGistState *state,
        {
                /* Send tuple to page with next triple parity (see README) */
                newInnerBuffer = SpGistGetBuffer(index,
-                                                                                GBUF_INNER_PARITY(parent->blkno + 1) |
+                                                                          GBUF_INNER_PARITY(parent->blkno + 1) |
                                                                                 (isNulls ? GBUF_NULLS : 0),
-                                                                                innerTuple->size + sizeof(ItemIdData),
+                                                                          innerTuple->size + sizeof(ItemIdData),
                                                                                 &xlrec.initInner);
        }
        else
@@ -977,22 +977,22 @@ doPickSplit(Relation index, SpGistState *state,
        }
 
        /*
-        * Because a WAL record can't involve more than four buffers, we can
-        * only afford to deal with two leaf pages in each picksplit action,
-        * ie the current page and at most one other.
+        * Because a WAL record can't involve more than four buffers, we can only
+        * afford to deal with two leaf pages in each picksplit action, ie the
+        * current page and at most one other.
         *
-        * The new leaf tuples converted from the existing ones should require
-        * the same or less space, and therefore should all fit onto one page
+        * The new leaf tuples converted from the existing ones should require the
+        * same or less space, and therefore should all fit onto one page
         * (although that's not necessarily the current page, since we can't
         * delete the old tuples but only replace them with placeholders).
-        * However, the incoming new tuple might not also fit, in which case
-        * we might need another picksplit cycle to reduce it some more.
+        * However, the incoming new tuple might not also fit, in which case we
+        * might need another picksplit cycle to reduce it some more.
         *
-        * If there's not room to put everything back onto the current page,
-        * then we decide on a per-node basis which tuples go to the new page.
-        * (We do it like that because leaf tuple chains can't cross pages,
-        * so we must place all leaf tuples belonging to the same parent node
-        * on the same page.)
+        * If there's not room to put everything back onto the current page, then
+        * we decide on a per-node basis which tuples go to the new page. (We do
+        * it like that because leaf tuple chains can't cross pages, so we must
+        * place all leaf tuples belonging to the same parent node on the same
+        * page.)
         *
         * If we are splitting the root page (turning it from a leaf page into an
         * inner page), then no leaf tuples can go back to the current page; they
@@ -1037,12 +1037,13 @@ doPickSplit(Relation index, SpGistState *state,
                int                     newspace;
 
                newLeafBuffer = SpGistGetBuffer(index,
-                                                                               GBUF_LEAF | (isNulls ? GBUF_NULLS : 0),
+                                                                         GBUF_LEAF | (isNulls ? GBUF_NULLS : 0),
                                                                                Min(totalLeafSizes,
                                                                                        SPGIST_PAGE_CAPACITY),
                                                                                &xlrec.initDest);
+
                /*
-                * Attempt to assign node groups to the two pages.  We might fail to
+                * Attempt to assign node groups to the two pages.      We might fail to
                 * do so, even if totalLeafSizes is less than the available space,
                 * because we can't split a group across pages.
                 */
@@ -1054,12 +1055,12 @@ doPickSplit(Relation index, SpGistState *state,
                {
                        if (leafSizes[i] <= curspace)
                        {
-                               nodePageSelect[i] = 0; /* signifies current page */
+                               nodePageSelect[i] = 0;  /* signifies current page */
                                curspace -= leafSizes[i];
                        }
                        else
                        {
-                               nodePageSelect[i] = 1; /* signifies new leaf page */
+                               nodePageSelect[i] = 1;  /* signifies new leaf page */
                                newspace -= leafSizes[i];
                        }
                }
@@ -1075,7 +1076,7 @@ doPickSplit(Relation index, SpGistState *state,
                else if (includeNew)
                {
                        /* We must exclude the new leaf tuple from the split */
-                       int             nodeOfNewTuple = out.mapTuplesToNodes[in.nTuples - 1];
+                       int                     nodeOfNewTuple = out.mapTuplesToNodes[in.nTuples - 1];
 
                        leafSizes[nodeOfNewTuple] -=
                                newLeafs[in.nTuples - 1]->size + sizeof(ItemIdData);
@@ -1087,12 +1088,12 @@ doPickSplit(Relation index, SpGistState *state,
                        {
                                if (leafSizes[i] <= curspace)
                                {
-                                       nodePageSelect[i] = 0; /* signifies current page */
+                                       nodePageSelect[i] = 0;          /* signifies current page */
                                        curspace -= leafSizes[i];
                                }
                                else
                                {
-                                       nodePageSelect[i] = 1; /* signifies new leaf page */
+                                       nodePageSelect[i] = 1;          /* signifies new leaf page */
                                        newspace -= leafSizes[i];
                                }
                        }
@@ -1204,7 +1205,7 @@ doPickSplit(Relation index, SpGistState *state,
        for (i = 0; i < nToInsert; i++)
        {
                SpGistLeafTuple it = newLeafs[i];
-               Buffer  leafBuffer;
+               Buffer          leafBuffer;
                BlockNumber leafBlock;
                OffsetNumber newoffset;
 
@@ -1584,12 +1585,12 @@ spgAddNodeAction(Relation index, SpGistState *state,
                xlrec.nodeI = parent->node;
 
                /*
-                * obtain new buffer with the same parity as current, since it will
-                * be a child of same parent tuple
+                * obtain new buffer with the same parity as current, since it will be
+                * a child of same parent tuple
                 */
                current->buffer = SpGistGetBuffer(index,
                                                                                  GBUF_INNER_PARITY(current->blkno),
-                                                                                 newInnerTuple->size + sizeof(ItemIdData),
+                                                                       newInnerTuple->size + sizeof(ItemIdData),
                                                                                  &xlrec.newPage);
                current->blkno = BufferGetBlockNumber(current->buffer);
                current->page = BufferGetPage(current->buffer);
@@ -1597,15 +1598,15 @@ spgAddNodeAction(Relation index, SpGistState *state,
                xlrec.blknoNew = current->blkno;
 
                /*
-                * Let's just make real sure new current isn't same as old.  Right
-                * now that's impossible, but if SpGistGetBuffer ever got smart enough
-                * to delete placeholder tuples before checking space, maybe it
-                * wouldn't be impossible.  The case would appear to work except that
-                * WAL replay would be subtly wrong, so I think a mere assert isn't
-                * enough here.
+                * Let's just make real sure new current isn't same as old.  Right now
+                * that's impossible, but if SpGistGetBuffer ever got smart enough to
+                * delete placeholder tuples before checking space, maybe it wouldn't
+                * be impossible.  The case would appear to work except that WAL
+                * replay would be subtly wrong, so I think a mere assert isn't enough
+                * here.
                 */
-                if (xlrec.blknoNew == xlrec.blkno)
-                        elog(ERROR, "SPGiST new buffer shouldn't be same as old buffer");
+               if (xlrec.blknoNew == xlrec.blkno)
+                       elog(ERROR, "SPGiST new buffer shouldn't be same as old buffer");
 
                /*
                 * New current and parent buffer will both be modified; but note that
@@ -1707,9 +1708,9 @@ spgSplitNodeAction(Relation index, SpGistState *state,
        Assert(!SpGistPageStoresNulls(current->page));
 
        /*
-        * Construct new prefix tuple, containing a single node with the
-        * specified label.  (We'll update the node's downlink to point to the
-        * new postfix tuple, below.)
+        * Construct new prefix tuple, containing a single node with the specified
+        * label.  (We'll update the node's downlink to point to the new postfix
+        * tuple, below.)
         */
        node = spgFormNodeTuple(state, out->result.splitTuple.nodeLabel, false);
 
@@ -1888,9 +1889,9 @@ spgdoinsert(Relation index, SpGistState *state,
                                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
                        errmsg("index row size %lu exceeds maximum %lu for index \"%s\"",
                                   (unsigned long) (leafSize - sizeof(ItemIdData)),
-                                  (unsigned long) (SPGIST_PAGE_CAPACITY - sizeof(ItemIdData)),
+                                (unsigned long) (SPGIST_PAGE_CAPACITY - sizeof(ItemIdData)),
                                   RelationGetRelationName(index)),
-                 errhint("Values larger than a buffer page cannot be indexed.")));
+                       errhint("Values larger than a buffer page cannot be indexed.")));
 
        /* Initialize "current" to the appropriate root page */
        current.blkno = isnull ? SPGIST_NULL_BLKNO : SPGIST_ROOT_BLKNO;
@@ -1920,7 +1921,7 @@ spgdoinsert(Relation index, SpGistState *state,
                if (current.blkno == InvalidBlockNumber)
                {
                        /*
-                        * Create a leaf page.  If leafSize is too large to fit on a page,
+                        * Create a leaf page.  If leafSize is too large to fit on a page,
                         * we won't actually use the page yet, but it simplifies the API
                         * for doPickSplit to always have a leaf page at hand; so just
                         * quietly limit our request to a page size.
@@ -1968,7 +1969,7 @@ spgdoinsert(Relation index, SpGistState *state,
                        }
                        else if ((sizeToSplit =
                                          checkSplitConditions(index, state, &current,
-                                                                                  &nToSplit)) < SPGIST_PAGE_CAPACITY / 2 &&
+                                                                       &nToSplit)) < SPGIST_PAGE_CAPACITY / 2 &&
                                         nToSplit < 64 &&
                                         leafTuple->size + sizeof(ItemIdData) + sizeToSplit <= SPGIST_PAGE_CAPACITY)
                        {
@@ -2077,8 +2078,8 @@ spgdoinsert(Relation index, SpGistState *state,
                                        }
 
                                        /*
-                                        * Loop around and attempt to insert the new leafDatum
-                                        * at "current" (which might reference an existing child
+                                        * Loop around and attempt to insert the new leafDatum at
+                                        * "current" (which might reference an existing child
                                         * tuple, or might be invalid to force us to find a new
                                         * page for the tuple).
                                         *
@@ -2102,8 +2103,8 @@ spgdoinsert(Relation index, SpGistState *state,
                                                                         out.result.addNode.nodeLabel);
 
                                        /*
-                                        * Retry insertion into the enlarged node.  We assume
-                                        * that we'll get a MatchNode result this time.
+                                        * Retry insertion into the enlarged node.      We assume that
+                                        * we'll get a MatchNode result this time.
                                         */
                                        goto process_inner_tuple;
                                        break;
index 8ff9245e179ac71063804b79db08cb9ca35a2c9e..456a71fbba5dabb557510167baaccccea4fa6c9b 100644 (file)
@@ -123,7 +123,7 @@ spgbuild(PG_FUNCTION_ARGS)
        buildstate.spgstate.isBuild = true;
 
        buildstate.tmpCtx = AllocSetContextCreate(CurrentMemoryContext,
-                                                                                       "SP-GiST build temporary context",
+                                                                                  "SP-GiST build temporary context",
                                                                                          ALLOCSET_DEFAULT_MINSIZE,
                                                                                          ALLOCSET_DEFAULT_INITSIZE,
                                                                                          ALLOCSET_DEFAULT_MAXSIZE);
index adfe287581b8c101af79a461f176222b338dfecf..db472db9d6310cecef0cd6cb1c91977c1f1cb1d9 100644 (file)
@@ -135,12 +135,12 @@ spg_kd_picksplit(PG_FUNCTION_ARGS)
 
        /*
         * Note: points that have coordinates exactly equal to coord may get
-        * classified into either node, depending on where they happen to fall
-        * in the sorted list.  This is okay as long as the inner_consistent
-        * function descends into both sides for such cases.  This is better
-        * than the alternative of trying to have an exact boundary, because
-        * it keeps the tree balanced even when we have many instances of the
-        * same point value.  So we should never trigger the allTheSame logic.
+        * classified into either node, depending on where they happen to fall in
+        * the sorted list.  This is okay as long as the inner_consistent function
+        * descends into both sides for such cases.  This is better than the
+        * alternative of trying to have an exact boundary, because it keeps the
+        * tree balanced even when we have many instances of the same point value.
+        * So we should never trigger the allTheSame logic.
         */
        for (i = 0; i < in->nTuples; i++)
        {
index 10fafe58643b249ae6a32fee229c71b394e0e4eb..5da265025e6192cfc7ea397595c5dac820b42dd5 100644 (file)
@@ -253,8 +253,8 @@ spg_quad_inner_consistent(PG_FUNCTION_ARGS)
                                boxQuery = DatumGetBoxP(in->scankeys[i].sk_argument);
 
                                if (DatumGetBool(DirectFunctionCall2(box_contain_pt,
-                                                                                                        PointerGetDatum(boxQuery),
-                                                                                                        PointerGetDatum(centroid))))
+                                                                                                  PointerGetDatum(boxQuery),
+                                                                                                PointerGetDatum(centroid))))
                                {
                                        /* centroid is in box, so all quadrants are OK */
                                }
index 7a3a96230d176b6d13e78a771373a55c04802f4f..2a083b7c388beb3f00de872bec34252e04391f62 100644 (file)
@@ -24,7 +24,7 @@
 
 
 typedef void (*storeRes_func) (SpGistScanOpaque so, ItemPointer heapPtr,
-                                                          Datum leafValue, bool isnull, bool recheck);
+                                                                Datum leafValue, bool isnull, bool recheck);
 
 typedef struct ScanStackEntry
 {
@@ -88,7 +88,7 @@ resetSpGistScanOpaque(SpGistScanOpaque so)
        if (so->want_itup)
        {
                /* Must pfree IndexTuples to avoid memory leak */
-               int             i;
+               int                     i;
 
                for (i = 0; i < so->nPtrs; i++)
                        pfree(so->indexTups[i]);
@@ -102,7 +102,7 @@ resetSpGistScanOpaque(SpGistScanOpaque so)
  * Sets searchNulls, searchNonNulls, numberOfKeys, keyData fields of *so.
  *
  * The point here is to eliminate null-related considerations from what the
- * opclass consistent functions need to deal with.  We assume all SPGiST-
+ * opclass consistent functions need to deal with.     We assume all SPGiST-
  * indexable operators are strict, so any null RHS value makes the scan
  * condition unsatisfiable.  We also pull out any IS NULL/IS NOT NULL
  * conditions; their effect is reflected into searchNulls/searchNonNulls.
@@ -177,6 +177,7 @@ spgbeginscan(PG_FUNCTION_ARGS)
 {
        Relation        rel = (Relation) PG_GETARG_POINTER(0);
        int                     keysz = PG_GETARG_INT32(1);
+
        /* ScanKey                      scankey = (ScanKey) PG_GETARG_POINTER(2); */
        IndexScanDesc scan;
        SpGistScanOpaque so;
@@ -457,7 +458,7 @@ redirect:
                        MemoryContext oldCtx;
 
                        innerTuple = (SpGistInnerTuple) PageGetItem(page,
-                                                                                                               PageGetItemId(page, offset));
+                                                                                               PageGetItemId(page, offset));
 
                        if (innerTuple->tupstate != SPGIST_LIVE)
                        {
@@ -522,7 +523,7 @@ redirect:
 
                        for (i = 0; i < out.nNodes; i++)
                        {
-                               int             nodeN = out.nodeNumbers[i];
+                               int                     nodeN = out.nodeNumbers[i];
 
                                Assert(nodeN >= 0 && nodeN < in.nNodes);
                                if (ItemPointerIsValid(&nodes[nodeN]->t_tid))
@@ -598,7 +599,7 @@ storeGettuple(SpGistScanOpaque so, ItemPointer heapPtr,
        if (so->want_itup)
        {
                /*
-                * Reconstruct desired IndexTuple.  We have to copy the datum out of
+                * Reconstruct desired IndexTuple.      We have to copy the datum out of
                 * the temp context anyway, so we may as well create the tuple here.
                 */
                so->indexTups[so->nPtrs] = index_form_tuple(so->indexTupDesc,
@@ -636,7 +637,7 @@ spggettuple(PG_FUNCTION_ARGS)
                if (so->want_itup)
                {
                        /* Must pfree IndexTuples to avoid memory leak */
-                       int             i;
+                       int                     i;
 
                        for (i = 0; i < so->nPtrs; i++)
                                pfree(so->indexTups[i]);
index 656015ea7e6598f1fa194c2676747944d3f7bcca..520d7b24c5597d3059066dd440fce02179af6c35 100644 (file)
@@ -26,7 +26,7 @@
  * In the worst case, a inner tuple in a text suffix tree could have as many
  * as 256 nodes (one for each possible byte value).  Each node can take 16
  * bytes on MAXALIGN=8 machines.  The inner tuple must fit on an index page
- * of size BLCKSZ.  Rather than assuming we know the exact amount of overhead
+ * of size BLCKSZ.     Rather than assuming we know the exact amount of overhead
  * imposed by page headers, tuple headers, etc, we leave 100 bytes for that
  * (the actual overhead should be no more than 56 bytes at this writing, so
  * there is slop in this number).  The upshot is that the maximum safe prefix
@@ -209,9 +209,9 @@ spg_text_choose(PG_FUNCTION_ARGS)
        {
                /*
                 * Descend to existing node.  (If in->allTheSame, the core code will
-                * ignore our nodeN specification here, but that's OK.  We still
-                * have to provide the correct levelAdd and restDatum values, and
-                * those are the same regardless of which node gets chosen by core.)
+                * ignore our nodeN specification here, but that's OK.  We still have
+                * to provide the correct levelAdd and restDatum values, and those are
+                * the same regardless of which node gets chosen by core.)
                 */
                out->resultType = spgMatchNode;
                out->result.matchNode.nodeN = i;
@@ -227,10 +227,10 @@ spg_text_choose(PG_FUNCTION_ARGS)
        else if (in->allTheSame)
        {
                /*
-                * Can't use AddNode action, so split the tuple.  The upper tuple
-                * has the same prefix as before and uses an empty node label for
-                * the lower tuple.  The lower tuple has no prefix and the same
-                * node labels as the original tuple.
+                * Can't use AddNode action, so split the tuple.  The upper tuple has
+                * the same prefix as before and uses an empty node label for the
+                * lower tuple.  The lower tuple has no prefix and the same node
+                * labels as the original tuple.
                 */
                out->resultType = spgSplitTuple;
                out->result.splitTuple.prefixHasPrefix = in->hasPrefix;
@@ -315,13 +315,13 @@ spg_text_picksplit(PG_FUNCTION_ARGS)
                if (commonLen < VARSIZE_ANY_EXHDR(texti))
                        nodes[i].c = *(uint8 *) (VARDATA_ANY(texti) + commonLen);
                else
-                       nodes[i].c = '\0';                      /* use \0 if string is all common */
+                       nodes[i].c = '\0';      /* use \0 if string is all common */
                nodes[i].i = i;
                nodes[i].d = in->datums[i];
        }
 
        /*
-        * Sort by label bytes so that we can group the values into nodes.  This
+        * Sort by label bytes so that we can group the values into nodes.      This
         * also ensures that the nodes are ordered by label value, allowing the
         * use of binary search in searchChar.
         */
@@ -371,7 +371,7 @@ spg_text_inner_consistent(PG_FUNCTION_ARGS)
 
        /*
         * Reconstruct values represented at this tuple, including parent data,
-        * prefix of this tuple if any, and the node label if any.  in->level
+        * prefix of this tuple if any, and the node label if any.      in->level
         * should be the length of the previously reconstructed value, and the
         * number of bytes added here is prefixSize or prefixSize + 1.
         *
@@ -381,7 +381,7 @@ spg_text_inner_consistent(PG_FUNCTION_ARGS)
         * long-format reconstructed values.
         */
        Assert(in->level == 0 ? DatumGetPointer(in->reconstructedValue) == NULL :
-                  VARSIZE_ANY_EXHDR(DatumGetPointer(in->reconstructedValue)) == in->level);
+       VARSIZE_ANY_EXHDR(DatumGetPointer(in->reconstructedValue)) == in->level);
 
        maxReconstrLen = in->level + 1;
        if (in->hasPrefix)
@@ -530,7 +530,7 @@ spg_text_leaf_consistent(PG_FUNCTION_ARGS)
        }
        else
        {
-               text   *fullText = palloc(VARHDRSZ + fullLen);
+               text       *fullText = palloc(VARHDRSZ + fullLen);
 
                SET_VARSIZE(fullText, VARHDRSZ + fullLen);
                fullValue = VARDATA(fullText);
index 46a10f6a20617e165d32e0a79e46f632e650612f..d56c2325fe57d1e3d846ff6ddec42705d298b15a 100644 (file)
@@ -235,7 +235,7 @@ SpGistUpdateMetaPage(Relation index)
  *
  * When requesting an inner page, if we get one with the wrong parity,
  * we just release the buffer and try again.  We will get a different page
- * because GetFreeIndexPage will have marked the page used in FSM.  The page
+ * because GetFreeIndexPage will have marked the page used in FSM.     The page
  * is entered in our local lastUsedPages cache, so there's some hope of
  * making use of it later in this session, but otherwise we rely on VACUUM
  * to eventually re-enter the page in FSM, making it available for recycling.
@@ -245,7 +245,7 @@ SpGistUpdateMetaPage(Relation index)
  *
  * When we return a buffer to the caller, the page is *not* entered into
  * the lastUsedPages cache; we expect the caller will do so after it's taken
- * whatever space it will use.  This is because after the caller has used up
+ * whatever space it will use. This is because after the caller has used up
  * some space, the page might have less space than whatever was cached already
  * so we'd rather not trash the old cache entry.
  */
@@ -275,7 +275,7 @@ allocNewBuffer(Relation index, int flags)
                else
                {
                        BlockNumber blkno = BufferGetBlockNumber(buffer);
-                       int             blkFlags = GBUF_INNER_PARITY(blkno);
+                       int                     blkFlags = GBUF_INNER_PARITY(blkno);
 
                        if ((flags & GBUF_PARITY_MASK) == blkFlags)
                        {
@@ -317,7 +317,7 @@ SpGistGetBuffer(Relation index, int flags, int needSpace, bool *isNew)
 
        /*
         * If possible, increase the space request to include relation's
-        * fillfactor.  This ensures that when we add unrelated tuples to a page,
+        * fillfactor.  This ensures that when we add unrelated tuples to a page,
         * we try to keep 100-fillfactor% available for adding tuples that are
         * related to the ones already on it.  But fillfactor mustn't cause an
         * error for requests that would otherwise be legal.
@@ -664,7 +664,7 @@ spgFormInnerTuple(SpGistState *state, bool hasPrefix, Datum prefix,
                                 errmsg("SPGiST inner tuple size %lu exceeds maximum %lu",
                                                (unsigned long) size,
                                (unsigned long) (SPGIST_PAGE_CAPACITY - sizeof(ItemIdData))),
-                                errhint("Values larger than a buffer page cannot be indexed.")));
+                       errhint("Values larger than a buffer page cannot be indexed.")));
 
        /*
         * Check for overflow of header fields --- probably can't fail if the
@@ -801,7 +801,7 @@ SpGistPageAddNewItem(SpGistState *state, Page page, Item item, Size size,
                        for (; i <= maxoff; i++)
                        {
                                SpGistDeadTuple it = (SpGistDeadTuple) PageGetItem(page,
-                                                                                                       PageGetItemId(page, i));
+                                                                                                        PageGetItemId(page, i));
 
                                if (it->tupstate == SPGIST_PLACEHOLDER)
                                {
index 856790ee2aa41cac1728021a079338a10b21e429..27b55170cb4b3bfa083ce0d7ffa7cbbb1d1f2733 100644 (file)
@@ -31,8 +31,8 @@
 /* Entry in pending-list of TIDs we need to revisit */
 typedef struct spgVacPendingItem
 {
-       ItemPointerData tid;                            /* redirection target to visit */
-       bool            done;                                   /* have we dealt with this? */
+       ItemPointerData tid;            /* redirection target to visit */
+       bool            done;                   /* have we dealt with this? */
        struct spgVacPendingItem *next;         /* list link */
 } spgVacPendingItem;
 
@@ -46,10 +46,10 @@ typedef struct spgBulkDeleteState
        void       *callback_state;
 
        /* Additional working state */
-       SpGistState spgstate;                   /* for SPGiST operations that need one */
-       spgVacPendingItem *pendingList; /* TIDs we need to (re)visit */
-       TransactionId myXmin;                   /* for detecting newly-added redirects */
-       TransactionId OldestXmin;               /* for deciding a redirect is obsolete */
+       SpGistState spgstate;           /* for SPGiST operations that need one */
+       spgVacPendingItem *pendingList;         /* TIDs we need to (re)visit */
+       TransactionId myXmin;           /* for detecting newly-added redirects */
+       TransactionId OldestXmin;       /* for deciding a redirect is obsolete */
        BlockNumber lastFilledBlock;    /* last non-deletable block */
 } spgBulkDeleteState;
 
@@ -213,7 +213,7 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
         * Figure out exactly what we have to do.  We do this separately from
         * actually modifying the page, mainly so that we have a representation
         * that can be dumped into WAL and then the replay code can do exactly
-        * the same thing.  The output of this step consists of six arrays
+        * the same thing.      The output of this step consists of six arrays
         * describing four kinds of operations, to be performed in this order:
         *
         * toDead[]: tuple numbers to be replaced with DEAD tuples
@@ -276,8 +276,8 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
                        else if (prevLive == InvalidOffsetNumber)
                        {
                                /*
-                                * This is the first live tuple in the chain.  It has
-                                * to move to the head position.
+                                * This is the first live tuple in the chain.  It has to move
+                                * to the head position.
                                 */
                                moveSrc[xlrec.nMove] = j;
                                moveDest[xlrec.nMove] = i;
@@ -289,7 +289,7 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
                        else
                        {
                                /*
-                                * Second or later live tuple.  Arrange to re-chain it to the
+                                * Second or later live tuple.  Arrange to re-chain it to the
                                 * previous live one, if there was a gap.
                                 */
                                if (interveningDeletable)
@@ -353,11 +353,11 @@ vacuumLeafPage(spgBulkDeleteState *bds, Relation index, Buffer buffer,
                                                        InvalidBlockNumber, InvalidOffsetNumber);
 
        /*
-        * We implement the move step by swapping the item pointers of the
-        * source and target tuples, then replacing the newly-source tuples
-        * with placeholders.  This is perhaps unduly friendly with the page
-        * data representation, but it's fast and doesn't risk page overflow
-        * when a tuple to be relocated is large.
+        * We implement the move step by swapping the item pointers of the source
+        * and target tuples, then replacing the newly-source tuples with
+        * placeholders.  This is perhaps unduly friendly with the page data
+        * representation, but it's fast and doesn't risk page overflow when a
+        * tuple to be relocated is large.
         */
        for (i = 0; i < xlrec.nMove; i++)
        {
@@ -518,7 +518,7 @@ vacuumRedirectAndPlaceholder(Relation index, Buffer buffer,
         */
        for (i = max;
                 i >= FirstOffsetNumber &&
-                        (opaque->nRedirection > 0 || !hasNonPlaceholder);
+                (opaque->nRedirection > 0 || !hasNonPlaceholder);
                 i--)
        {
                SpGistDeadTuple dt;
@@ -651,9 +651,9 @@ spgvacuumpage(spgBulkDeleteState *bds, BlockNumber blkno)
 
        /*
         * The root pages must never be deleted, nor marked as available in FSM,
-        * because we don't want them ever returned by a search for a place to
-        * put a new tuple.  Otherwise, check for empty/deletable page, and
-        * make sure FSM knows about it.
+        * because we don't want them ever returned by a search for a place to put
+        * a new tuple.  Otherwise, check for empty/deletable page, and make sure
+        * FSM knows about it.
         */
        if (!SpGistBlockIsRoot(blkno))
        {
@@ -688,7 +688,7 @@ spgprocesspending(spgBulkDeleteState *bds)
        Relation        index = bds->info->index;
        spgVacPendingItem *pitem;
        spgVacPendingItem *nitem;
-       BlockNumber     blkno;
+       BlockNumber blkno;
        Buffer          buffer;
        Page            page;
 
@@ -741,11 +741,11 @@ spgprocesspending(spgBulkDeleteState *bds)
                else
                {
                        /*
-                        * On an inner page, visit the referenced inner tuple and add
-                        * all its downlinks to the pending list.  We might have pending
-                        * items for more than one inner tuple on the same page (in fact
-                        * this is pretty likely given the way space allocation works),
-                        * so get them all while we are here.
+                        * On an inner page, visit the referenced inner tuple and add all
+                        * its downlinks to the pending list.  We might have pending items
+                        * for more than one inner tuple on the same page (in fact this is
+                        * pretty likely given the way space allocation works), so get
+                        * them all while we are here.
                         */
                        for (nitem = pitem; nitem != NULL; nitem = nitem->next)
                        {
@@ -774,7 +774,7 @@ spgprocesspending(spgBulkDeleteState *bds)
                                        {
                                                /* transfer attention to redirect point */
                                                spgAddPendingTID(bds,
-                                                                                &((SpGistDeadTuple) innerTuple)->pointer);
+                                                                  &((SpGistDeadTuple) innerTuple)->pointer);
                                        }
                                        else
                                                elog(ERROR, "unexpected SPGiST tuple state: %d",
@@ -825,8 +825,8 @@ spgvacuumscan(spgBulkDeleteState *bds)
         * physical order (we hope the kernel will cooperate in providing
         * read-ahead for speed).  It is critical that we visit all leaf pages,
         * including ones added after we start the scan, else we might fail to
-        * delete some deletable tuples.  See more extensive comments about
-        * this in btvacuumscan().
+        * delete some deletable tuples.  See more extensive comments about this
+        * in btvacuumscan().
         */
        blkno = SPGIST_METAPAGE_BLKNO + 1;
        for (;;)
index 8e87e2adc9060ffee2a5dcc4b76f2cd2b7726256..82f8c8b978a01a426d4019aa5f7fdbfc463a5b36 100644 (file)
@@ -40,7 +40,7 @@ fillFakeState(SpGistState *state, spgxlogState stateSrc)
 }
 
 /*
- * Add a leaf tuple, or replace an existing placeholder tuple.  This is used
+ * Add a leaf tuple, or replace an existing placeholder tuple. This is used
  * to replay SpGistPageAddNewItem() operations.  If the offset points at an
  * existing tuple, it had better be a placeholder tuple.
  */
@@ -50,7 +50,7 @@ addOrReplaceTuple(Page page, Item tuple, int size, OffsetNumber offset)
        if (offset <= PageGetMaxOffsetNumber(page))
        {
                SpGistDeadTuple dt = (SpGistDeadTuple) PageGetItem(page,
-                                                                                                                  PageGetItemId(page, offset));
+                                                                                               PageGetItemId(page, offset));
 
                if (dt->tupstate != SPGIST_PLACEHOLDER)
                        elog(ERROR, "SPGiST tuple to be replaced is not a placeholder");
@@ -126,7 +126,7 @@ spgRedoAddLeaf(XLogRecPtr lsn, XLogRecord *record)
 
                        if (xldata->newPage)
                                SpGistInitBuffer(buffer,
-                                                                SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
+                                        SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
 
                        if (!XLByteLE(lsn, PageGetLSN(page)))
                        {
@@ -143,7 +143,7 @@ spgRedoAddLeaf(XLogRecPtr lsn, XLogRecord *record)
                                                SpGistLeafTuple head;
 
                                                head = (SpGistLeafTuple) PageGetItem(page,
-                                                                                                                PageGetItemId(page, xldata->offnumHeadLeaf));
+                                                               PageGetItemId(page, xldata->offnumHeadLeaf));
                                                Assert(head->nextOffset == leafTuple->nextOffset);
                                                head->nextOffset = xldata->offnumLeaf;
                                        }
@@ -154,7 +154,7 @@ spgRedoAddLeaf(XLogRecPtr lsn, XLogRecord *record)
                                        PageIndexTupleDelete(page, xldata->offnumLeaf);
                                        if (PageAddItem(page,
                                                                        (Item) leafTuple, leafTuple->size,
-                                                                       xldata->offnumLeaf, false, false) != xldata->offnumLeaf)
+                                        xldata->offnumLeaf, false, false) != xldata->offnumLeaf)
                                                elog(ERROR, "failed to add item of size %u to SPGiST index page",
                                                         leafTuple->size);
                                }
@@ -180,7 +180,7 @@ spgRedoAddLeaf(XLogRecPtr lsn, XLogRecord *record)
                                SpGistInnerTuple tuple;
 
                                tuple = (SpGistInnerTuple) PageGetItem(page,
-                                                                                                          PageGetItemId(page, xldata->offnumParent));
+                                                                 PageGetItemId(page, xldata->offnumParent));
 
                                spgUpdateNodeLink(tuple, xldata->nodeI,
                                                                  xldata->blknoLeaf, xldata->offnumLeaf);
@@ -229,7 +229,7 @@ spgRedoMoveLeafs(XLogRecPtr lsn, XLogRecord *record)
 
                        if (xldata->newPage)
                                SpGistInitBuffer(buffer,
-                                                                SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
+                                        SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
 
                        if (!XLByteLE(lsn, PageGetLSN(page)))
                        {
@@ -261,7 +261,7 @@ spgRedoMoveLeafs(XLogRecPtr lsn, XLogRecord *record)
                        if (!XLByteLE(lsn, PageGetLSN(page)))
                        {
                                spgPageIndexMultiDelete(&state, page, toDelete, xldata->nMoves,
-                                                                               state.isBuild ? SPGIST_PLACEHOLDER : SPGIST_REDIRECT,
+                                               state.isBuild ? SPGIST_PLACEHOLDER : SPGIST_REDIRECT,
                                                                                SPGIST_PLACEHOLDER,
                                                                                xldata->blknoDst,
                                                                                toInsert[nInsert - 1]);
@@ -286,7 +286,7 @@ spgRedoMoveLeafs(XLogRecPtr lsn, XLogRecord *record)
                                SpGistInnerTuple tuple;
 
                                tuple = (SpGistInnerTuple) PageGetItem(page,
-                                                                                                          PageGetItemId(page, xldata->offnumParent));
+                                                                 PageGetItemId(page, xldata->offnumParent));
 
                                spgUpdateNodeLink(tuple, xldata->nodeI,
                                                                  xldata->blknoDst, toInsert[nInsert - 1]);
@@ -413,7 +413,7 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record)
                }
 
                /*
-                * Update parent downlink.  Since parent could be in either of the
+                * Update parent downlink.      Since parent could be in either of the
                 * previous two buffers, it's a bit tricky to determine which BKP bit
                 * applies.
                 */
@@ -435,7 +435,7 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record)
                                        SpGistInnerTuple innerTuple;
 
                                        innerTuple = (SpGistInnerTuple) PageGetItem(page,
-                                                                                                                               PageGetItemId(page, xldata->offnumParent));
+                                                                 PageGetItemId(page, xldata->offnumParent));
 
                                        spgUpdateNodeLink(innerTuple, xldata->nodeI,
                                                                          xldata->blknoNew, xldata->offnumNew);
@@ -504,7 +504,7 @@ spgRedoSplitTuple(XLogRecPtr lsn, XLogRecord *record)
                        {
                                PageIndexTupleDelete(page, xldata->offnumPrefix);
                                if (PageAddItem(page, (Item) prefixTuple, prefixTuple->size,
-                                                               xldata->offnumPrefix, false, false) != xldata->offnumPrefix)
+                                xldata->offnumPrefix, false, false) != xldata->offnumPrefix)
                                        elog(ERROR, "failed to add item of size %u to SPGiST index page",
                                                 prefixTuple->size);
 
@@ -571,7 +571,7 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
                page = (Page) BufferGetPage(srcBuffer);
 
                SpGistInitBuffer(srcBuffer,
-                                                SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
+                                        SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
                /* don't update LSN etc till we're done with it */
        }
        else
@@ -587,8 +587,8 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
                                {
                                        /*
                                         * We have it a bit easier here than in doPickSplit(),
-                                        * because we know the inner tuple's location already,
-                                        * so we can inject the correct redirection tuple now.
+                                        * because we know the inner tuple's location already, so
+                                        * we can inject the correct redirection tuple now.
                                         */
                                        if (!state.isBuild)
                                                spgPageIndexMultiDelete(&state, page,
@@ -627,7 +627,7 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
                page = (Page) BufferGetPage(destBuffer);
 
                SpGistInitBuffer(destBuffer,
-                                                SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
+                                        SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
                /* don't update LSN etc till we're done with it */
        }
        else
@@ -707,9 +707,9 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
                                        SpGistInnerTuple parent;
 
                                        parent = (SpGistInnerTuple) PageGetItem(page,
-                                                                       PageGetItemId(page, xldata->offnumParent));
+                                                                 PageGetItemId(page, xldata->offnumParent));
                                        spgUpdateNodeLink(parent, xldata->nodeI,
-                                                                         xldata->blknoInner, xldata->offnumInner);
+                                                                       xldata->blknoInner, xldata->offnumInner);
                                }
 
                                PageSetLSN(page, lsn);
@@ -742,9 +742,9 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
                                        SpGistInnerTuple parent;
 
                                        parent = (SpGistInnerTuple) PageGetItem(page,
-                                                                       PageGetItemId(page, xldata->offnumParent));
+                                                                 PageGetItemId(page, xldata->offnumParent));
                                        spgUpdateNodeLink(parent, xldata->nodeI,
-                                                                         xldata->blknoInner, xldata->offnumInner);
+                                                                       xldata->blknoInner, xldata->offnumInner);
 
                                        PageSetLSN(page, lsn);
                                        PageSetTLI(page, ThisTimeLineID);
@@ -803,7 +803,7 @@ spgRedoVacuumLeaf(XLogRecPtr lsn, XLogRecord *record)
 
                                spgPageIndexMultiDelete(&state, page,
                                                                                toPlaceholder, xldata->nPlaceholder,
-                                                                               SPGIST_PLACEHOLDER, SPGIST_PLACEHOLDER,
+                                                                         SPGIST_PLACEHOLDER, SPGIST_PLACEHOLDER,
                                                                                InvalidBlockNumber,
                                                                                InvalidOffsetNumber);
 
@@ -821,7 +821,7 @@ spgRedoVacuumLeaf(XLogRecPtr lsn, XLogRecord *record)
 
                                spgPageIndexMultiDelete(&state, page,
                                                                                moveSrc, xldata->nMove,
-                                                                               SPGIST_PLACEHOLDER, SPGIST_PLACEHOLDER,
+                                                                         SPGIST_PLACEHOLDER, SPGIST_PLACEHOLDER,
                                                                                InvalidBlockNumber,
                                                                                InvalidOffsetNumber);
 
@@ -906,7 +906,7 @@ spgRedoVacuumRedirect(XLogRecPtr lsn, XLogRecord *record)
                                        SpGistDeadTuple dt;
 
                                        dt = (SpGistDeadTuple) PageGetItem(page,
-                                                                                                          PageGetItemId(page, itemToPlaceholder[i]));
+                                                                 PageGetItemId(page, itemToPlaceholder[i]));
                                        Assert(dt->tupstate == SPGIST_REDIRECT);
                                        dt->tupstate = SPGIST_PLACEHOLDER;
                                        ItemPointerSetInvalid(&dt->pointer);
index 33b5ca2d36f3391a4884c2173b55a149bf096a59..7f2f6921d5db4de494633788aa407a1f6f85e72d 100644 (file)
@@ -417,7 +417,7 @@ TransactionIdGetStatus(TransactionId xid, XLogRecPtr *lsn)
  * Testing during the PostgreSQL 9.2 development cycle revealed that on a
  * large multi-processor system, it was possible to have more CLOG page
  * requests in flight at one time than the numebr of CLOG buffers which existed
- * at that time, which was hardcoded to 8.  Further testing revealed that
+ * at that time, which was hardcoded to 8.     Further testing revealed that
  * performance dropped off with more than 32 CLOG buffers, possibly because
  * the linear buffer search algorithm doesn't scale well.
  *
index a8e3f19119a568858e8107bf53196bffa5cfa2c5..dd69c232eb4f18dbc76bd0901a4f2be28289f649 100644 (file)
@@ -903,12 +903,12 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
        {
                int                     slotno;
                int                     cur_count;
-               int                     bestvalidslot = 0;                              /* keep compiler quiet */
+               int                     bestvalidslot = 0;      /* keep compiler quiet */
                int                     best_valid_delta = -1;
-               int                     best_valid_page_number = 0;             /* keep compiler quiet */
-               int                     bestinvalidslot = 0;                    /* keep compiler quiet */
+               int                     best_valid_page_number = 0; /* keep compiler quiet */
+               int                     bestinvalidslot = 0;            /* keep compiler quiet */
                int                     best_invalid_delta = -1;
-               int                     best_invalid_page_number = 0;   /* keep compiler quiet */
+               int                     best_invalid_page_number = 0;           /* keep compiler quiet */
 
                /* See if page already has a buffer assigned */
                for (slotno = 0; slotno < shared->num_slots; slotno++)
@@ -920,15 +920,15 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
 
                /*
                 * If we find any EMPTY slot, just select that one. Else choose a
-                * victim page to replace.  We normally take the least recently used
+                * victim page to replace.      We normally take the least recently used
                 * valid page, but we will never take the slot containing
-                * latest_page_number, even if it appears least recently used.  We
+                * latest_page_number, even if it appears least recently used.  We
                 * will select a slot that is already I/O busy only if there is no
                 * other choice: a read-busy slot will not be least recently used once
                 * the read finishes, and waiting for an I/O on a write-busy slot is
                 * inferior to just picking some other slot.  Testing shows the slot
-                * we pick instead will often be clean, allowing us to begin a read
-                * at once.
+                * we pick instead will often be clean, allowing us to begin a read at
+                * once.
                 *
                 * Normally the page_lru_count values will all be different and so
                 * there will be a well-defined LRU page.  But since we allow
@@ -997,10 +997,10 @@ SlruSelectLRUPage(SlruCtl ctl, int pageno)
 
                /*
                 * If all pages (except possibly the latest one) are I/O busy, we'll
-                * have to wait for an I/O to complete and then retry.  In that unhappy
-                * case, we choose to wait for the I/O on the least recently used slot,
-                * on the assumption that it was likely initiated first of all the I/Os
-                * in progress and may therefore finish first.
+                * have to wait for an I/O to complete and then retry.  In that
+                * unhappy case, we choose to wait for the I/O on the least recently
+                * used slot, on the assumption that it was likely initiated first of
+                * all the I/Os in progress and may therefore finish first.
                 */
                if (best_valid_delta < 0)
                {
@@ -1168,20 +1168,20 @@ restart:;
 
 /*
  * SlruScanDirectory callback
- *             This callback reports true if there's any segment prior to the one
- *             containing the page passed as "data".
+ *             This callback reports true if there's any segment prior to the one
+ *             containing the page passed as "data".
  */
 bool
 SlruScanDirCbReportPresence(SlruCtl ctl, char *filename, int segpage, void *data)
 {
-       int             cutoffPage = *(int *) data;
+       int                     cutoffPage = *(int *) data;
 
        cutoffPage -= cutoffPage % SLRU_PAGES_PER_SEGMENT;
 
        if (ctl->PagePrecedes(segpage, cutoffPage))
-               return true;    /* found one; don't iterate any more */
+               return true;                    /* found one; don't iterate any more */
 
-       return false;   /* keep going */
+       return false;                           /* keep going */
 }
 
 /*
@@ -1191,8 +1191,8 @@ SlruScanDirCbReportPresence(SlruCtl ctl, char *filename, int segpage, void *data
 static bool
 SlruScanDirCbDeleteCutoff(SlruCtl ctl, char *filename, int segpage, void *data)
 {
-       char    path[MAXPGPATH];
-       int             cutoffPage = *(int *) data;
+       char            path[MAXPGPATH];
+       int                     cutoffPage = *(int *) data;
 
        if (ctl->PagePrecedes(segpage, cutoffPage))
        {
@@ -1202,7 +1202,7 @@ SlruScanDirCbDeleteCutoff(SlruCtl ctl, char *filename, int segpage, void *data)
                unlink(path);
        }
 
-       return false;   /* keep going */
+       return false;                           /* keep going */
 }
 
 /*
@@ -1212,14 +1212,14 @@ SlruScanDirCbDeleteCutoff(SlruCtl ctl, char *filename, int segpage, void *data)
 bool
 SlruScanDirCbDeleteAll(SlruCtl ctl, char *filename, int segpage, void *data)
 {
-       char    path[MAXPGPATH];
+       char            path[MAXPGPATH];
 
        snprintf(path, MAXPGPATH, "%s/%s", ctl->Dir, filename);
        ereport(DEBUG2,
                        (errmsg("removing file \"%s\"", path)));
        unlink(path);
 
-       return false;   /* keep going */
+       return false;                           /* keep going */
 }
 
 /*
index 0b41a76a3251461e3c53ed0d33ae8789a7c0d22f..b94fae3740251426b80dff346fc03424e7e6a76f 100644 (file)
@@ -360,8 +360,9 @@ static void
 GXactLoadSubxactData(GlobalTransaction gxact, int nsubxacts,
                                         TransactionId *children)
 {
-       PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno];
-       PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
+       PGPROC     *proc = &ProcGlobal->allProcs[gxact->pgprocno];
+       PGXACT     *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
+
        /* We need no extra lock since the GXACT isn't valid yet */
        if (nsubxacts > PGPROC_MAX_CACHED_SUBXIDS)
        {
@@ -410,7 +411,7 @@ LockGXact(const char *gid, Oid user)
        for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
        {
                GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
-               PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno];
+               PGPROC     *proc = &ProcGlobal->allProcs[gxact->pgprocno];
 
                /* Ignore not-yet-valid GIDs */
                if (!gxact->valid)
@@ -523,7 +524,7 @@ TransactionIdIsPrepared(TransactionId xid)
        for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
        {
                GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
-               PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
+               PGXACT     *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
 
                if (gxact->valid && pgxact->xid == xid)
                {
@@ -648,8 +649,8 @@ pg_prepared_xact(PG_FUNCTION_ARGS)
        while (status->array != NULL && status->currIdx < status->ngxacts)
        {
                GlobalTransaction gxact = &status->array[status->currIdx++];
-               PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno];
-               PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
+               PGPROC     *proc = &ProcGlobal->allProcs[gxact->pgprocno];
+               PGXACT     *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
                Datum           values[5];
                bool            nulls[5];
                HeapTuple       tuple;
@@ -719,7 +720,7 @@ TwoPhaseGetDummyProc(TransactionId xid)
        for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
        {
                GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
-               PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
+               PGXACT     *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
 
                if (pgxact->xid == xid)
                {
@@ -850,8 +851,8 @@ save_state_data(const void *data, uint32 len)
 void
 StartPrepare(GlobalTransaction gxact)
 {
-       PGPROC *proc = &ProcGlobal->allProcs[gxact->pgprocno];
-       PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
+       PGPROC     *proc = &ProcGlobal->allProcs[gxact->pgprocno];
+       PGXACT     *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
        TransactionId xid = pgxact->xid;
        TwoPhaseFileHeader hdr;
        TransactionId *children;
@@ -1063,9 +1064,9 @@ EndPrepare(GlobalTransaction gxact)
                                 errmsg("could not close two-phase state file: %m")));
 
        /*
-        * Mark the prepared transaction as valid.      As soon as xact.c marks MyPgXact
-        * as not running our XID (which it will do immediately after this
-        * function returns), others can commit/rollback the xact.
+        * Mark the prepared transaction as valid.      As soon as xact.c marks
+        * MyPgXact as not running our XID (which it will do immediately after
+        * this function returns), others can commit/rollback the xact.
         *
         * NB: a side effect of this is to make a dummy ProcArray entry for the
         * prepared XID.  This must happen before we clear the XID from MyPgXact,
@@ -1551,7 +1552,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
        for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
        {
                GlobalTransaction gxact = TwoPhaseState->prepXacts[i];
-               PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
+               PGXACT     *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
 
                if (gxact->valid &&
                        XLByteLE(gxact->prepare_lsn, redo_horizon))
@@ -1707,7 +1708,7 @@ PrescanPreparedTransactions(TransactionId **xids_p, int *nxids_p)
                         * XID, and they may force us to advance nextXid.
                         *
                         * We don't expect anyone else to modify nextXid, hence we don't
-                        * need to hold a lock while examining it.  We still acquire the
+                        * need to hold a lock while examining it.      We still acquire the
                         * lock to modify it, though.
                         */
                        subxids = (TransactionId *)
index 892a46abc3fa34dd16627e6747b2279387b65b30..7abf9343beeff774da70e1debb3ee6174ec5cafd 100644 (file)
@@ -174,8 +174,8 @@ GetNewTransactionId(bool isSubXact)
         * latestCompletedXid is present in the ProcArray, which is essential for
         * correct OldestXmin tracking; see src/backend/access/transam/README.
         *
-        * XXX by storing xid into MyPgXact without acquiring ProcArrayLock, we are
-        * relying on fetch/store of an xid to be atomic, else other backends
+        * XXX by storing xid into MyPgXact without acquiring ProcArrayLock, we
+        * are relying on fetch/store of an xid to be atomic, else other backends
         * might see a partially-set xid here.  But holding both locks at once
         * would be a nasty concurrency hit.  So for now, assume atomicity.
         *
index 659b53524cdf71dd8d36e1723f09838c49c82e1c..8f00186dd741b565c2769da496b28b40fbddc881 100644 (file)
@@ -1019,6 +1019,7 @@ RecordTransactionCommit(void)
                        XLogRecData rdata[4];
                        int                     lastrdata = 0;
                        xl_xact_commit xlrec;
+
                        /*
                         * Set flags required for recovery processing of commits.
                         */
@@ -1073,7 +1074,8 @@ RecordTransactionCommit(void)
                {
                        XLogRecData rdata[2];
                        int                     lastrdata = 0;
-                       xl_xact_commit_compact  xlrec;
+                       xl_xact_commit_compact xlrec;
+
                        xlrec.xact_time = xactStopTimestamp;
                        xlrec.nsubxacts = nchildren;
                        rdata[0].data = (char *) (&xlrec);
@@ -2102,7 +2104,7 @@ PrepareTransaction(void)
        if (XactHasExportedSnapshots())
                ereport(ERROR,
                                (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-                                errmsg("cannot PREPARE a transaction that has exported snapshots")));
+               errmsg("cannot PREPARE a transaction that has exported snapshots")));
 
        /* Prevent cancel/die interrupt while cleaning up */
        HOLD_INTERRUPTS();
@@ -2602,10 +2604,10 @@ CommitTransactionCommand(void)
                        break;
 
                        /*
-                        * We were issued a RELEASE command, so we end the
-                        * current subtransaction and return to the parent transaction.
-                        * The parent might be ended too, so repeat till we find an
-                        * INPROGRESS transaction or subtransaction.
+                        * We were issued a RELEASE command, so we end the current
+                        * subtransaction and return to the parent transaction. The parent
+                        * might be ended too, so repeat till we find an INPROGRESS
+                        * transaction or subtransaction.
                         */
                case TBLOCK_SUBRELEASE:
                        do
@@ -2623,9 +2625,9 @@ CommitTransactionCommand(void)
                         * hierarchy and perform final commit. We do this by rolling up
                         * any subtransactions into their parent, which leads to O(N^2)
                         * operations with respect to resource owners - this isn't that
-                        * bad until we approach a thousands of savepoints but is necessary
-                        * for correctness should after triggers create new resource
-                        * owners.
+                        * bad until we approach a thousands of savepoints but is
+                        * necessary for correctness should after triggers create new
+                        * resource owners.
                         */
                case TBLOCK_SUBCOMMIT:
                        do
@@ -4551,11 +4553,11 @@ xactGetCommittedChildren(TransactionId **ptr)
  */
 static void
 xact_redo_commit_internal(TransactionId xid, XLogRecPtr lsn,
-                                       TransactionId *sub_xids, int nsubxacts,
-                                       SharedInvalidationMessage *inval_msgs, int nmsgs,
-                                       RelFileNode *xnodes, int nrels,
-                                       Oid dbId, Oid tsId,
-                                       uint32 xinfo)
+                                                 TransactionId *sub_xids, int nsubxacts,
+                                                 SharedInvalidationMessage *inval_msgs, int nmsgs,
+                                                 RelFileNode *xnodes, int nrels,
+                                                 Oid dbId, Oid tsId,
+                                                 uint32 xinfo)
 {
        TransactionId max_xid;
        int                     i;
@@ -4659,12 +4661,13 @@ xact_redo_commit_internal(TransactionId xid, XLogRecPtr lsn,
                XLogFlush(lsn);
 
 }
+
 /*
  * Utility function to call xact_redo_commit_internal after breaking down xlrec
  */
 static void
 xact_redo_commit(xl_xact_commit *xlrec,
-                                                       TransactionId xid, XLogRecPtr lsn)
+                                TransactionId xid, XLogRecPtr lsn)
 {
        TransactionId *subxacts;
        SharedInvalidationMessage *inval_msgs;
@@ -4675,11 +4678,11 @@ xact_redo_commit(xl_xact_commit *xlrec,
        inval_msgs = (SharedInvalidationMessage *) &(subxacts[xlrec->nsubxacts]);
 
        xact_redo_commit_internal(xid, lsn, subxacts, xlrec->nsubxacts,
-                                                               inval_msgs, xlrec->nmsgs,
-                                                               xlrec->xnodes, xlrec->nrels,
-                                                               xlrec->dbId,
-                                                               xlrec->tsId,
-                                                               xlrec->xinfo);
+                                                         inval_msgs, xlrec->nmsgs,
+                                                         xlrec->xnodes, xlrec->nrels,
+                                                         xlrec->dbId,
+                                                         xlrec->tsId,
+                                                         xlrec->xinfo);
 }
 
 /*
@@ -4687,14 +4690,14 @@ xact_redo_commit(xl_xact_commit *xlrec,
  */
 static void
 xact_redo_commit_compact(xl_xact_commit_compact *xlrec,
-                                                       TransactionId xid, XLogRecPtr lsn)
+                                                TransactionId xid, XLogRecPtr lsn)
 {
        xact_redo_commit_internal(xid, lsn, xlrec->subxacts, xlrec->nsubxacts,
-                                                               NULL, 0,                /* inval msgs */
-                                                               NULL, 0,                /* relfilenodes */
-                                                               InvalidOid,             /* dbId */
-                                                               InvalidOid,             /* tsId */
-                                                               0);                             /* xinfo */
+                                                         NULL, 0,      /* inval msgs */
+                                                         NULL, 0,      /* relfilenodes */
+                                                         InvalidOid,           /* dbId */
+                                                         InvalidOid,           /* tsId */
+                                                         0);           /* xinfo */
 }
 
 /*
index 0f2678cfda04a750d436d0eb1a2d7632ba2d5b57..bcb71c45b2a1b677d81c4634b14b6af96faffe97 100644 (file)
@@ -344,10 +344,10 @@ typedef struct XLogCtlInsert
 
        /*
         * fullPageWrites is the master copy used by all backends to determine
-        * whether to write full-page to WAL, instead of using process-local
-        * one. This is required because, when full_page_writes is changed
-        * by SIGHUP, we must WAL-log it before it actually affects
-        * WAL-logging by backends. Checkpointer sets at startup or after SIGHUP.
+        * whether to write full-page to WAL, instead of using process-local one.
+        * This is required because, when full_page_writes is changed by SIGHUP,
+        * we must WAL-log it before it actually affects WAL-logging by backends.
+        * Checkpointer sets at startup or after SIGHUP.
         */
        bool            fullPageWrites;
 
@@ -455,8 +455,11 @@ typedef struct XLogCtlData
        XLogRecPtr      recoveryLastRecPtr;
        /* timestamp of last COMMIT/ABORT record replayed (or being replayed) */
        TimestampTz recoveryLastXTime;
-       /* timestamp of when we started replaying the current chunk of WAL data,
-        * only relevant for replication or archive recovery */
+
+       /*
+        * timestamp of when we started replaying the current chunk of WAL data,
+        * only relevant for replication or archive recovery
+        */
        TimestampTz currentChunkStartTime;
        /* end of the last record restored from the archive */
        XLogRecPtr      restoreLastRecPtr;
@@ -580,7 +583,7 @@ static bool updateMinRecoveryPoint = true;
  * to replay all the WAL, so reachedConsistency is never set. During archive
  * recovery, the database is consistent once minRecoveryPoint is reached.
  */
-bool reachedConsistency = false;
+bool           reachedConsistency = false;
 
 static bool InRedo = false;
 
@@ -750,8 +753,8 @@ XLogInsert(RmgrId rmid, uint8 info, XLogRecData *rdata)
         * insert lock, but it seems better to avoid doing CRC calculations while
         * holding the lock.
         *
-        * We add entries for backup blocks to the chain, so that they don't
-        * need any special treatment in the critical section where the chunks are
+        * We add entries for backup blocks to the chain, so that they don't need
+        * any special treatment in the critical section where the chunks are
         * copied into the WAL buffers. Those entries have to be unlinked from the
         * chain if we have to loop back here.
         */
@@ -896,10 +899,10 @@ begin:;
        /*
         * Calculate CRC of the data, including all the backup blocks
         *
-        * Note that the record header isn't added into the CRC initially since
-        * we don't know the prev-link yet.  Thus, the CRC will represent the CRC
-        * of the whole record in the order: rdata, then backup blocks, then
-        * record header.
+        * Note that the record header isn't added into the CRC initially since we
+        * don't know the prev-link yet.  Thus, the CRC will represent the CRC of
+        * the whole record in the order: rdata, then backup blocks, then record
+        * header.
         */
        INIT_CRC32(rdata_crc);
        for (rdt = rdata; rdt != NULL; rdt = rdt->next)
@@ -948,10 +951,10 @@ begin:;
        }
 
        /*
-        * Also check to see if fullPageWrites or forcePageWrites was just turned on;
-        * if we weren't already doing full-page writes then go back and recompute.
-        * (If it was just turned off, we could recompute the record without full pages,
-        * but we choose not to bother.)
+        * Also check to see if fullPageWrites or forcePageWrites was just turned
+        * on; if we weren't already doing full-page writes then go back and
+        * recompute. (If it was just turned off, we could recompute the record
+        * without full pages, but we choose not to bother.)
         */
        if ((Insert->fullPageWrites || Insert->forcePageWrites) && !doPageWrites)
        {
@@ -1575,15 +1578,15 @@ AdvanceXLInsertBuffer(bool new_segment)
         * WAL records beginning in this page have removable backup blocks.  This
         * allows the WAL archiver to know whether it is safe to compress archived
         * WAL data by transforming full-block records into the non-full-block
-        * format.  It is sufficient to record this at the page level because we
+        * format.      It is sufficient to record this at the page level because we
         * force a page switch (in fact a segment switch) when starting a backup,
         * so the flag will be off before any records can be written during the
-        * backup.  At the end of a backup, the last page will be marked as all
+        * backup.      At the end of a backup, the last page will be marked as all
         * unsafe when perhaps only part is unsafe, but at worst the archiver
         * would miss the opportunity to compress a few records.
         */
        if (!Insert->forcePageWrites)
-               NewPage->xlp_info |= XLP_BKP_REMOVABLE;
+               NewPage   ->xlp_info |= XLP_BKP_REMOVABLE;
 
        /*
         * If first page of an XLOG segment file, make it a long header.
@@ -1827,11 +1830,11 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible, bool xlog_switch)
                                Write->lastSegSwitchTime = (pg_time_t) time(NULL);
 
                                /*
-                                * Request a checkpoint if we've consumed too
-                                * much xlog since the last one.  For speed, we first check
-                                * using the local copy of RedoRecPtr, which might be out of
-                                * date; if it looks like a checkpoint is needed, forcibly
-                                * update RedoRecPtr and recheck.
+                                * Request a checkpoint if we've consumed too much xlog since
+                                * the last one.  For speed, we first check using the local
+                                * copy of RedoRecPtr, which might be out of date; if it looks
+                                * like a checkpoint is needed, forcibly update RedoRecPtr and
+                                * recheck.
                                 */
                                if (IsUnderPostmaster &&
                                        XLogCheckpointNeeded(openLogId, openLogSeg))
@@ -1931,7 +1934,7 @@ XLogSetAsyncXactLSN(XLogRecPtr asyncXactLSN)
 
        /*
         * If the WALWriter is sleeping, we should kick it to make it come out of
-        * low-power mode.  Otherwise, determine whether there's a full page of
+        * low-power mode.      Otherwise, determine whether there's a full page of
         * WAL available to write.
         */
        if (!sleeping)
@@ -1945,9 +1948,9 @@ XLogSetAsyncXactLSN(XLogRecPtr asyncXactLSN)
        }
 
        /*
-        * Nudge the WALWriter: it has a full page of WAL to write, or we want
-        * it to come out of low-power mode so that this async commit will reach
-        * disk within the expected amount of time.
+        * Nudge the WALWriter: it has a full page of WAL to write, or we want it
+        * to come out of low-power mode so that this async commit will reach disk
+        * within the expected amount of time.
         */
        if (ProcGlobal->walwriterLatch)
                SetLatch(ProcGlobal->walwriterLatch);
@@ -2076,8 +2079,8 @@ XLogFlush(XLogRecPtr record)
        WriteRqstPtr = record;
 
        /*
-        * Now wait until we get the write lock, or someone else does the
-        * flush for us.
+        * Now wait until we get the write lock, or someone else does the flush
+        * for us.
         */
        for (;;)
        {
@@ -2182,7 +2185,7 @@ XLogFlush(XLogRecPtr record)
  * block, and flush through the latest one of those.  Thus, if async commits
  * are not being used, we will flush complete blocks only.     We can guarantee
  * that async commits reach disk after at most three cycles; normally only
- * one or two.  (When flushing complete blocks, we allow XLogWrite to write
+ * one or two. (When flushing complete blocks, we allow XLogWrite to write
  * "flexibly", meaning it can stop at the end of the buffer ring; this makes a
  * difference only with very high load or long wal_writer_delay, but imposes
  * one extra cycle for the worst case for async commits.)
@@ -2273,7 +2276,8 @@ XLogBackgroundFlush(void)
 
        /*
         * If we wrote something then we have something to send to standbys also,
-        * otherwise the replication delay become around 7s with just async commit.
+        * otherwise the replication delay become around 7s with just async
+        * commit.
         */
        if (wrote_something)
                WalSndWakeup();
@@ -2776,17 +2780,17 @@ XLogFileRead(uint32 log, uint32 seg, int emode, TimeLineID tli,
        }
 
        /*
-        * If the segment was fetched from archival storage, replace
-        * the existing xlog segment (if any) with the archival version.
+        * If the segment was fetched from archival storage, replace the existing
+        * xlog segment (if any) with the archival version.
         */
        if (source == XLOG_FROM_ARCHIVE)
        {
                /* use volatile pointer to prevent code rearrangement */
                volatile XLogCtlData *xlogctl = XLogCtl;
-               XLogRecPtr              endptr;
-               char                    xlogfpath[MAXPGPATH];
-               bool                    reload = false;
-               struct stat             statbuf;
+               XLogRecPtr      endptr;
+               char            xlogfpath[MAXPGPATH];
+               bool            reload = false;
+               struct stat statbuf;
 
                XLogFilePath(xlogfpath, tli, log, seg);
                if (stat(xlogfpath, &statbuf) == 0)
@@ -2801,9 +2805,9 @@ XLogFileRead(uint32 log, uint32 seg, int emode, TimeLineID tli,
 
                if (rename(path, xlogfpath) < 0)
                        ereport(ERROR,
-                               (errcode_for_file_access(),
-                                errmsg("could not rename file \"%s\" to \"%s\": %m",
-                                               path, xlogfpath)));
+                                       (errcode_for_file_access(),
+                                        errmsg("could not rename file \"%s\" to \"%s\": %m",
+                                                       path, xlogfpath)));
 
                /*
                 * If the existing segment was replaced, since walsenders might have
@@ -3812,7 +3816,7 @@ ReadRecord(XLogRecPtr *RecPtr, int emode, bool fetching_ckpt)
                RecPtr = &tmpRecPtr;
 
                /*
-                * RecPtr is pointing to end+1 of the previous WAL record.  We must
+                * RecPtr is pointing to end+1 of the previous WAL record.      We must
                 * advance it if necessary to where the next record starts.  First,
                 * align to next page if no more records can fit on the current page.
                 */
@@ -5389,10 +5393,10 @@ readRecoveryCommandFile(void)
                        }
                        if (rtli)
                                ereport(DEBUG2,
-                                               (errmsg_internal("recovery_target_timeline = %u", rtli)));
+                                  (errmsg_internal("recovery_target_timeline = %u", rtli)));
                        else
                                ereport(DEBUG2,
-                                               (errmsg_internal("recovery_target_timeline = latest")));
+                                        (errmsg_internal("recovery_target_timeline = latest")));
                }
                else if (strcmp(item->name, "recovery_target_xid") == 0)
                {
@@ -5404,7 +5408,7 @@ readRecoveryCommandFile(void)
                                                 item->value)));
                        ereport(DEBUG2,
                                        (errmsg_internal("recovery_target_xid = %u",
-                                                                        recoveryTargetXid)));
+                                                                        recoveryTargetXid)));
                        recoveryTarget = RECOVERY_TARGET_XID;
                }
                else if (strcmp(item->name, "recovery_target_time") == 0)
@@ -5428,7 +5432,7 @@ readRecoveryCommandFile(void)
                                                                                                                Int32GetDatum(-1)));
                        ereport(DEBUG2,
                                        (errmsg_internal("recovery_target_time = '%s'",
-                                                                        timestamptz_to_str(recoveryTargetTime))));
+                                                                  timestamptz_to_str(recoveryTargetTime))));
                }
                else if (strcmp(item->name, "recovery_target_name") == 0)
                {
@@ -5576,13 +5580,13 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg)
        }
 
        /*
-        * If we are establishing a new timeline, we have to copy data from
-        * the last WAL segment of the old timeline to create a starting WAL
-        * segment for the new timeline.
+        * If we are establishing a new timeline, we have to copy data from the
+        * last WAL segment of the old timeline to create a starting WAL segment
+        * for the new timeline.
         *
-        * Notify the archiver that the last WAL segment of the old timeline
-        * is ready to copy to archival storage. Otherwise, it is not archived
-        * for a while.
+        * Notify the archiver that the last WAL segment of the old timeline is
+        * ready to copy to archival storage. Otherwise, it is not archived for a
+        * while.
         */
        if (endTLI != ThisTimeLineID)
        {
@@ -5604,8 +5608,8 @@ exitArchiveRecovery(TimeLineID endTLI, uint32 endLogId, uint32 endLogSeg)
        XLogArchiveCleanup(xlogpath);
 
        /*
-        * Since there might be a partial WAL segment named RECOVERYXLOG,
-        * get rid of it.
+        * Since there might be a partial WAL segment named RECOVERYXLOG, get rid
+        * of it.
         */
        snprintf(recoveryPath, MAXPGPATH, XLOGDIR "/RECOVERYXLOG");
        unlink(recoveryPath);           /* ignore any error */
@@ -6323,11 +6327,11 @@ StartupXLOG(void)
                /*
                 * Set backupStartPoint if we're starting recovery from a base backup.
                 *
-                * Set backupEndPoint and use minRecoveryPoint as the backup end location
-                * if we're starting recovery from a base backup which was taken from
-                * the standby. In this case, the database system status in pg_control must
-                * indicate DB_IN_ARCHIVE_RECOVERY. If not, which means that backup
-                * is corrupted, so we cancel recovery.
+                * Set backupEndPoint and use minRecoveryPoint as the backup end
+                * location if we're starting recovery from a base backup which was
+                * taken from the standby. In this case, the database system status in
+                * pg_control must indicate DB_IN_ARCHIVE_RECOVERY. If not, which
+                * means that backup is corrupted, so we cancel recovery.
                 */
                if (haveBackupLabel)
                {
@@ -6340,7 +6344,7 @@ StartupXLOG(void)
                                        ereport(FATAL,
                                                        (errmsg("backup_label contains inconsistent data with control file"),
                                                         errhint("This means that the backup is corrupted and you will "
-                                                                        "have to use another backup for recovery.")));
+                                                          "have to use another backup for recovery.")));
                                ControlFile->backupEndPoint = ControlFile->minRecoveryPoint;
                        }
                }
@@ -6383,15 +6387,15 @@ StartupXLOG(void)
 
                /*
                 * We're in recovery, so unlogged relations may be trashed and must be
-                * reset.  This should be done BEFORE allowing Hot Standby connections,
-                * so that read-only backends don't try to read whatever garbage is
-                * left over from before.
+                * reset.  This should be done BEFORE allowing Hot Standby
+                * connections, so that read-only backends don't try to read whatever
+                * garbage is left over from before.
                 */
                ResetUnloggedRelations(UNLOGGED_RELATION_CLEANUP);
 
                /*
-                * Likewise, delete any saved transaction snapshot files that got
-                * left behind by crashed backends.
+                * Likewise, delete any saved transaction snapshot files that got left
+                * behind by crashed backends.
                 */
                DeleteAllExportedSnapshotFiles();
 
@@ -6489,10 +6493,11 @@ StartupXLOG(void)
 
                /*
                 * Let postmaster know we've started redo now, so that it can launch
-                * checkpointer to perform restartpoints.  We don't bother during crash
-                * recovery as restartpoints can only be performed during archive
-                * recovery.  And we'd like to keep crash recovery simple, to avoid
-                * introducing bugs that could affect you when recovering after crash.
+                * checkpointer to perform restartpoints.  We don't bother during
+                * crash recovery as restartpoints can only be performed during
+                * archive recovery.  And we'd like to keep crash recovery simple, to
+                * avoid introducing bugs that could affect you when recovering after
+                * crash.
                 *
                 * After this point, we can no longer assume that we're the only
                 * process in addition to postmaster!  Also, fsync requests are
@@ -6649,8 +6654,8 @@ StartupXLOG(void)
                                {
                                        /*
                                         * We have reached the end of base backup, the point where
-                                        * the minimum recovery point in pg_control indicates.
-                                        * The data on disk is now consistent. Reset backupStartPoint
+                                        * the minimum recovery point in pg_control indicates. The
+                                        * data on disk is now consistent. Reset backupStartPoint
                                         * and backupEndPoint.
                                         */
                                        elog(DEBUG1, "end of backup reached");
@@ -6863,9 +6868,9 @@ StartupXLOG(void)
        oldestActiveXID = PrescanPreparedTransactions(NULL, NULL);
 
        /*
-        * Update full_page_writes in shared memory and write an
-        * XLOG_FPW_CHANGE record before resource manager writes cleanup
-        * WAL records or checkpoint record is written.
+        * Update full_page_writes in shared memory and write an XLOG_FPW_CHANGE
+        * record before resource manager writes cleanup WAL records or checkpoint
+        * record is written.
         */
        Insert->fullPageWrites = lastFullPageWrites;
        LocalSetXLogInsertAllowed();
@@ -6954,8 +6959,8 @@ StartupXLOG(void)
        LWLockRelease(ProcArrayLock);
 
        /*
-        * Start up the commit log and subtrans, if not already done for
-        * hot standby.
+        * Start up the commit log and subtrans, if not already done for hot
+        * standby.
         */
        if (standbyState == STANDBY_DISABLED)
        {
@@ -7705,9 +7710,9 @@ CreateCheckPoint(int flags)
        checkPoint.time = (pg_time_t) time(NULL);
 
        /*
-        * For Hot Standby, derive the oldestActiveXid before we fix the redo pointer.
-        * This allows us to begin accumulating changes to assemble our starting
-        * snapshot of locks and transactions.
+        * For Hot Standby, derive the oldestActiveXid before we fix the redo
+        * pointer. This allows us to begin accumulating changes to assemble our
+        * starting snapshot of locks and transactions.
         */
        if (!shutdown && XLogStandbyInfoActive())
                checkPoint.oldestActiveXid = GetOldestActiveTransactionId();
@@ -8062,7 +8067,7 @@ RecoveryRestartPoint(const CheckPoint *checkPoint)
        volatile XLogCtlData *xlogctl = XLogCtl;
 
        /*
-        * Is it safe to restartpoint?  We must ask each of the resource managers
+        * Is it safe to restartpoint?  We must ask each of the resource managers
         * whether they have any partial state information that might prevent a
         * correct restart from this point.  If so, we skip this opportunity, but
         * return at the next checkpoint record for another try.
@@ -8082,10 +8087,11 @@ RecoveryRestartPoint(const CheckPoint *checkPoint)
        }
 
        /*
-        * Also refrain from creating a restartpoint if we have seen any references
-        * to non-existent pages. Restarting recovery from the restartpoint would
-        * not see the references, so we would lose the cross-check that the pages
-        * belonged to a relation that was dropped later.
+        * Also refrain from creating a restartpoint if we have seen any
+        * references to non-existent pages. Restarting recovery from the
+        * restartpoint would not see the references, so we would lose the
+        * cross-check that the pages belonged to a relation that was dropped
+        * later.
         */
        if (XLogHaveInvalidPages())
        {
@@ -8098,8 +8104,8 @@ RecoveryRestartPoint(const CheckPoint *checkPoint)
        }
 
        /*
-        * Copy the checkpoint record to shared memory, so that checkpointer
-        * can work out the next time it wants to perform a restartpoint.
+        * Copy the checkpoint record to shared memory, so that checkpointer can
+        * work out the next time it wants to perform a restartpoint.
         */
        SpinLockAcquire(&xlogctl->info_lck);
        XLogCtl->lastCheckPointRecPtr = ReadRecPtr;
@@ -8493,8 +8499,8 @@ UpdateFullPageWrites(void)
         * Do nothing if full_page_writes has not been changed.
         *
         * It's safe to check the shared full_page_writes without the lock,
-        * because we assume that there is no concurrently running process
-        * which can update it.
+        * because we assume that there is no concurrently running process which
+        * can update it.
         */
        if (fullPageWrites == Insert->fullPageWrites)
                return;
@@ -8505,8 +8511,8 @@ UpdateFullPageWrites(void)
         * It's always safe to take full page images, even when not strictly
         * required, but not the other round. So if we're setting full_page_writes
         * to true, first set it true and then write the WAL record. If we're
-        * setting it to false, first write the WAL record and then set the
-        * global flag.
+        * setting it to false, first write the WAL record and then set the global
+        * flag.
         */
        if (fullPageWrites)
        {
@@ -8516,12 +8522,12 @@ UpdateFullPageWrites(void)
        }
 
        /*
-        * Write an XLOG_FPW_CHANGE record. This allows us to keep
-        * track of full_page_writes during archive recovery, if required.
+        * Write an XLOG_FPW_CHANGE record. This allows us to keep track of
+        * full_page_writes during archive recovery, if required.
         */
        if (XLogStandbyInfoActive() && !RecoveryInProgress())
        {
-               XLogRecData     rdata;
+               XLogRecData rdata;
 
                rdata.data = (char *) (&fullPageWrites);
                rdata.len = sizeof(bool);
@@ -8561,7 +8567,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
                /*
                 * We used to try to take the maximum of ShmemVariableCache->nextOid
                 * and the recorded nextOid, but that fails if the OID counter wraps
-                * around.  Since no OID allocation should be happening during replay
+                * around.      Since no OID allocation should be happening during replay
                 * anyway, better to just believe the record exactly.  We still take
                 * OidGenLock while setting the variable, just in case.
                 */
@@ -8597,7 +8603,7 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
                        !XLogRecPtrIsInvalid(ControlFile->backupStartPoint) &&
                        XLogRecPtrIsInvalid(ControlFile->backupEndPoint))
                        ereport(PANIC,
-                                       (errmsg("online backup was canceled, recovery cannot continue")));
+                       (errmsg("online backup was canceled, recovery cannot continue")));
 
                /*
                 * If we see a shutdown checkpoint, we know that nothing was running
@@ -8797,9 +8803,9 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
                memcpy(&fpw, XLogRecGetData(record), sizeof(bool));
 
                /*
-                * Update the LSN of the last replayed XLOG_FPW_CHANGE record
-                * so that do_pg_start_backup() and do_pg_stop_backup() can check
-                * whether full_page_writes has been disabled during online backup.
+                * Update the LSN of the last replayed XLOG_FPW_CHANGE record so that
+                * do_pg_start_backup() and do_pg_stop_backup() can check whether
+                * full_page_writes has been disabled during online backup.
                 */
                if (!fpw)
                {
@@ -8825,7 +8831,7 @@ xlog_desc(StringInfo buf, uint8 xl_info, char *rec)
                CheckPoint *checkpoint = (CheckPoint *) rec;
 
                appendStringInfo(buf, "checkpoint: redo %X/%X; "
-                                                "tli %u; fpw %s; xid %u/%u; oid %u; multi %u; offset %u; "
+                                  "tli %u; fpw %s; xid %u/%u; oid %u; multi %u; offset %u; "
                                                 "oldest xid %u in DB %u; oldest running xid %u; %s",
                                                 checkpoint->redo.xlogid, checkpoint->redo.xrecoff,
                                                 checkpoint->ThisTimeLineID,
@@ -9115,8 +9121,8 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
                                 errhint("WAL control functions cannot be executed during recovery.")));
 
        /*
-        * During recovery, we don't need to check WAL level. Because, if WAL level
-        * is not sufficient, it's impossible to get here during recovery.
+        * During recovery, we don't need to check WAL level. Because, if WAL
+        * level is not sufficient, it's impossible to get here during recovery.
         */
        if (!backup_started_in_recovery && !XLogIsNeeded())
                ereport(ERROR,
@@ -9179,7 +9185,7 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
                 * old timeline IDs.  That would otherwise happen if you called
                 * pg_start_backup() right after restoring from a PITR archive: the
                 * first WAL segment containing the startup checkpoint has pages in
-                * the beginning with the old timeline ID.  That can cause trouble at
+                * the beginning with the old timeline ID.      That can cause trouble at
                 * recovery: we won't have a history file covering the old timeline if
                 * pg_xlog directory was not included in the base backup and the WAL
                 * archive was cleared too before starting the backup.
@@ -9202,17 +9208,18 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
                        bool            checkpointfpw;
 
                        /*
-                        * Force a CHECKPOINT.  Aside from being necessary to prevent torn
+                        * Force a CHECKPOINT.  Aside from being necessary to prevent torn
                         * page problems, this guarantees that two successive backup runs
                         * will have different checkpoint positions and hence different
                         * history file names, even if nothing happened in between.
                         *
-                        * During recovery, establish a restartpoint if possible. We use the last
-                        * restartpoint as the backup starting checkpoint. This means that two
-                        * successive backup runs can have same checkpoint positions.
+                        * During recovery, establish a restartpoint if possible. We use
+                        * the last restartpoint as the backup starting checkpoint. This
+                        * means that two successive backup runs can have same checkpoint
+                        * positions.
                         *
-                        * Since the fact that we are executing do_pg_start_backup() during
-                        * recovery means that checkpointer is running, we can use
+                        * Since the fact that we are executing do_pg_start_backup()
+                        * during recovery means that checkpointer is running, we can use
                         * RequestCheckpoint() to establish a restartpoint.
                         *
                         * We use CHECKPOINT_IMMEDIATE only if requested by user (via
@@ -9237,12 +9244,12 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
                        {
                                /* use volatile pointer to prevent code rearrangement */
                                volatile XLogCtlData *xlogctl = XLogCtl;
-                               XLogRecPtr              recptr;
+                               XLogRecPtr      recptr;
 
                                /*
-                                * Check to see if all WAL replayed during online backup (i.e.,
-                                * since last restartpoint used as backup starting checkpoint)
-                                * contain full-page writes.
+                                * Check to see if all WAL replayed during online backup
+                                * (i.e., since last restartpoint used as backup starting
+                                * checkpoint) contain full-page writes.
                                 */
                                SpinLockAcquire(&xlogctl->info_lck);
                                recptr = xlogctl->lastFpwDisableRecPtr;
@@ -9250,20 +9257,20 @@ do_pg_start_backup(const char *backupidstr, bool fast, char **labelfile)
 
                                if (!checkpointfpw || XLByteLE(startpoint, recptr))
                                        ereport(ERROR,
-                                                       (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                                                        errmsg("WAL generated with full_page_writes=off was replayed "
-                                                                       "since last restartpoint"),
-                                                        errhint("This means that the backup being taken on standby "
-                                                                        "is corrupt and should not be used. "
-                                                                        "Enable full_page_writes and run CHECKPOINT on the master, "
-                                                                        "and then try an online backup again.")));
+                                                 (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
+                                                  errmsg("WAL generated with full_page_writes=off was replayed "
+                                                                 "since last restartpoint"),
+                                                  errhint("This means that the backup being taken on standby "
+                                                                  "is corrupt and should not be used. "
+                                                                  "Enable full_page_writes and run CHECKPOINT on the master, "
+                                                                  "and then try an online backup again.")));
 
                                /*
                                 * During recovery, since we don't use the end-of-backup WAL
-                                * record and don't write the backup history file, the starting WAL
-                                * location doesn't need to be unique. This means that two base
-                                * backups started at the same time might use the same checkpoint
-                                * as starting locations.
+                                * record and don't write the backup history file, the
+                                * starting WAL location doesn't need to be unique. This means
+                                * that two base backups started at the same time might use
+                                * the same checkpoint as starting locations.
                                 */
                                gotUniqueStartpoint = true;
                        }
@@ -9443,8 +9450,8 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive)
                                 errhint("WAL control functions cannot be executed during recovery.")));
 
        /*
-        * During recovery, we don't need to check WAL level. Because, if WAL level
-        * is not sufficient, it's impossible to get here during recovery.
+        * During recovery, we don't need to check WAL level. Because, if WAL
+        * level is not sufficient, it's impossible to get here during recovery.
         */
        if (!backup_started_in_recovery && !XLogIsNeeded())
                ereport(ERROR,
@@ -9537,9 +9544,9 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive)
        remaining = strchr(labelfile, '\n') + 1;        /* %n is not portable enough */
 
        /*
-        * Parse the BACKUP FROM line. If we are taking an online backup from
-        * the standby, we confirm that the standby has not been promoted
-        * during the backup.
+        * Parse the BACKUP FROM line. If we are taking an online backup from the
+        * standby, we confirm that the standby has not been promoted during the
+        * backup.
         */
        ptr = strstr(remaining, "BACKUP FROM:");
        if (!ptr || sscanf(ptr, "BACKUP FROM: %19s\n", backupfrom) != 1)
@@ -9555,30 +9562,30 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive)
                                                 "Try taking another online backup.")));
 
        /*
-        * During recovery, we don't write an end-of-backup record. We assume
-        * that pg_control was backed up last and its minimum recovery
-        * point can be available as the backup end location. Since we don't
-        * have an end-of-backup record, we use the pg_control value to check
-        * whether we've reached the end of backup when starting recovery from
-        * this backup. We have no way of checking if pg_control wasn't backed
-        * up last however.
+        * During recovery, we don't write an end-of-backup record. We assume that
+        * pg_control was backed up last and its minimum recovery point can be
+        * available as the backup end location. Since we don't have an
+        * end-of-backup record, we use the pg_control value to check whether
+        * we've reached the end of backup when starting recovery from this
+        * backup. We have no way of checking if pg_control wasn't backed up last
+        * however.
         *
         * We don't force a switch to new WAL file and wait for all the required
-        * files to be archived. This is okay if we use the backup to start
-        * the standby. But, if it's for an archive recovery, to ensure all the
-        * required files are available, a user should wait for them to be archived,
-        * or include them into the backup.
+        * files to be archived. This is okay if we use the backup to start the
+        * standby. But, if it's for an archive recovery, to ensure all the
+        * required files are available, a user should wait for them to be
+        * archived, or include them into the backup.
         *
         * We return the current minimum recovery point as the backup end
         * location. Note that it's would be bigger than the exact backup end
-        * location if the minimum recovery point is updated since the backup
-        * of pg_control. This is harmless for current uses.
+        * location if the minimum recovery point is updated since the backup of
+        * pg_control. This is harmless for current uses.
         *
         * XXX currently a backup history file is for informational and debug
         * purposes only. It's not essential for an online backup. Furthermore,
         * even if it's created, it will not be archived during recovery because
-        * an archiver is not invoked. So it doesn't seem worthwhile to write
-        * backup history file during recovery.
+        * an archiver is not invoked. So it doesn't seem worthwhile to write a
+        * backup history file during recovery.
         */
        if (backup_started_in_recovery)
        {
@@ -9597,12 +9604,12 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive)
                if (XLByteLE(startpoint, recptr))
                        ereport(ERROR,
                                        (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
-                                        errmsg("WAL generated with full_page_writes=off was replayed "
-                                                       "during online backup"),
-                                        errhint("This means that the backup being taken on standby "
-                                                        "is corrupt and should not be used. "
-                                                        "Enable full_page_writes and run CHECKPOINT on the master, "
-                                                        "and then try an online backup again.")));
+                          errmsg("WAL generated with full_page_writes=off was replayed "
+                                         "during online backup"),
+                                errhint("This means that the backup being taken on standby "
+                                                "is corrupt and should not be used. "
+                                "Enable full_page_writes and run CHECKPOINT on the master, "
+                                                "and then try an online backup again.")));
 
 
                LWLockAcquire(ControlFileLock, LW_SHARED);
@@ -9905,10 +9912,11 @@ read_backup_label(XLogRecPtr *checkPointLoc, bool *backupEndRequired,
                ereport(FATAL,
                                (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
                                 errmsg("invalid data in file \"%s\"", BACKUP_LABEL_FILE)));
+
        /*
-        * BACKUP METHOD and BACKUP FROM lines are new in 9.2. We can't
-        * restore from an older backup anyway, but since the information on it
-        * is not strictly required, don't error out if it's missing for some reason.
+        * BACKUP METHOD and BACKUP FROM lines are new in 9.2. We can't restore
+        * from an older backup anyway, but since the information on it is not
+        * strictly required, don't error out if it's missing for some reason.
         */
        if (fscanf(lfp, "BACKUP METHOD: %19s\n", backuptype) == 1)
        {
@@ -10050,8 +10058,8 @@ XLogPageRead(XLogRecPtr *RecPtr, int emode, bool fetching_ckpt,
        if (readFile >= 0 && !XLByteInSeg(*RecPtr, readId, readSeg))
        {
                /*
-                * Request a restartpoint if we've replayed too much
-                * xlog since the last one.
+                * Request a restartpoint if we've replayed too much xlog since the
+                * last one.
                 */
                if (StandbyMode && bgwriterLaunched)
                {
index f286cdfc073b39e1f352e101f40be27710e84136..6ddcc59b37a45941fd17f8e0b7d3ff0976b346e0 100644 (file)
@@ -80,10 +80,10 @@ log_invalid_page(RelFileNode node, ForkNumber forkno, BlockNumber blkno,
        /*
         * Once recovery has reached a consistent state, the invalid-page table
         * should be empty and remain so. If a reference to an invalid page is
-        * found after consistency is reached, PANIC immediately. This might
-        * seem aggressive, but it's better than letting the invalid reference
-        * linger in the hash table until the end of recovery and PANIC there,
-        * which might come only much later if this is a standby server.
+        * found after consistency is reached, PANIC immediately. This might seem
+        * aggressive, but it's better than letting the invalid reference linger
+        * in the hash table until the end of recovery and PANIC there, which
+        * might come only much later if this is a standby server.
         */
        if (reachedConsistency)
        {
index 9315e79c991d81dcc35881d4ae24fcb71ea30804..45cd0808ce80c609373de189bb330659c54b9895 100644 (file)
@@ -186,10 +186,10 @@ merge_acl_with_grant(Acl *old_acl, bool is_grant,
 
        foreach(j, grantees)
        {
-               AclItem aclitem;
+               AclItem         aclitem;
                Acl                *newer_acl;
 
-               aclitem.        ai_grantee = lfirst_oid(j);
+               aclitem.ai_grantee = lfirst_oid(j);
 
                /*
                 * Grant options can only be granted to individual roles, not PUBLIC.
@@ -202,7 +202,7 @@ merge_acl_with_grant(Acl *old_acl, bool is_grant,
                                        (errcode(ERRCODE_INVALID_GRANT_OPERATION),
                                         errmsg("grant options can only be granted to roles")));
 
-               aclitem.        ai_grantor = grantorId;
+               aclitem.ai_grantor = grantorId;
 
                /*
                 * The asymmetry in the conditions here comes from the spec.  In
@@ -3073,7 +3073,7 @@ ExecGrant_Type(InternalGrant *istmt)
                        ereport(ERROR,
                                        (errcode(ERRCODE_INVALID_GRANT_OPERATION),
                                         errmsg("cannot set privileges of array types"),
-                                        errhint("Set the privileges of the element type instead.")));
+                               errhint("Set the privileges of the element type instead.")));
 
                /* Used GRANT DOMAIN on a non-domain? */
                if (istmt->objtype == ACL_OBJECT_DOMAIN &&
@@ -4184,7 +4184,7 @@ pg_type_aclmask(Oid type_oid, Oid roleid, AclMode mask, AclMaskHow how)
        /* "True" array types don't manage permissions of their own */
        if (typeForm->typelem != 0 && typeForm->typlen == -1)
        {
-               Oid             elttype_oid = typeForm->typelem;
+               Oid                     elttype_oid = typeForm->typelem;
 
                ReleaseSysCache(tuple);
 
index db6769cb90b699313ef12dbb31d6176d6f3e7d7d..d4e1f76f315bcb58c48f386428956108bf10c766 100644 (file)
@@ -173,7 +173,7 @@ static void reportDependentObjects(const ObjectAddresses *targetObjects,
                                           int msglevel,
                                           const ObjectAddress *origObject);
 static void deleteOneObject(const ObjectAddress *object,
-                                                       Relation depRel, int32 flags);
+                               Relation depRel, int32 flags);
 static void doDeletion(const ObjectAddress *object, int flags);
 static void AcquireDeletionLock(const ObjectAddress *object, int flags);
 static void ReleaseDeletionLock(const ObjectAddress *object);
@@ -352,7 +352,8 @@ performMultipleDeletions(const ObjectAddresses *objects,
        free_object_addresses(targetObjects);
 
        /*
-        * We closed depRel earlier in deleteOneObject if doing a drop concurrently
+        * We closed depRel earlier in deleteOneObject if doing a drop
+        * concurrently
         */
        if ((flags & PERFORM_DELETION_CONCURRENTLY) != PERFORM_DELETION_CONCURRENTLY)
                heap_close(depRel, RowExclusiveLock);
@@ -424,7 +425,7 @@ deleteWhatDependsOn(const ObjectAddress *object,
                 * Since this function is currently only used to clean out temporary
                 * schemas, we pass PERFORM_DELETION_INTERNAL here, indicating that
                 * the operation is an automatic system operation rather than a user
-                * action.  If, in the future, this function is used for other
+                * action.      If, in the future, this function is used for other
                 * purposes, we might need to revisit this.
                 */
                deleteOneObject(thisobj, depRel, PERFORM_DELETION_INTERNAL);
@@ -514,12 +515,12 @@ findDependentObjects(const ObjectAddress *object,
        /*
         * The target object might be internally dependent on some other object
         * (its "owner"), and/or be a member of an extension (also considered its
-        * owner).  If so, and if we aren't recursing from the owning object, we
+        * owner).      If so, and if we aren't recursing from the owning object, we
         * have to transform this deletion request into a deletion request of the
         * owning object.  (We'll eventually recurse back to this object, but the
-        * owning object has to be visited first so it will be deleted after.)
-        * The way to find out about this is to scan the pg_depend entries that
-        * show what this object depends on.
+        * owning object has to be visited first so it will be deleted after.) The
+        * way to find out about this is to scan the pg_depend entries that show
+        * what this object depends on.
         */
        ScanKeyInit(&key[0],
                                Anum_pg_depend_classid,
@@ -577,7 +578,7 @@ findDependentObjects(const ObjectAddress *object,
                                        /*
                                         * Exception 1a: if the owning object is listed in
                                         * pendingObjects, just release the caller's lock and
-                                        * return.  We'll eventually complete the DROP when we
+                                        * return.      We'll eventually complete the DROP when we
                                         * reach that entry in the pending list.
                                         */
                                        if (pendingObjects &&
@@ -593,8 +594,8 @@ findDependentObjects(const ObjectAddress *object,
                                         * Exception 1b: if the owning object is the extension
                                         * currently being created/altered, it's okay to continue
                                         * with the deletion.  This allows dropping of an
-                                        * extension's objects within the extension's scripts,
-                                        * as well as corner cases such as dropping a transient
+                                        * extension's objects within the extension's scripts, as
+                                        * well as corner cases such as dropping a transient
                                         * object created within such a script.
                                         */
                                        if (creating_extension &&
@@ -618,8 +619,8 @@ findDependentObjects(const ObjectAddress *object,
                                 * it's okay to continue with the deletion.  This holds when
                                 * recursing from a whole object that includes the nominal
                                 * other end as a component, too.  Since there can be more
-                                * than one "owning" object, we have to allow matches that
-                                * are more than one level down in the stack.
+                                * than one "owning" object, we have to allow matches that are
+                                * more than one level down in the stack.
                                 */
                                if (stack_address_present_add_flags(&otherObject, 0, stack))
                                        break;
@@ -630,7 +631,7 @@ findDependentObjects(const ObjectAddress *object,
                                 * owning object.
                                 *
                                 * First, release caller's lock on this object and get
-                                * deletion lock on the owning object.  (We must release
+                                * deletion lock on the owning object.  (We must release
                                 * caller's lock to avoid deadlock against a concurrent
                                 * deletion of the owning object.)
                                 */
@@ -999,7 +1000,8 @@ deleteOneObject(const ObjectAddress *object, Relation depRel, int flags)
        /* DROP hook of the objects being removed */
        if (object_access_hook)
        {
-               ObjectAccessDrop        drop_arg;
+               ObjectAccessDrop drop_arg;
+
                drop_arg.dropflags = flags;
                InvokeObjectAccessHook(OAT_DROP, object->classId, object->objectId,
                                                           object->objectSubId, &drop_arg);
@@ -1049,8 +1051,8 @@ deleteOneObject(const ObjectAddress *object, Relation depRel, int flags)
                                                                         object->objectSubId);
 
        /*
-        * Close depRel if we are doing a drop concurrently because it
-        * commits the transaction, so we don't want dangling references.
+        * Close depRel if we are doing a drop concurrently because it commits the
+        * transaction, so we don't want dangling references.
         */
        if ((flags & PERFORM_DELETION_CONCURRENTLY) == PERFORM_DELETION_CONCURRENTLY)
                heap_close(depRel, RowExclusiveLock);
@@ -1093,8 +1095,8 @@ doDeletion(const ObjectAddress *object, int flags)
 
                                if (relKind == RELKIND_INDEX)
                                {
-                                       bool concurrent = ((flags & PERFORM_DELETION_CONCURRENTLY)
-                                                                                                       == PERFORM_DELETION_CONCURRENTLY);
+                                       bool            concurrent = ((flags & PERFORM_DELETION_CONCURRENTLY)
+                                                                                  == PERFORM_DELETION_CONCURRENTLY);
 
                                        Assert(object->objectSubId == 0);
                                        index_drop(object->objectId, concurrent);
index 8fc69ae72022a057ae35dc668dce60c37357596e..49e7644699383755529b93461e91558705d17699 100644 (file)
@@ -1957,7 +1957,7 @@ StoreRelCheck(Relation rel, char *ccname, Node *expr,
                                                  ccsrc,        /* Source form of check constraint */
                                                  is_local,             /* conislocal */
                                                  inhcount,             /* coninhcount */
-                                                 is_no_inherit);       /* connoinherit */
+                                                 is_no_inherit);               /* connoinherit */
 
        pfree(ccbin);
        pfree(ccsrc);
@@ -1998,7 +1998,7 @@ StoreConstraints(Relation rel, List *cooked_constraints)
                                break;
                        case CONSTR_CHECK:
                                StoreRelCheck(rel, con->name, con->expr, !con->skip_validation,
-                                                         con->is_local, con->inhcount, con->is_no_inherit);
+                                                  con->is_local, con->inhcount, con->is_no_inherit);
                                numchecks++;
                                break;
                        default:
@@ -2345,8 +2345,8 @@ MergeWithExistingConstraint(Relation rel, char *ccname, Node *expr,
                        }
                        /* OK to update the tuple */
                        ereport(NOTICE,
-                                       (errmsg("merging constraint \"%s\" with inherited definition",
-                                                       ccname)));
+                          (errmsg("merging constraint \"%s\" with inherited definition",
+                                          ccname)));
                        simple_heap_update(conDesc, &tup->t_self, tup);
                        CatalogUpdateIndexes(conDesc, tup);
                        break;
index 998379c8af3e4dee11a8bd55367c0606f8259cc7..9e8b1cc49bc0cbd4d94e3cc4fa802ca59395a68b 100644 (file)
@@ -1155,7 +1155,7 @@ index_constraint_create(Relation heapRelation,
                                                                   NULL,
                                                                   NULL,
                                                                   true,                /* islocal */
-                                                                  0,                   /* inhcount */
+                                                                  0,   /* inhcount */
                                                                   false);              /* noinherit */
 
        /*
@@ -1324,8 +1324,8 @@ index_drop(Oid indexId, bool concurrent)
        CheckTableNotInUse(userIndexRelation, "DROP INDEX");
 
        /*
-        * Drop Index concurrently is similar in many ways to creating an
-        * index concurrently, so some actions are similar to DefineIndex()
+        * Drop Index concurrently is similar in many ways to creating an index
+        * concurrently, so some actions are similar to DefineIndex()
         */
        if (concurrent)
        {
@@ -1339,7 +1339,7 @@ index_drop(Oid indexId, bool concurrent)
                indexRelation = heap_open(IndexRelationId, RowExclusiveLock);
 
                tuple = SearchSysCacheCopy1(INDEXRELID,
-                                                                                ObjectIdGetDatum(indexId));
+                                                                       ObjectIdGetDatum(indexId));
                if (!HeapTupleIsValid(tuple))
                        elog(ERROR, "cache lookup failed for index %u", indexId);
                indexForm = (Form_pg_index) GETSTRUCT(tuple);
@@ -1373,15 +1373,15 @@ index_drop(Oid indexId, bool concurrent)
                 * will be marked not indisvalid, so that no one else tries to either
                 * insert into it or use it for queries.
                 *
-                * We must commit our current transaction so that the index update becomes
-                * visible; then start another.  Note that all the data structures we just
-                * built are lost in the commit.  The only data we keep past here are the
-                * relation IDs.
+                * We must commit our current transaction so that the index update
+                * becomes visible; then start another.  Note that all the data
+                * structures we just built are lost in the commit.  The only data we
+                * keep past here are the relation IDs.
                 *
                 * Before committing, get a session-level lock on the table, to ensure
                 * that neither it nor the index can be dropped before we finish. This
-                * cannot block, even if someone else is waiting for access, because we
-                * already have the same lock within our transaction.
+                * cannot block, even if someone else is waiting for access, because
+                * we already have the same lock within our transaction.
                 */
                LockRelationIdForSession(&heaprelid, ShareUpdateExclusiveLock);
                LockRelationIdForSession(&indexrelid, ShareUpdateExclusiveLock);
@@ -1391,23 +1391,23 @@ index_drop(Oid indexId, bool concurrent)
                StartTransactionCommand();
 
                /*
-                * Now we must wait until no running transaction could have the table open
-                * with the old list of indexes.  To do this, inquire which xacts
-                * currently would conflict with AccessExclusiveLock on the table -- ie,
-                * which ones have a lock of any kind on the table.     Then wait for each of
-                * these xacts to commit or abort.      Note we do not need to worry about
-                * xacts that open the table for writing after this point; they will see
-                * the index as invalid when they open the relation.
+                * Now we must wait until no running transaction could have the table
+                * open with the old list of indexes.  To do this, inquire which xacts
+                * currently would conflict with AccessExclusiveLock on the table --
+                * ie, which ones have a lock of any kind on the table. Then wait for
+                * each of these xacts to commit or abort.      Note we do not need to
+                * worry about xacts that open the table for writing after this point;
+                * they will see the index as invalid when they open the relation.
                 *
-                * Note: the reason we use actual lock acquisition here, rather than just
-                * checking the ProcArray and sleeping, is that deadlock is possible if
-                * one of the transactions in question is blocked trying to acquire an
-                * exclusive lock on our table.  The lock code will detect deadlock and
-                * error out properly.
+                * Note: the reason we use actual lock acquisition here, rather than
+                * just checking the ProcArray and sleeping, is that deadlock is
+                * possible if one of the transactions in question is blocked trying
+                * to acquire an exclusive lock on our table.  The lock code will
+                * detect deadlock and error out properly.
                 *
-                * Note: GetLockConflicts() never reports our own xid, hence we need not
-                * check for that.      Also, prepared xacts are not reported, which is fine
-                * since they certainly aren't going to do anything more.
+                * Note: GetLockConflicts() never reports our own xid, hence we need
+                * not check for that.  Also, prepared xacts are not reported, which
+                * is fine since they certainly aren't going to do anything more.
                 */
                old_lockholders = GetLockConflicts(&heaplocktag, AccessExclusiveLock);
 
@@ -1786,7 +1786,7 @@ index_update_stats(Relation rel,
 
                if (rd_rel->relkind != RELKIND_INDEX)
                        relallvisible = visibilitymap_count(rel);
-               else                                    /* don't bother for indexes */
+               else    /* don't bother for indexes */
                        relallvisible = 0;
 
                if (rd_rel->relpages != (int32) relpages)
index e92efd863ed74fb77425333d772c194c3d36851b..1b6bb3bb6d7e02c065b3950bf83a715341aa8110 100644 (file)
@@ -226,7 +226,7 @@ Datum               pg_is_other_temp_schema(PG_FUNCTION_ARGS);
 Oid
 RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
                                                 bool missing_ok, bool nowait,
-                                                RangeVarGetRelidCallback callback, void *callback_arg)
+                                          RangeVarGetRelidCallback callback, void *callback_arg)
 {
        uint64          inval_count;
        Oid                     relId;
@@ -247,20 +247,20 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
        }
 
        /*
-        * DDL operations can change the results of a name lookup.  Since all
-        * such operations will generate invalidation messages, we keep track
-        * of whether any such messages show up while we're performing the
-        * operation, and retry until either (1) no more invalidation messages
-        * show up or (2) the answer doesn't change.
+        * DDL operations can change the results of a name lookup.      Since all such
+        * operations will generate invalidation messages, we keep track of
+        * whether any such messages show up while we're performing the operation,
+        * and retry until either (1) no more invalidation messages show up or (2)
+        * the answer doesn't change.
         *
         * But if lockmode = NoLock, then we assume that either the caller is OK
         * with the answer changing under them, or that they already hold some
         * appropriate lock, and therefore return the first answer we get without
-        * checking for invalidation messages.  Also, if the requested lock is
+        * checking for invalidation messages.  Also, if the requested lock is
         * already held, no LockRelationOid will not AcceptInvalidationMessages,
         * so we may fail to notice a change.  We could protect against that case
-        * by calling AcceptInvalidationMessages() before beginning this loop,
-        * but that would add a significant amount overhead, so for now we don't.
+        * by calling AcceptInvalidationMessages() before beginning this loop, but
+        * that would add a significant amount overhead, so for now we don't.
         */
        for (;;)
        {
@@ -282,17 +282,18 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
                if (relation->relpersistence == RELPERSISTENCE_TEMP)
                {
                        if (!OidIsValid(myTempNamespace))
-                               relId = InvalidOid;     /* this probably can't happen? */
+                               relId = InvalidOid;             /* this probably can't happen? */
                        else
                        {
                                if (relation->schemaname)
                                {
-                                       Oid             namespaceId;
+                                       Oid                     namespaceId;
+
                                        namespaceId = LookupExplicitNamespace(relation->schemaname);
                                        if (namespaceId != myTempNamespace)
                                                ereport(ERROR,
                                                                (errcode(ERRCODE_INVALID_TABLE_DEFINITION),
-                                                          errmsg("temporary tables cannot specify a schema name")));
+                                                                errmsg("temporary tables cannot specify a schema name")));
                                }
 
                                relId = get_relname_relid(relation->relname, myTempNamespace);
@@ -315,12 +316,12 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
                /*
                 * Invoke caller-supplied callback, if any.
                 *
-                * This callback is a good place to check permissions: we haven't taken
-                * the table lock yet (and it's really best to check permissions before
-                * locking anything!), but we've gotten far enough to know what OID we
-                * think we should lock.  Of course, concurrent DDL might change things
-                * while we're waiting for the lock, but in that case the callback will
-                * be invoked again for the new OID.
+                * This callback is a good place to check permissions: we haven't
+                * taken the table lock yet (and it's really best to check permissions
+                * before locking anything!), but we've gotten far enough to know what
+                * OID we think we should lock.  Of course, concurrent DDL might
+                * change things while we're waiting for the lock, but in that case
+                * the callback will be invoked again for the new OID.
                 */
                if (callback)
                        callback(relation, relId, oldRelId, callback_arg);
@@ -328,21 +329,21 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
                /*
                 * If no lock requested, we assume the caller knows what they're
                 * doing.  They should have already acquired a heavyweight lock on
-                * this relation earlier in the processing of this same statement,
-                * so it wouldn't be appropriate to AcceptInvalidationMessages()
-                * here, as that might pull the rug out from under them.
+                * this relation earlier in the processing of this same statement, so
+                * it wouldn't be appropriate to AcceptInvalidationMessages() here, as
+                * that might pull the rug out from under them.
                 */
                if (lockmode == NoLock)
                        break;
 
                /*
-                * If, upon retry, we get back the same OID we did last time, then
-                * the invalidation messages we processed did not change the final
-                * answer.  So we're done.
+                * If, upon retry, we get back the same OID we did last time, then the
+                * invalidation messages we processed did not change the final answer.
+                * So we're done.
                 *
                 * If we got a different OID, we've locked the relation that used to
-                * have this name rather than the one that does now.  So release
-                * the lock.
+                * have this name rather than the one that does now.  So release the
+                * lock.
                 */
                if (retry)
                {
@@ -384,8 +385,8 @@ RangeVarGetRelidExtended(const RangeVar *relation, LOCKMODE lockmode,
                        break;
 
                /*
-                * Something may have changed.  Let's repeat the name lookup, to
-                * make sure this name still references the same relation it did
+                * Something may have changed.  Let's repeat the name lookup, to make
+                * sure this name still references the same relation it did
                 * previously.
                 */
                retry = true;
@@ -550,8 +551,8 @@ RangeVarGetAndCheckCreationNamespace(RangeVar *relation,
                        relid = InvalidOid;
 
                /*
-                * In bootstrap processing mode, we don't bother with permissions
-                * or locking.  Permissions might not be working yet, and locking is
+                * In bootstrap processing mode, we don't bother with permissions or
+                * locking.  Permissions might not be working yet, and locking is
                 * unnecessary.
                 */
                if (IsBootstrapProcessingMode())
index d133f64776ee59a9611d3cf094a4a3832314781d..5a06fcbf41d5184adb538768b8f24747c2b687e2 100644 (file)
  */
 typedef struct
 {
-       Oid                     class_oid;                      /* oid of catalog */
-       Oid                     oid_index_oid;          /* oid of index on system oid column */
-       int                     oid_catcache_id;        /* id of catcache on system oid column  */
-       AttrNumber      attnum_namespace;       /* attnum of namespace field */
+       Oid                     class_oid;              /* oid of catalog */
+       Oid                     oid_index_oid;  /* oid of index on system oid column */
+       int                     oid_catcache_id;        /* id of catcache on system oid column  */
+       AttrNumber      attnum_namespace;               /* attnum of namespace field */
 } ObjectPropertyType;
 
 static ObjectPropertyType ObjectProperty[] =
@@ -286,13 +286,13 @@ get_object_address(ObjectType objtype, List *objname, List *objargs,
        for (;;)
        {
                /*
-                * Remember this value, so that, after looking up the object name
-                * and locking it, we can check whether any invalidation messages
-                * have been processed that might require a do-over.
+                * Remember this value, so that, after looking up the object name and
+                * locking it, we can check whether any invalidation messages have
+                * been processed that might require a do-over.
                 */
                inval_count = SharedInvalidMessageCounter;
 
-               /* Look up object address. */   
+               /* Look up object address. */
                switch (objtype)
                {
                        case OBJECT_INDEX:
@@ -367,7 +367,7 @@ get_object_address(ObjectType objtype, List *objname, List *objargs,
                        case OBJECT_OPCLASS:
                        case OBJECT_OPFAMILY:
                                address = get_object_address_opcf(objtype,
-                                                                                                 objname, objargs, missing_ok);
+                                                                                          objname, objargs, missing_ok);
                                break;
                        case OBJECT_LARGEOBJECT:
                                Assert(list_length(objname) == 1);
@@ -377,10 +377,10 @@ get_object_address(ObjectType objtype, List *objname, List *objargs,
                                if (!LargeObjectExists(address.objectId))
                                {
                                        if (!missing_ok)
-                                       ereport(ERROR,
-                                                       (errcode(ERRCODE_UNDEFINED_OBJECT),
-                                                        errmsg("large object %u does not exist",
-                                                                       address.objectId)));
+                                               ereport(ERROR,
+                                                               (errcode(ERRCODE_UNDEFINED_OBJECT),
+                                                                errmsg("large object %u does not exist",
+                                                                               address.objectId)));
                                }
                                break;
                        case OBJECT_CAST:
@@ -475,8 +475,8 @@ get_object_address(ObjectType objtype, List *objname, List *objargs,
                 * At this point, we've resolved the name to an OID and locked the
                 * corresponding database object.  However, it's possible that by the
                 * time we acquire the lock on the object, concurrent DDL has modified
-                * the database in such a way that the name we originally looked up
-                * no longer resolves to that OID.
+                * the database in such a way that the name we originally looked up no
+                * longer resolves to that OID.
                 *
                 * We can be certain that this isn't an issue if (a) no shared
                 * invalidation messages have been processed or (b) we've locked a
@@ -488,12 +488,12 @@ get_object_address(ObjectType objtype, List *objname, List *objargs,
                 * the relation, which is enough to freeze out any concurrent DDL.
                 *
                 * In all other cases, however, it's possible that the name we looked
-                * up no longer refers to the object we locked, so we retry the
-                * lookup and see whether we get the same answer.
+                * up no longer refers to the object we locked, so we retry the lookup
+                * and see whether we get the same answer.
                 */
-        if (inval_count == SharedInvalidMessageCounter || relation != NULL)
-            break;
-        old_address = address;
+               if (inval_count == SharedInvalidMessageCounter || relation != NULL)
+                       break;
+               old_address = address;
        }
 
        /* Return the object address and the relation. */
@@ -621,7 +621,7 @@ get_relation_by_qualified_name(ObjectType objtype, List *objname,
                                                           bool missing_ok)
 {
        Relation        relation;
-       ObjectAddress   address;
+       ObjectAddress address;
 
        address.classId = RelationRelationId;
        address.objectId = InvalidOid;
@@ -721,8 +721,8 @@ get_object_address_relobject(ObjectType objtype, List *objname,
                address.objectSubId = 0;
 
                /*
-                * Caller is expecting to get back the relation, even though we
-                * didn't end up using it to find the rule.
+                * Caller is expecting to get back the relation, even though we didn't
+                * end up using it to find the rule.
                 */
                if (OidIsValid(address.objectId))
                        relation = heap_open(reloid, AccessShareLock);
@@ -768,7 +768,7 @@ get_object_address_relobject(ObjectType objtype, List *objname,
                if (!OidIsValid(address.objectId))
                {
                        heap_close(relation, AccessShareLock);
-                       relation = NULL;                /* department of accident prevention */
+                       relation = NULL;        /* department of accident prevention */
                        return address;
                }
        }
@@ -834,9 +834,10 @@ static ObjectAddress
 get_object_address_type(ObjectType objtype,
                                                List *objname, bool missing_ok)
 {
-       ObjectAddress   address;
+       ObjectAddress address;
        TypeName   *typename;
-       Type        tup;
+       Type            tup;
+
        typename = makeTypeNameFromNameList(objname);
 
        address.classId = TypeRelationId;
@@ -1083,7 +1084,7 @@ get_object_namespace(const ObjectAddress *address)
        HeapTuple       tuple;
        bool            isnull;
        Oid                     oid;
-       ObjectPropertyType         *property;
+       ObjectPropertyType *property;
 
        /* If not owned by a namespace, just return InvalidOid. */
        property = get_object_property_data(address->classId);
@@ -1122,5 +1123,5 @@ get_object_property_data(Oid class_id)
                        return &ObjectProperty[index];
 
        elog(ERROR, "unrecognized class id: %u", class_id);
-       return NULL;            /* not reached */
+       return NULL;                            /* not reached */
 }
index dca5d09ee69b832739287934b70355ecfbfd8db3..224859d76e76e6b577c4fca786033660698953ca 100644 (file)
@@ -831,8 +831,8 @@ get_domain_constraint_oid(Oid typid, const char *conname, bool missing_ok)
                        if (OidIsValid(conOid))
                                ereport(ERROR,
                                                (errcode(ERRCODE_DUPLICATE_OBJECT),
-                                errmsg("domain \"%s\" has multiple constraints named \"%s\"",
-                                               format_type_be(typid), conname)));
+                               errmsg("domain \"%s\" has multiple constraints named \"%s\"",
+                                          format_type_be(typid), conname)));
     &nbs