pgindent run for 9.4
author Bruce Momjian <bruce@momjian.us>
Tue, 6 May 2014 16:12:18 +0000 (12:12 -0400)
committer Bruce Momjian <bruce@momjian.us>
Tue, 6 May 2014 16:12:18 +0000 (12:12 -0400)
This includes removing tabs after periods in C comments, which was
applied to back branches, so this change should not affect backpatching.
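
For illustration, a hypothetical before/after pair showing the comment change described above (a minimal sketch, not taken from any file in this commit; "<TAB>" stands for a literal tab character):

    /* Before pgindent: the sentence ends here.<TAB>The next one followed a tab. */
    /* After pgindent:  the sentence ends here.  The next one follows two spaces. */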

854 files changed:
config/test_quiet_include.h
contrib/auto_explain/auto_explain.c
contrib/btree_gist/btree_interval.c
contrib/cube/cube.c
contrib/cube/cubedata.h
contrib/dblink/dblink.c
contrib/earthdistance/earthdistance.c
contrib/file_fdw/file_fdw.c
contrib/fuzzystrmatch/levenshtein.c
contrib/hstore/hstore.h
contrib/hstore/hstore_gin.c
contrib/hstore/hstore_io.c
contrib/intarray/_int_bool.c
contrib/intarray/_int_gist.c
contrib/intarray/_int_tool.c
contrib/intarray/bench/bench.pl
contrib/ltree/ltree_op.c
contrib/oid2name/oid2name.c
contrib/pageinspect/rawpage.c
contrib/pg_archivecleanup/pg_archivecleanup.c
contrib/pg_prewarm/pg_prewarm.c
contrib/pg_stat_statements/pg_stat_statements.c
contrib/pg_test_fsync/pg_test_fsync.c
contrib/pg_trgm/trgm_gin.c
contrib/pg_trgm/trgm_gist.c
contrib/pg_trgm/trgm_regexp.c
contrib/pg_upgrade/check.c
contrib/pg_upgrade/controldata.c
contrib/pg_upgrade/dump.c
contrib/pg_upgrade/exec.c
contrib/pg_upgrade/file.c
contrib/pg_upgrade/function.c
contrib/pg_upgrade/info.c
contrib/pg_upgrade/option.c
contrib/pg_upgrade/page.c
contrib/pg_upgrade/parallel.c
contrib/pg_upgrade/pg_upgrade.c
contrib/pg_upgrade/pg_upgrade.h
contrib/pg_upgrade/relfilenode.c
contrib/pg_upgrade/server.c
contrib/pg_upgrade/tablespace.c
contrib/pg_upgrade/util.c
contrib/pg_upgrade/version_old_8_3.c
contrib/pg_xlogdump/compat.c
contrib/pg_xlogdump/pg_xlogdump.c
contrib/pgbench/pgbench.c
contrib/pgcrypto/crypt-des.c
contrib/pgcrypto/crypt-gensalt.c
contrib/pgcrypto/fortuna.c
contrib/pgcrypto/fortuna.h
contrib/pgcrypto/imath.c
contrib/pgcrypto/imath.h
contrib/pgcrypto/internal-sha2.c
contrib/pgcrypto/internal.c
contrib/pgcrypto/mbuf.c
contrib/pgcrypto/mbuf.h
contrib/pgcrypto/md5.c
contrib/pgcrypto/md5.h
contrib/pgcrypto/openssl.c
contrib/pgcrypto/pgcrypto.c
contrib/pgcrypto/pgcrypto.h
contrib/pgcrypto/pgp-armor.c
contrib/pgcrypto/pgp-cfb.c
contrib/pgcrypto/pgp-compress.c
contrib/pgcrypto/pgp-decrypt.c
contrib/pgcrypto/pgp-encrypt.c
contrib/pgcrypto/pgp-info.c
contrib/pgcrypto/pgp-mpi-internal.c
contrib/pgcrypto/pgp-mpi-openssl.c
contrib/pgcrypto/pgp-mpi.c
contrib/pgcrypto/pgp-pgsql.c
contrib/pgcrypto/pgp-pubdec.c
contrib/pgcrypto/pgp-pubenc.c
contrib/pgcrypto/pgp-pubkey.c
contrib/pgcrypto/pgp-s2k.c
contrib/pgcrypto/pgp.c
contrib/pgcrypto/pgp.h
contrib/pgcrypto/px-crypt.c
contrib/pgcrypto/px-crypt.h
contrib/pgcrypto/px-hmac.c
contrib/pgcrypto/px.c
contrib/pgcrypto/px.h
contrib/pgcrypto/random.c
contrib/pgcrypto/rijndael.c
contrib/pgcrypto/rijndael.h
contrib/pgcrypto/sha1.c
contrib/pgcrypto/sha1.h
contrib/pgcrypto/sha2.c
contrib/pgcrypto/sha2.h
contrib/pgstattuple/pgstatindex.c
contrib/pgstattuple/pgstattuple.c
contrib/postgres_fdw/connection.c
contrib/postgres_fdw/deparse.c
contrib/postgres_fdw/option.c
contrib/postgres_fdw/postgres_fdw.c
contrib/sepgsql/label.c
contrib/sepgsql/uavc.c
contrib/spi/timetravel.c
contrib/sslinfo/sslinfo.c
contrib/tcn/tcn.c
contrib/test_decoding/test_decoding.c
contrib/test_shm_mq/setup.c
contrib/test_shm_mq/test.c
contrib/test_shm_mq/test_shm_mq.h
contrib/test_shm_mq/worker.c
contrib/worker_spi/worker_spi.c
contrib/xml2/xpath.c
src/backend/access/common/heaptuple.c
src/backend/access/common/indextuple.c
src/backend/access/common/printtup.c
src/backend/access/common/reloptions.c
src/backend/access/common/tupconvert.c
src/backend/access/common/tupdesc.c
src/backend/access/gin/ginarrayproc.c
src/backend/access/gin/ginbtree.c
src/backend/access/gin/ginbulk.c
src/backend/access/gin/gindatapage.c
src/backend/access/gin/ginentrypage.c
src/backend/access/gin/ginfast.c
src/backend/access/gin/ginget.c
src/backend/access/gin/gininsert.c
src/backend/access/gin/ginlogic.c
src/backend/access/gin/ginpostinglist.c
src/backend/access/gin/ginscan.c
src/backend/access/gin/ginutil.c
src/backend/access/gin/ginvacuum.c
src/backend/access/gin/ginxlog.c
src/backend/access/gist/gist.c
src/backend/access/gist/gistget.c
src/backend/access/gist/gistscan.c
src/backend/access/gist/gistsplit.c
src/backend/access/gist/gistutil.c
src/backend/access/gist/gistvacuum.c
src/backend/access/gist/gistxlog.c
src/backend/access/hash/hash.c
src/backend/access/hash/hashfunc.c
src/backend/access/hash/hashinsert.c
src/backend/access/hash/hashovfl.c
src/backend/access/hash/hashpage.c
src/backend/access/hash/hashsearch.c
src/backend/access/hash/hashsort.c
src/backend/access/hash/hashutil.c
src/backend/access/heap/heapam.c
src/backend/access/heap/hio.c
src/backend/access/heap/pruneheap.c
src/backend/access/heap/rewriteheap.c
src/backend/access/heap/syncscan.c
src/backend/access/heap/tuptoaster.c
src/backend/access/heap/visibilitymap.c
src/backend/access/index/genam.c
src/backend/access/index/indexam.c
src/backend/access/nbtree/nbtcompare.c
src/backend/access/nbtree/nbtinsert.c
src/backend/access/nbtree/nbtpage.c
src/backend/access/nbtree/nbtree.c
src/backend/access/nbtree/nbtsearch.c
src/backend/access/nbtree/nbtsort.c
src/backend/access/nbtree/nbtutils.c
src/backend/access/nbtree/nbtxlog.c
src/backend/access/rmgrdesc/gindesc.c
src/backend/access/rmgrdesc/nbtdesc.c
src/backend/access/spgist/spgdoinsert.c
src/backend/access/spgist/spginsert.c
src/backend/access/spgist/spgscan.c
src/backend/access/spgist/spgtextproc.c
src/backend/access/spgist/spgutils.c
src/backend/access/spgist/spgvacuum.c
src/backend/access/spgist/spgxlog.c
src/backend/access/transam/clog.c
src/backend/access/transam/multixact.c
src/backend/access/transam/slru.c
src/backend/access/transam/subtrans.c
src/backend/access/transam/timeline.c
src/backend/access/transam/transam.c
src/backend/access/transam/twophase.c
src/backend/access/transam/varsup.c
src/backend/access/transam/xact.c
src/backend/access/transam/xlog.c
src/backend/access/transam/xlogarchive.c
src/backend/access/transam/xlogfuncs.c
src/backend/access/transam/xlogreader.c
src/backend/bootstrap/bootstrap.c
src/backend/catalog/aclchk.c
src/backend/catalog/catalog.c
src/backend/catalog/dependency.c
src/backend/catalog/heap.c
src/backend/catalog/index.c
src/backend/catalog/indexing.c
src/backend/catalog/namespace.c
src/backend/catalog/objectaddress.c
src/backend/catalog/pg_aggregate.c
src/backend/catalog/pg_collation.c
src/backend/catalog/pg_constraint.c
src/backend/catalog/pg_db_role_setting.c
src/backend/catalog/pg_depend.c
src/backend/catalog/pg_enum.c
src/backend/catalog/pg_largeobject.c
src/backend/catalog/pg_operator.c
src/backend/catalog/pg_proc.c
src/backend/catalog/pg_shdepend.c
src/backend/catalog/pg_type.c
src/backend/catalog/storage.c
src/backend/catalog/toasting.c
src/backend/commands/aggregatecmds.c
src/backend/commands/alter.c
src/backend/commands/analyze.c
src/backend/commands/async.c
src/backend/commands/cluster.c
src/backend/commands/constraint.c
src/backend/commands/copy.c
src/backend/commands/createas.c
src/backend/commands/dbcommands.c
src/backend/commands/define.c
src/backend/commands/event_trigger.c
src/backend/commands/explain.c
src/backend/commands/extension.c
src/backend/commands/foreigncmds.c
src/backend/commands/functioncmds.c
src/backend/commands/indexcmds.c
src/backend/commands/matview.c
src/backend/commands/opclasscmds.c
src/backend/commands/operatorcmds.c
src/backend/commands/portalcmds.c
src/backend/commands/prepare.c
src/backend/commands/proclang.c
src/backend/commands/schemacmds.c
src/backend/commands/sequence.c
src/backend/commands/tablecmds.c
src/backend/commands/tablespace.c
src/backend/commands/trigger.c
src/backend/commands/typecmds.c
src/backend/commands/user.c
src/backend/commands/vacuum.c
src/backend/commands/vacuumlazy.c
src/backend/commands/variable.c
src/backend/commands/view.c
src/backend/executor/execAmi.c
src/backend/executor/execCurrent.c
src/backend/executor/execJunk.c
src/backend/executor/execMain.c
src/backend/executor/execProcnode.c
src/backend/executor/execQual.c
src/backend/executor/execScan.c
src/backend/executor/execTuples.c
src/backend/executor/execUtils.c
src/backend/executor/functions.c
src/backend/executor/nodeAgg.c
src/backend/executor/nodeAppend.c
src/backend/executor/nodeBitmapHeapscan.c
src/backend/executor/nodeForeignscan.c
src/backend/executor/nodeFunctionscan.c
src/backend/executor/nodeHash.c
src/backend/executor/nodeHashjoin.c
src/backend/executor/nodeIndexonlyscan.c
src/backend/executor/nodeIndexscan.c
src/backend/executor/nodeLimit.c
src/backend/executor/nodeLockRows.c
src/backend/executor/nodeMaterial.c
src/backend/executor/nodeMergeAppend.c
src/backend/executor/nodeMergejoin.c
src/backend/executor/nodeModifyTable.c
src/backend/executor/nodeRecursiveunion.c
src/backend/executor/nodeSetOp.c
src/backend/executor/nodeSubplan.c
src/backend/executor/nodeSubqueryscan.c
src/backend/executor/nodeUnique.c
src/backend/executor/nodeValuesscan.c
src/backend/executor/nodeWindowAgg.c
src/backend/executor/nodeWorktablescan.c
src/backend/executor/spi.c
src/backend/executor/tstoreReceiver.c
src/backend/lib/stringinfo.c
src/backend/libpq/auth.c
src/backend/libpq/be-fsstubs.c
src/backend/libpq/be-secure.c
src/backend/libpq/hba.c
src/backend/libpq/md5.c
src/backend/libpq/pqcomm.c
src/backend/libpq/pqformat.c
src/backend/main/main.c
src/backend/nodes/bitmapset.c
src/backend/nodes/copyfuncs.c
src/backend/nodes/equalfuncs.c
src/backend/nodes/list.c
src/backend/nodes/makefuncs.c
src/backend/nodes/nodeFuncs.c
src/backend/nodes/outfuncs.c
src/backend/nodes/params.c
src/backend/nodes/read.c
src/backend/nodes/readfuncs.c
src/backend/nodes/tidbitmap.c
src/backend/optimizer/geqo/geqo_eval.c
src/backend/optimizer/path/allpaths.c
src/backend/optimizer/path/clausesel.c
src/backend/optimizer/path/costsize.c
src/backend/optimizer/path/equivclass.c
src/backend/optimizer/path/indxpath.c
src/backend/optimizer/path/joinpath.c
src/backend/optimizer/path/joinrels.c
src/backend/optimizer/path/pathkeys.c
src/backend/optimizer/path/tidpath.c
src/backend/optimizer/plan/analyzejoins.c
src/backend/optimizer/plan/createplan.c
src/backend/optimizer/plan/initsplan.c
src/backend/optimizer/plan/planagg.c
src/backend/optimizer/plan/planmain.c
src/backend/optimizer/plan/planner.c
src/backend/optimizer/plan/setrefs.c
src/backend/optimizer/plan/subselect.c
src/backend/optimizer/prep/prepjointree.c
src/backend/optimizer/prep/prepqual.c
src/backend/optimizer/prep/prepsecurity.c
src/backend/optimizer/prep/preptlist.c
src/backend/optimizer/prep/prepunion.c
src/backend/optimizer/util/clauses.c
src/backend/optimizer/util/joininfo.c
src/backend/optimizer/util/orclauses.c
src/backend/optimizer/util/pathnode.c
src/backend/optimizer/util/placeholder.c
src/backend/optimizer/util/plancat.c
src/backend/optimizer/util/predtest.c
src/backend/optimizer/util/relnode.c
src/backend/optimizer/util/restrictinfo.c
src/backend/optimizer/util/tlist.c
src/backend/optimizer/util/var.c
src/backend/parser/analyze.c
src/backend/parser/kwlookup.c
src/backend/parser/parse_agg.c
src/backend/parser/parse_clause.c
src/backend/parser/parse_coerce.c
src/backend/parser/parse_collate.c
src/backend/parser/parse_cte.c
src/backend/parser/parse_expr.c
src/backend/parser/parse_func.c
src/backend/parser/parse_node.c
src/backend/parser/parse_oper.c
src/backend/parser/parse_param.c
src/backend/parser/parse_relation.c
src/backend/parser/parse_target.c
src/backend/parser/parse_type.c
src/backend/parser/parse_utilcmd.c
src/backend/parser/parser.c
src/backend/parser/scansup.c
src/backend/port/darwin/system.c
src/backend/port/dynloader/darwin.c
src/backend/port/dynloader/freebsd.c
src/backend/port/dynloader/netbsd.c
src/backend/port/dynloader/openbsd.c
src/backend/port/posix_sema.c
src/backend/port/sysv_sema.c
src/backend/port/sysv_shmem.c
src/backend/port/unix_latch.c
src/backend/port/win32/socket.c
src/backend/port/win32_latch.c
src/backend/port/win32_shmem.c
src/backend/postmaster/autovacuum.c
src/backend/postmaster/bgworker.c
src/backend/postmaster/bgwriter.c
src/backend/postmaster/checkpointer.c
src/backend/postmaster/fork_process.c
src/backend/postmaster/pgarch.c
src/backend/postmaster/pgstat.c
src/backend/postmaster/postmaster.c
src/backend/postmaster/startup.c
src/backend/postmaster/syslogger.c
src/backend/postmaster/walwriter.c
src/backend/regex/regc_color.c
src/backend/regex/regc_cvec.c
src/backend/regex/regc_lex.c
src/backend/regex/regc_locale.c
src/backend/regex/regc_nfa.c
src/backend/regex/regc_pg_locale.c
src/backend/regex/regcomp.c
src/backend/regex/rege_dfa.c
src/backend/regex/regerror.c
src/backend/regex/regexec.c
src/backend/regex/regfree.c
src/backend/regex/regprefix.c
src/backend/replication/basebackup.c
src/backend/replication/libpqwalreceiver/libpqwalreceiver.c
src/backend/replication/logical/decode.c
src/backend/replication/logical/logical.c
src/backend/replication/logical/logicalfuncs.c
src/backend/replication/logical/reorderbuffer.c
src/backend/replication/logical/snapbuild.c
src/backend/replication/slot.c
src/backend/replication/slotfuncs.c
src/backend/replication/syncrep.c
src/backend/replication/walreceiver.c
src/backend/replication/walreceiverfuncs.c
src/backend/replication/walsender.c
src/backend/rewrite/rewriteDefine.c
src/backend/rewrite/rewriteHandler.c
src/backend/rewrite/rewriteManip.c
src/backend/rewrite/rewriteSupport.c
src/backend/storage/buffer/buf_init.c
src/backend/storage/buffer/buf_table.c
src/backend/storage/buffer/bufmgr.c
src/backend/storage/buffer/freelist.c
src/backend/storage/buffer/localbuf.c
src/backend/storage/file/buffile.c
src/backend/storage/file/fd.c
src/backend/storage/freespace/freespace.c
src/backend/storage/freespace/fsmpage.c
src/backend/storage/ipc/dsm.c
src/backend/storage/ipc/dsm_impl.c
src/backend/storage/ipc/ipc.c
src/backend/storage/ipc/ipci.c
src/backend/storage/ipc/pmsignal.c
src/backend/storage/ipc/procarray.c
src/backend/storage/ipc/procsignal.c
src/backend/storage/ipc/shm_mq.c
src/backend/storage/ipc/shm_toc.c
src/backend/storage/ipc/shmem.c
src/backend/storage/ipc/shmqueue.c
src/backend/storage/ipc/sinval.c
src/backend/storage/ipc/sinvaladt.c
src/backend/storage/ipc/standby.c
src/backend/storage/large_object/inv_api.c
src/backend/storage/lmgr/deadlock.c
src/backend/storage/lmgr/lmgr.c
src/backend/storage/lmgr/lock.c
src/backend/storage/lmgr/lwlock.c
src/backend/storage/lmgr/predicate.c
src/backend/storage/lmgr/proc.c
src/backend/storage/lmgr/s_lock.c
src/backend/storage/lmgr/spin.c
src/backend/storage/page/bufpage.c
src/backend/storage/smgr/md.c
src/backend/storage/smgr/smgr.c
src/backend/tcop/fastpath.c
src/backend/tcop/postgres.c
src/backend/tcop/pquery.c
src/backend/tcop/utility.c
src/backend/tsearch/ts_locale.c
src/backend/tsearch/ts_selfuncs.c
src/backend/tsearch/ts_typanalyze.c
src/backend/tsearch/ts_utils.c
src/backend/tsearch/wparser_def.c
src/backend/utils/adt/acl.c
src/backend/utils/adt/array_selfuncs.c
src/backend/utils/adt/array_typanalyze.c
src/backend/utils/adt/array_userfuncs.c
src/backend/utils/adt/arrayfuncs.c
src/backend/utils/adt/arrayutils.c
src/backend/utils/adt/cash.c
src/backend/utils/adt/char.c
src/backend/utils/adt/date.c
src/backend/utils/adt/datetime.c
src/backend/utils/adt/datum.c
src/backend/utils/adt/dbsize.c
src/backend/utils/adt/domains.c
src/backend/utils/adt/float.c
src/backend/utils/adt/format_type.c
src/backend/utils/adt/formatting.c
src/backend/utils/adt/geo_ops.c
src/backend/utils/adt/geo_selfuncs.c
src/backend/utils/adt/inet_cidr_ntop.c
src/backend/utils/adt/int.c
src/backend/utils/adt/int8.c
src/backend/utils/adt/json.c
src/backend/utils/adt/jsonb.c
src/backend/utils/adt/jsonb_gin.c
src/backend/utils/adt/jsonb_op.c
src/backend/utils/adt/jsonb_util.c
src/backend/utils/adt/jsonfuncs.c
src/backend/utils/adt/like.c
src/backend/utils/adt/misc.c
src/backend/utils/adt/nabstime.c
src/backend/utils/adt/network.c
src/backend/utils/adt/network_gist.c
src/backend/utils/adt/numeric.c
src/backend/utils/adt/oid.c
src/backend/utils/adt/orderedsetaggs.c
src/backend/utils/adt/pg_locale.c
src/backend/utils/adt/pg_lsn.c
src/backend/utils/adt/pg_lzcompress.c
src/backend/utils/adt/pgstatfuncs.c
src/backend/utils/adt/pseudotypes.c
src/backend/utils/adt/rangetypes.c
src/backend/utils/adt/rangetypes_gist.c
src/backend/utils/adt/regexp.c
src/backend/utils/adt/regproc.c
src/backend/utils/adt/ri_triggers.c
src/backend/utils/adt/rowtypes.c
src/backend/utils/adt/ruleutils.c
src/backend/utils/adt/selfuncs.c
src/backend/utils/adt/timestamp.c
src/backend/utils/adt/tsginidx.c
src/backend/utils/adt/varchar.c
src/backend/utils/adt/varlena.c
src/backend/utils/adt/xml.c
src/backend/utils/cache/attoptcache.c
src/backend/utils/cache/catcache.c
src/backend/utils/cache/inval.c
src/backend/utils/cache/lsyscache.c
src/backend/utils/cache/plancache.c
src/backend/utils/cache/relcache.c
src/backend/utils/cache/relfilenodemap.c
src/backend/utils/cache/relmapper.c
src/backend/utils/cache/spccache.c
src/backend/utils/cache/syscache.c
src/backend/utils/cache/typcache.c
src/backend/utils/error/elog.c
src/backend/utils/fmgr/dfmgr.c
src/backend/utils/fmgr/fmgr.c
src/backend/utils/fmgr/funcapi.c
src/backend/utils/hash/dynahash.c
src/backend/utils/init/miscinit.c
src/backend/utils/init/postinit.c
src/backend/utils/mb/conversion_procs/euc_tw_and_big5/big5.c
src/backend/utils/mb/mbutils.c
src/backend/utils/mb/wchar.c
src/backend/utils/mb/wstrcmp.c
src/backend/utils/mb/wstrncmp.c
src/backend/utils/misc/guc.c
src/backend/utils/misc/ps_status.c
src/backend/utils/misc/rbtree.c
src/backend/utils/misc/timeout.c
src/backend/utils/misc/tzparser.c
src/backend/utils/mmgr/aset.c
src/backend/utils/mmgr/mcxt.c
src/backend/utils/mmgr/portalmem.c
src/backend/utils/resowner/resowner.c
src/backend/utils/sort/logtape.c
src/backend/utils/sort/tuplesort.c
src/backend/utils/sort/tuplestore.c
src/backend/utils/time/combocid.c
src/backend/utils/time/snapmgr.c
src/backend/utils/time/tqual.c
src/bin/initdb/findtimezone.c
src/bin/initdb/initdb.c
src/bin/initdb/t/001_initdb.pl
src/bin/pg_basebackup/pg_basebackup.c
src/bin/pg_basebackup/pg_receivexlog.c
src/bin/pg_basebackup/pg_recvlogical.c
src/bin/pg_basebackup/receivelog.c
src/bin/pg_basebackup/streamutil.c
src/bin/pg_basebackup/streamutil.h
src/bin/pg_basebackup/t/010_pg_basebackup.pl
src/bin/pg_config/t/001_pg_config.pl
src/bin/pg_controldata/pg_controldata.c
src/bin/pg_controldata/t/001_pg_controldata.pl
src/bin/pg_ctl/pg_ctl.c
src/bin/pg_ctl/t/001_start_stop.pl
src/bin/pg_ctl/t/002_status.pl
src/bin/pg_dump/common.c
src/bin/pg_dump/compress_io.c
src/bin/pg_dump/dumputils.c
src/bin/pg_dump/parallel.c
src/bin/pg_dump/pg_backup.h
src/bin/pg_dump/pg_backup_archiver.c
src/bin/pg_dump/pg_backup_archiver.h
src/bin/pg_dump/pg_backup_custom.c
src/bin/pg_dump/pg_backup_db.c
src/bin/pg_dump/pg_backup_directory.c
src/bin/pg_dump/pg_backup_tar.c
src/bin/pg_dump/pg_dump.c
src/bin/pg_dump/pg_dump.h
src/bin/pg_dump/pg_dump_sort.c
src/bin/pg_dump/pg_dumpall.c
src/bin/pg_resetxlog/pg_resetxlog.c
src/bin/psql/command.c
src/bin/psql/common.c
src/bin/psql/copy.c
src/bin/psql/describe.c
src/bin/psql/input.c
src/bin/psql/large_obj.c
src/bin/psql/mainloop.c
src/bin/psql/mbprint.c
src/bin/psql/print.c
src/bin/psql/settings.h
src/bin/psql/stringutils.c
src/bin/psql/tab-complete.c
src/bin/psql/variables.c
src/bin/scripts/common.c
src/bin/scripts/createuser.c
src/bin/scripts/pg_isready.c
src/bin/scripts/t/010_clusterdb.pl
src/bin/scripts/t/011_clusterdb_all.pl
src/bin/scripts/t/020_createdb.pl
src/bin/scripts/t/030_createlang.pl
src/bin/scripts/t/040_createuser.pl
src/bin/scripts/t/050_dropdb.pl
src/bin/scripts/t/060_droplang.pl
src/bin/scripts/t/070_dropuser.pl
src/bin/scripts/t/090_reindexdb.pl
src/bin/scripts/t/091_reindexdb_all.pl
src/bin/scripts/t/100_vacuumdb.pl
src/bin/scripts/t/101_vacuumdb_all.pl
src/bin/scripts/t/102_vacuumdb_stages.pl
src/bin/scripts/vacuumdb.c
src/common/psprintf.c
src/common/relpath.c
src/include/access/attnum.h
src/include/access/genam.h
src/include/access/gin.h
src/include/access/gin_private.h
src/include/access/gist.h
src/include/access/hash.h
src/include/access/heapam.h
src/include/access/heapam_xlog.h
src/include/access/htup.h
src/include/access/htup_details.h
src/include/access/itup.h
src/include/access/nbtree.h
src/include/access/reloptions.h
src/include/access/rewriteheap.h
src/include/access/rmgr.h
src/include/access/rmgrlist.h
src/include/access/skey.h
src/include/access/slru.h
src/include/access/spgist_private.h
src/include/access/transam.h
src/include/access/tupdesc.h
src/include/access/tupmacs.h
src/include/access/tuptoaster.h
src/include/access/xlog.h
src/include/access/xlog_internal.h
src/include/access/xlogdefs.h
src/include/c.h
src/include/catalog/catversion.h
src/include/catalog/dependency.h
src/include/catalog/duplicate_oids
src/include/catalog/genbki.h
src/include/catalog/namespace.h
src/include/catalog/objectaccess.h
src/include/catalog/pg_aggregate.h
src/include/catalog/pg_attrdef.h
src/include/catalog/pg_attribute.h
src/include/catalog/pg_authid.h
src/include/catalog/pg_constraint.h
src/include/catalog/pg_control.h
src/include/catalog/pg_db_role_setting.h
src/include/catalog/pg_default_acl.h
src/include/catalog/pg_description.h
src/include/catalog/pg_event_trigger.h
src/include/catalog/pg_index.h
src/include/catalog/pg_largeobject.h
src/include/catalog/pg_opclass.h
src/include/catalog/pg_proc.h
src/include/catalog/pg_rewrite.h
src/include/catalog/pg_shdepend.h
src/include/catalog/pg_shdescription.h
src/include/catalog/pg_statistic.h
src/include/catalog/pg_trigger.h
src/include/catalog/pg_ts_dict.h
src/include/catalog/pg_ts_template.h
src/include/catalog/pg_type.h
src/include/catalog/toasting.h
src/include/commands/comment.h
src/include/commands/tablecmds.h
src/include/commands/vacuum.h
src/include/common/fe_memutils.h
src/include/common/relpath.h
src/include/datatype/timestamp.h
src/include/executor/executor.h
src/include/executor/hashjoin.h
src/include/executor/spi_priv.h
src/include/executor/tuptable.h
src/include/fmgr.h
src/include/funcapi.h
src/include/lib/ilist.h
src/include/lib/stringinfo.h
src/include/libpq/libpq-be.h
src/include/libpq/pqcomm.h
src/include/mb/pg_wchar.h
src/include/miscadmin.h
src/include/nodes/execnodes.h
src/include/nodes/nodes.h
src/include/nodes/params.h
src/include/nodes/parsenodes.h
src/include/nodes/plannodes.h
src/include/nodes/primnodes.h
src/include/nodes/relation.h
src/include/nodes/replnodes.h
src/include/nodes/tidbitmap.h
src/include/nodes/value.h
src/include/parser/gramparse.h
src/include/parser/parse_node.h
src/include/parser/scanner.h
src/include/pg_config_manual.h
src/include/pgstat.h
src/include/port.h
src/include/port/linux.h
src/include/port/win32.h
src/include/portability/instr_time.h
src/include/postgres.h
src/include/postgres_ext.h
src/include/postmaster/bgworker.h
src/include/postmaster/bgworker_internals.h
src/include/postmaster/syslogger.h
src/include/regex/regcustom.h
src/include/regex/regex.h
src/include/regex/regexport.h
src/include/regex/regguts.h
src/include/replication/basebackup.h
src/include/replication/decode.h
src/include/replication/logical.h
src/include/replication/output_plugin.h
src/include/replication/reorderbuffer.h
src/include/replication/slot.h
src/include/replication/snapbuild.h
src/include/replication/walreceiver.h
src/include/rewrite/rewriteHandler.h
src/include/snowball/header.h
src/include/storage/barrier.h
src/include/storage/block.h
src/include/storage/buf_internals.h
src/include/storage/bufpage.h
src/include/storage/dsm.h
src/include/storage/dsm_impl.h
src/include/storage/ipc.h
src/include/storage/itemid.h
src/include/storage/itemptr.h
src/include/storage/large_object.h
src/include/storage/lock.h
src/include/storage/lwlock.h
src/include/storage/pg_sema.h
src/include/storage/pg_shmem.h
src/include/storage/pos.h
src/include/storage/predicate_internals.h
src/include/storage/proc.h
src/include/storage/procarray.h
src/include/storage/relfilenode.h
src/include/storage/shm_mq.h
src/include/storage/shm_toc.h
src/include/storage/sinval.h
src/include/storage/sinvaladt.h
src/include/storage/smgr.h
src/include/storage/spin.h
src/include/tcop/dest.h
src/include/tcop/tcopdebug.h
src/include/utils/acl.h
src/include/utils/builtins.h
src/include/utils/catcache.h
src/include/utils/datetime.h
src/include/utils/elog.h
src/include/utils/guc.h
src/include/utils/hsearch.h
src/include/utils/inet.h
src/include/utils/jsonapi.h
src/include/utils/jsonb.h
src/include/utils/memutils.h
src/include/utils/palloc.h
src/include/utils/pg_crc.h
src/include/utils/plancache.h
src/include/utils/portal.h
src/include/utils/rel.h
src/include/utils/relcache.h
src/include/utils/relfilenodemap.h
src/include/utils/resowner.h
src/include/utils/resowner_private.h
src/include/utils/selfuncs.h
src/include/utils/snapshot.h
src/include/utils/sortsupport.h
src/include/utils/tqual.h
src/include/utils/tuplesort.h
src/include/utils/tuplestore.h
src/include/utils/typcache.h
src/interfaces/ecpg/ecpglib/data.c
src/interfaces/ecpg/ecpglib/execute.c
src/interfaces/ecpg/ecpglib/extern.h
src/interfaces/ecpg/include/sqlca.h
src/interfaces/ecpg/pgtypeslib/dt.h
src/interfaces/ecpg/pgtypeslib/dt_common.c
src/interfaces/ecpg/pgtypeslib/interval.c
src/interfaces/ecpg/pgtypeslib/numeric.c
src/interfaces/ecpg/preproc/c_keywords.c
src/interfaces/ecpg/preproc/extern.h
src/interfaces/ecpg/preproc/output.c
src/interfaces/ecpg/preproc/parse.pl
src/interfaces/ecpg/preproc/parser.c
src/interfaces/ecpg/preproc/type.c
src/interfaces/ecpg/preproc/variable.c
src/interfaces/ecpg/test/expected/compat_informix-test_informix2.c
src/interfaces/ecpg/test/expected/preproc-init.c
src/interfaces/ecpg/test/expected/sql-array.c
src/interfaces/ecpg/test/expected/sql-code100.c
src/interfaces/ecpg/test/expected/sql-copystdout.c
src/interfaces/ecpg/test/expected/sql-define.c
src/interfaces/ecpg/test/expected/sql-dynalloc.c
src/interfaces/ecpg/test/expected/sql-dynalloc2.c
src/interfaces/ecpg/test/expected/sql-dyntest.c
src/interfaces/ecpg/test/expected/sql-indicators.c
src/interfaces/ecpg/test/expected/thread-alloc.c
src/interfaces/ecpg/test/expected/thread-descriptor.c
src/interfaces/ecpg/test/expected/thread-prep.c
src/interfaces/ecpg/test/pg_regress_ecpg.c
src/interfaces/libpq/fe-auth.c
src/interfaces/libpq/fe-connect.c
src/interfaces/libpq/fe-exec.c
src/interfaces/libpq/fe-lobj.c
src/interfaces/libpq/fe-misc.c
src/interfaces/libpq/fe-protocol2.c
src/interfaces/libpq/fe-protocol3.c
src/interfaces/libpq/fe-secure.c
src/interfaces/libpq/libpq-fe.h
src/interfaces/libpq/libpq-int.h
src/interfaces/libpq/pqexpbuffer.c
src/interfaces/libpq/pqexpbuffer.h
src/interfaces/libpq/test/uri-regress.c
src/pl/plperl/plperl.c
src/pl/plpgsql/src/pl_comp.c
src/pl/plpgsql/src/pl_exec.c
src/pl/plpgsql/src/pl_funcs.c
src/pl/plpgsql/src/pl_handler.c
src/pl/plpgsql/src/pl_scanner.c
src/pl/plpgsql/src/plpgsql.h
src/pl/plpython/plpy_elog.c
src/pl/plpython/plpy_exec.c
src/pl/plpython/plpy_typeio.c
src/pl/plpython/plpy_util.c
src/pl/tcl/pltcl.c
src/port/chklocale.c
src/port/crypt.c
src/port/dirent.c
src/port/erand48.c
src/port/fls.c
src/port/getaddrinfo.c
src/port/getopt.c
src/port/getopt_long.c
src/port/inet_aton.c
src/port/kill.c
src/port/path.c
src/port/pgmkdirp.c
src/port/pqsignal.c
src/port/qsort.c
src/port/qsort_arg.c
src/port/snprintf.c
src/port/sprompt.c
src/port/strlcat.c
src/port/strlcpy.c
src/port/thread.c
src/port/unsetenv.c
src/test/isolation/isolation_main.c
src/test/isolation/isolationtester.c
src/test/isolation/isolationtester.h
src/test/perl/TestLib.pm
src/test/regress/pg_regress.c
src/test/regress/pg_regress.h
src/test/regress/pg_regress_main.c
src/test/regress/regress.c
src/timezone/localtime.c
src/timezone/pgtz.c
src/timezone/zic.c
src/tools/entab/entab.c
src/tools/git_changelog
src/tools/msvc/MSBuildProject.pm
src/tools/msvc/Mkvcbuild.pm
src/tools/msvc/Solution.pm
src/tools/msvc/gendef.pl
src/tools/pgindent/pgindent
src/tutorial/complex.c

config/test_quiet_include.h
index eb0515ede7861f5f4cb7bf27595c547877c53e11..f4fa4d30dd774aa90c6715c032e4f70500136ea4 100644 (file)
@@ -2,4 +2,8 @@
  * For the raison d'etre of this file, check the comment above the definition
  * of the PGAC_C_INLINE macro in config/c-compiler.m4.
  */
-static inline int fun () { return 0; }
+static inline int
+fun()
+{
+       return 0;
+}
contrib/auto_explain/auto_explain.c
index c8ca7c4a3403059a6d8c8cbd9b96059b14f26174..cbbd25753f72deb33123b47508d732e57f1cb247 100644 (file)
@@ -116,7 +116,7 @@ _PG_init(void)
 
        DefineCustomBoolVariable("auto_explain.log_triggers",
                                                         "Include trigger statistics in plans.",
-                                                        "This has no effect unless log_analyze is also set.",
+                                               "This has no effect unless log_analyze is also set.",
                                                         &auto_explain_log_triggers,
                                                         false,
                                                         PGC_SUSET,
contrib/btree_gist/btree_interval.c
index e406d7120c4937a2582e84e916af4dc464b90777..93a341eb77e4abf9ac287a275379e532de39d809 100644 (file)
@@ -86,7 +86,7 @@ gbt_intv_dist(const void *a, const void *b)
 
 /*
  * INTERVALSIZE should be the actual size-on-disk of an Interval, as shown
- * in pg_type. This might be less than sizeof(Interval) if the compiler
+ * in pg_type.  This might be less than sizeof(Interval) if the compiler
  * insists on adding alignment padding at the end of the struct.
  */
 #define INTERVALSIZE 16
contrib/cube/cube.c
index 906cc9efc7f7684253f5f50bafc83fa9f74f89dc..b0305ef431dca7a23272f6a98a6096ff3feae504 100644 (file)
@@ -104,7 +104,7 @@ bool                g_cube_internal_consistent(NDBOX *key, NDBOX *query, StrategyNumber strate
 ** Auxiliary funxtions
 */
 static double distance_1D(double a1, double a2, double b1, double b2);
-static bool    cube_is_point_internal(NDBOX *cube);
+static bool cube_is_point_internal(NDBOX *cube);
 
 
 /*****************************************************************************
@@ -538,7 +538,7 @@ g_cube_picksplit(PG_FUNCTION_ARGS)
        rt_cube_size(datum_r, &size_r);
 
        /*
-        * Now split up the regions between the two seeds.      An important property
+        * Now split up the regions between the two seeds.  An important property
         * of this split algorithm is that the split vector v has the indices of
         * items to be split in order in its left and right vectors.  We exploit
         * this property by doing a merge in the code that actually splits the
@@ -554,7 +554,7 @@ g_cube_picksplit(PG_FUNCTION_ARGS)
        {
                /*
                 * If we've already decided where to place this item, just put it on
-                * the right list.      Otherwise, we need to figure out which page needs
+                * the right list.  Otherwise, we need to figure out which page needs
                 * the least enlargement in order to store the item.
                 */
 
@@ -728,27 +728,27 @@ cube_union_v0(NDBOX *a, NDBOX *b)
        SET_VARSIZE(result, size);
        SET_DIM(result, dim);
 
-        /* First compute the union of the dimensions present in both args */
+       /* First compute the union of the dimensions present in both args */
        for (i = 0; i < DIM(b); i++)
        {
                result->x[i] = Min(
-                       Min(LL_COORD(a, i), UR_COORD(a, i)),
-                       Min(LL_COORD(b, i), UR_COORD(b, i))
-               );
+                                                  Min(LL_COORD(a, i), UR_COORD(a, i)),
+                                                  Min(LL_COORD(b, i), UR_COORD(b, i))
+                       );
                result->x[i + DIM(a)] = Max(
-                       Max(LL_COORD(a, i), UR_COORD(a, i)),
-                       Max(LL_COORD(b, i), UR_COORD(b, i))
-               );
+                                                                       Max(LL_COORD(a, i), UR_COORD(a, i)),
+                                                                       Max(LL_COORD(b, i), UR_COORD(b, i))
+                       );
        }
        /* continue on the higher dimensions only present in 'a' */
        for (; i < DIM(a); i++)
        {
                result->x[i] = Min(0,
-                       Min(LL_COORD(a, i), UR_COORD(a, i))
-               );
+                                                  Min(LL_COORD(a, i), UR_COORD(a, i))
+                       );
                result->x[i + dim] = Max(0,
-                       Max(LL_COORD(a, i), UR_COORD(a, i))
-               );
+                                                                Max(LL_COORD(a, i), UR_COORD(a, i))
+                       );
        }
 
        /*
@@ -795,6 +795,7 @@ cube_inter(PG_FUNCTION_ARGS)
        if (DIM(a) < DIM(b))
        {
                NDBOX      *tmp = b;
+
                b = a;
                a = tmp;
                swapped = true;
@@ -806,27 +807,27 @@ cube_inter(PG_FUNCTION_ARGS)
        SET_VARSIZE(result, size);
        SET_DIM(result, dim);
 
-        /* First compute intersection of the dimensions present in both args */
+       /* First compute intersection of the dimensions present in both args */
        for (i = 0; i < DIM(b); i++)
        {
                result->x[i] = Max(
-                       Min(LL_COORD(a, i), UR_COORD(a, i)),
-                       Min(LL_COORD(b, i), UR_COORD(b, i))
-               );
+                                                  Min(LL_COORD(a, i), UR_COORD(a, i)),
+                                                  Min(LL_COORD(b, i), UR_COORD(b, i))
+                       );
                result->x[i + DIM(a)] = Min(
-                       Max(LL_COORD(a, i), UR_COORD(a, i)),
-                       Max(LL_COORD(b, i), UR_COORD(b, i))
-               );
+                                                                       Max(LL_COORD(a, i), UR_COORD(a, i)),
+                                                                       Max(LL_COORD(b, i), UR_COORD(b, i))
+                       );
        }
        /* continue on the higher dimemsions only present in 'a' */
        for (; i < DIM(a); i++)
        {
                result->x[i] = Max(0,
-                       Min(LL_COORD(a, i), UR_COORD(a, i))
-               );
+                                                  Min(LL_COORD(a, i), UR_COORD(a, i))
+                       );
                result->x[i + DIM(a)] = Min(0,
-                       Max(LL_COORD(a, i), UR_COORD(a, i))
-               );
+                                                                       Max(LL_COORD(a, i), UR_COORD(a, i))
+                       );
        }
 
        /*
@@ -1236,14 +1237,14 @@ cube_distance(PG_FUNCTION_ARGS)
        /* compute within the dimensions of (b) */
        for (i = 0; i < DIM(b); i++)
        {
-               d = distance_1D(LL_COORD(a,i), UR_COORD(a,i), LL_COORD(b,i), UR_COORD(b,i));
+               d = distance_1D(LL_COORD(a, i), UR_COORD(a, i), LL_COORD(b, i), UR_COORD(b, i));
                distance += d * d;
        }
 
        /* compute distance to zero for those dimensions in (a) absent in (b) */
        for (i = DIM(b); i < DIM(a); i++)
        {
-               d = distance_1D(LL_COORD(a,i), UR_COORD(a,i), 0.0, 0.0);
+               d = distance_1D(LL_COORD(a, i), UR_COORD(a, i), 0.0, 0.0);
                distance += d * d;
        }
 
@@ -1297,11 +1298,11 @@ cube_is_point_internal(NDBOX *cube)
                return true;
 
        /*
-        * Even if the point-flag is not set, all the lower-left coordinates
-        * might match the upper-right coordinates, so that the value is in
-        * fact a point. Such values don't arise with current code - the point
-        * flag is always set if appropriate - but they might be present on-disk
-        * in clusters upgraded from pre-9.4 versions.
+        * Even if the point-flag is not set, all the lower-left coordinates might
+        * match the upper-right coordinates, so that the value is in fact a
+        * point. Such values don't arise with current code - the point flag is
+        * always set if appropriate - but they might be present on-disk in
+        * clusters upgraded from pre-9.4 versions.
         */
        for (i = 0; i < DIM(cube); i++)
        {
@@ -1317,6 +1318,7 @@ cube_dim(PG_FUNCTION_ARGS)
 {
        NDBOX      *c = PG_GETARG_NDBOX(0);
        int                     dim = DIM(c);
+
        PG_FREE_IF_COPY(c, 0);
        PG_RETURN_INT32(dim);
 }
@@ -1330,7 +1332,7 @@ cube_ll_coord(PG_FUNCTION_ARGS)
        double          result;
 
        if (DIM(c) >= n && n > 0)
-               result = Min(LL_COORD(c, n-1), UR_COORD(c, n-1));
+               result = Min(LL_COORD(c, n - 1), UR_COORD(c, n - 1));
        else
                result = 0;
 
@@ -1347,7 +1349,7 @@ cube_ur_coord(PG_FUNCTION_ARGS)
        double          result;
 
        if (DIM(c) >= n && n > 0)
-               result = Max(LL_COORD(c, n-1), UR_COORD(c, n-1));
+               result = Max(LL_COORD(c, n - 1), UR_COORD(c, n - 1));
        else
                result = 0;
 
@@ -1382,15 +1384,15 @@ cube_enlarge(PG_FUNCTION_ARGS)
 
        for (i = 0, j = dim; i < DIM(a); i++, j++)
        {
-               if (LL_COORD(a,i) >= UR_COORD(a,i))
+               if (LL_COORD(a, i) >= UR_COORD(a, i))
                {
-                       result->x[i] = UR_COORD(a,i) - r;
-                       result->x[j] = LL_COORD(a,i) + r;
+                       result->x[i] = UR_COORD(a, i) - r;
+                       result->x[j] = LL_COORD(a, i) + r;
                }
                else
                {
-                       result->x[i] = LL_COORD(a,i) - r;
-                       result->x[j] = UR_COORD(a,i) + r;
+                       result->x[i] = LL_COORD(a, i) - r;
+                       result->x[j] = UR_COORD(a, i) + r;
                }
                if (result->x[i] > result->x[j])
                {
@@ -1503,7 +1505,7 @@ cube_c_f8(PG_FUNCTION_ARGS)
                        result->x[DIM(result) + i] = cube->x[DIM(cube) + i];
                }
                result->x[DIM(result) - 1] = x;
-               result->x[2*DIM(result) - 1] = x;
+               result->x[2 * DIM(result) - 1] = x;
        }
 
        PG_FREE_IF_COPY(cube, 0);
@@ -1521,7 +1523,8 @@ cube_c_f8_f8(PG_FUNCTION_ARGS)
        int                     size;
        int                     i;
 
-       if (IS_POINT(cube) && (x1 == x2)){
+       if (IS_POINT(cube) && (x1 == x2))
+       {
                size = POINT_SIZE((DIM(cube) + 1));
                result = (NDBOX *) palloc0(size);
                SET_VARSIZE(result, size);
contrib/cube/cubedata.h
index 6c825b98ea735b89506755dd290b5ce0e67e92a6..5d44e110817ad5281e3f983ec2f7a0fb4350e38b 100644 (file)
@@ -13,9 +13,9 @@ typedef struct NDBOX
         *
         * Following information is stored:
         *
-        *  bits 0-7  : number of cube dimensions;
-        *  bits 8-30 : unused, initialize to zero;
-        *  bit  31   : point flag. If set, the upper right coordinates are not
+        *      bits 0-7  : number of cube dimensions;
+        *      bits 8-30 : unused, initialize to zero;
+        *      bit  31   : point flag. If set, the upper right coordinates are not
         *                              stored, and are implicitly the same as the lower left
         *                              coordinates.
         *----------
@@ -31,12 +31,12 @@ typedef struct NDBOX
 } NDBOX;
 
 #define POINT_BIT                      0x80000000
-#define        DIM_MASK                        0x7fffffff
+#define DIM_MASK                       0x7fffffff
 
 #define IS_POINT(cube)         ( ((cube)->header & POINT_BIT) != 0 )
-#define SET_POINT_BIT(cube)    ( (cube)->header |= POINT_BIT )
+#define SET_POINT_BIT(cube) ( (cube)->header |= POINT_BIT )
 #define DIM(cube)                      ( (cube)->header & DIM_MASK )
-#define SET_DIM(cube, _dim)    ( (cube)->header = ((cube)->header & ~DIM_MASK) | (_dim) )
+#define SET_DIM(cube, _dim) ( (cube)->header = ((cube)->header & ~DIM_MASK) | (_dim) )
 
 #define LL_COORD(cube, i) ( (cube)->x[i] )
 #define UR_COORD(cube, i) ( IS_POINT(cube) ? (cube)->x[i] : (cube)->x[(i) + DIM(cube)] )
contrib/dblink/dblink.c
index 7b711f13b3e2be2f4d3f83e9a5ada258fa836115..a81853fa9118170f7773363c76afb07afe633f19 100644 (file)
@@ -2394,7 +2394,7 @@ get_tuple_of_interest(Relation rel, int *pkattnums, int pknumatts, char **src_pk
         * Build sql statement to look up tuple of interest, ie, the one matching
         * src_pkattvals.  We used to use "SELECT *" here, but it's simpler to
         * generate a result tuple that matches the table's physical structure,
-        * with NULLs for any dropped columns.  Otherwise we have to deal with two
+        * with NULLs for any dropped columns.  Otherwise we have to deal with two
         * different tupdescs and everything's very confusing.
         */
        appendStringInfoString(&buf, "SELECT ");
@@ -2620,7 +2620,7 @@ dblink_security_check(PGconn *conn, remoteConn *rconn)
 }
 
 /*
- * For non-superusers, insist that the connstr specify a password.     This
+ * For non-superusers, insist that the connstr specify a password.  This
  * prevents a password from being picked up from .pgpass, a service file,
  * the environment, etc.  We don't want the postgres user's passwords
  * to be accessible to non-superusers.
contrib/earthdistance/earthdistance.c
index eadfce4605475e850f198ec93d5267c061940974..6bbebdfd1a31e4b91cc824c727a9884a03f1ebd8 100644 (file)
@@ -91,7 +91,7 @@ geo_distance_internal(Point *pt1, Point *pt2)
  *      distance between the points in miles on earth's surface
  *
  * If float8 is passed-by-value, the oldstyle version-0 calling convention
- * is unportable, so we use version-1. However, if it's passed-by-reference,
+ * is unportable, so we use version-1.  However, if it's passed-by-reference,
  * continue to use oldstyle.  This is just because we'd like earthdistance
  * to serve as a canary for any unintentional breakage of version-0 functions
  * with float8 results.
contrib/file_fdw/file_fdw.c
index 476e520301aa992b4ceb10345ffb2842dda6471d..5a4d5aac2156709e3c6398b45a8a7c7741639319 100644 (file)
@@ -70,6 +70,7 @@ static const struct FileFdwOption valid_options[] = {
        {"encoding", ForeignTableRelationId},
        {"force_not_null", AttributeRelationId},
        {"force_null", AttributeRelationId},
+
        /*
         * force_quote is not supported by file_fdw because it's for COPY TO.
         */
@@ -253,6 +254,7 @@ file_fdw_validator(PG_FUNCTION_ARGS)
                                                 errmsg("conflicting or redundant options")));
                        filename = defGetString(def);
                }
+
                /*
                 * force_not_null is a boolean option; after validation we can discard
                 * it - it will be retrieved later in get_file_fdw_attribute_options()
@@ -397,7 +399,7 @@ get_file_fdw_attribute_options(Oid relid)
        List       *fnncolumns = NIL;
        List       *fncolumns = NIL;
 
-       List *options = NIL;
+       List       *options = NIL;
 
        rel = heap_open(relid, AccessShareLock);
        tupleDesc = RelationGetDescr(rel);
@@ -443,12 +445,15 @@ get_file_fdw_attribute_options(Oid relid)
 
        heap_close(rel, AccessShareLock);
 
-       /* Return DefElem only when some column(s) have force_not_null / force_null options set */
+       /*
+        * Return DefElem only when some column(s) have force_not_null /
+        * force_null options set
+        */
        if (fnncolumns != NIL)
                options = lappend(options, makeDefElem("force_not_null", (Node *) fnncolumns));
 
        if (fncolumns != NIL)
-               options = lappend(options,makeDefElem("force_null", (Node *) fncolumns));
+               options = lappend(options, makeDefElem("force_null", (Node *) fncolumns));
 
        return options;
 }
@@ -508,7 +513,7 @@ fileGetForeignPaths(PlannerInfo *root,
                                   &startup_cost, &total_cost);
 
        /*
-        * Create a ForeignPath node and add it as only possible path.  We use the
+        * Create a ForeignPath node and add it as only possible path.  We use the
         * fdw_private list of the path to carry the convert_selectively option;
         * it will be propagated into the fdw_private list of the Plan node.
         */
@@ -921,7 +926,7 @@ estimate_size(PlannerInfo *root, RelOptInfo *baserel,
                 * planner's idea of the relation width; which is bogus if not all
                 * columns are being read, not to mention that the text representation
                 * of a row probably isn't the same size as its internal
-                * representation.      Possibly we could do something better, but the
+                * representation.  Possibly we could do something better, but the
                 * real answer to anyone who complains is "ANALYZE" ...
                 */
                int                     tuple_width;
@@ -986,7 +991,7 @@ estimate_costs(PlannerInfo *root, RelOptInfo *baserel,
  * which must have at least targrows entries.
  * The actual number of rows selected is returned as the function result.
  * We also count the total number of rows in the file and return it into
- * *totalrows. Note that *totaldeadrows is always set to 0.
+ * *totalrows.  Note that *totaldeadrows is always set to 0.
  *
  * Note that the returned list of rows is not always in order by physical
  * position in the file.  Therefore, correlation estimates derived later
contrib/fuzzystrmatch/levenshtein.c
index 989c4161d6ad7d0a33d12d9253403b6628a5a554..4f37a54b1e446338e79da6686b4646a2fcf32fed 100644 (file)
@@ -50,7 +50,7 @@ static int levenshtein_internal(text *s, text *t,
  * array.
  *
  * If max_d >= 0, we only need to provide an accurate answer when that answer
- * is less than or equal to the bound. From any cell in the matrix, there is
+ * is less than or equal to the bound.  From any cell in the matrix, there is
  * theoretical "minimum residual distance" from that cell to the last column
  * of the final row.  This minimum residual distance is zero when the
  * untransformed portions of the strings are of equal length (because we might
@@ -141,7 +141,7 @@ levenshtein_internal(text *s, text *t,
        stop_column = m + 1;
 
        /*
-        * If max_d >= 0, determine whether the bound is impossibly tight.      If so,
+        * If max_d >= 0, determine whether the bound is impossibly tight.  If so,
         * return max_d + 1 immediately.  Otherwise, determine whether it's tight
         * enough to limit the computation we must perform.  If so, figure out
         * initial stop column.
@@ -168,7 +168,7 @@ levenshtein_internal(text *s, text *t,
                         * need to fill in.  If the string is growing, the theoretical
                         * minimum distance already incorporates the cost of deleting the
                         * number of characters necessary to make the two strings equal in
-                        * length.      Each additional deletion forces another insertion, so
+                        * length.  Each additional deletion forces another insertion, so
                         * the best-case total cost increases by ins_c + del_c. If the
                         * string is shrinking, the minimum theoretical cost assumes no
                         * excess deletions; that is, we're starting no further right than
@@ -246,7 +246,7 @@ levenshtein_internal(text *s, text *t,
                /*
                 * The main loop fills in curr, but curr[0] needs a special case: to
                 * transform the first 0 characters of s into the first j characters
-                * of t, we must perform j insertions.  However, if start_column > 0,
+                * of t, we must perform j insertions.  However, if start_column > 0,
                 * this special case does not apply.
                 */
                if (start_column == 0)
contrib/hstore/hstore.h
index 382111617a01bf7359219c40f60ac4461d60d73b..f9088f8b1b6c43510e751b8341816e9c69352e99 100644 (file)
@@ -12,7 +12,7 @@
  * HEntry: there is one of these for each key _and_ value in an hstore
  *
  * the position offset points to the _end_ so that we can get the length
- * by subtraction from the previous entry.     the ISFIRST flag lets us tell
+ * by subtraction from the previous entry.  the ISFIRST flag lets us tell
  * whether there is a previous entry.
  */
 typedef struct
contrib/hstore/hstore_gin.c
index 9f2de06f7fe301d083ed99b2253e0118c661c2df..68f9061db14a963a83b3c8f1625b8992d2a396b5 100644 (file)
@@ -13,7 +13,7 @@
 /*
  * When using a GIN index for hstore, we choose to index both keys and values.
  * The storage format is "text" values, with K, V, or N prepended to the string
- * to indicate key, value, or null values.     (As of 9.1 it might be better to
+ * to indicate key, value, or null values.  (As of 9.1 it might be better to
  * store null values as nulls, but we'll keep it this way for on-disk
  * compatibility.)
  */
@@ -165,7 +165,7 @@ gin_consistent_hstore(PG_FUNCTION_ARGS)
        {
                /*
                 * Index doesn't have information about correspondence of keys and
-                * values, so we need recheck.  However, if not all the keys are
+                * values, so we need recheck.  However, if not all the keys are
                 * present, we can fail at once.
                 */
                *recheck = true;
contrib/hstore/hstore_io.c
index 9899317bfca33cc6a66a75458f538f14dec8fe61..cd303fc972959e57605da62fd75b52387adfd88b 100644 (file)
@@ -1245,7 +1245,7 @@ hstore_to_json_loose(PG_FUNCTION_ARGS)
                                dst;
 
        if (count == 0)
-               PG_RETURN_TEXT_P(cstring_to_text_with_len("{}",2));
+               PG_RETURN_TEXT_P(cstring_to_text_with_len("{}", 2));
 
        initStringInfo(&tmp);
        initStringInfo(&dst);
@@ -1335,7 +1335,7 @@ hstore_to_json(PG_FUNCTION_ARGS)
                                dst;
 
        if (count == 0)
-               PG_RETURN_TEXT_P(cstring_to_text_with_len("{}",2));
+               PG_RETURN_TEXT_P(cstring_to_text_with_len("{}", 2));
 
        initStringInfo(&tmp);
        initStringInfo(&dst);
@@ -1381,7 +1381,8 @@ hstore_to_jsonb(PG_FUNCTION_ARGS)
 
        for (i = 0; i < count; i++)
        {
-               JsonbValue key, val;
+               JsonbValue      key,
+                                       val;
 
                key.estSize = sizeof(JEntry);
                key.type = jbvString;
@@ -1424,7 +1425,7 @@ hstore_to_jsonb_loose(PG_FUNCTION_ARGS)
        JsonbParseState *state = NULL;
        JsonbValue *res;
        StringInfoData tmp;
-       bool        is_number;
+       bool            is_number;
 
        initStringInfo(&tmp);
 
@@ -1432,7 +1433,8 @@ hstore_to_jsonb_loose(PG_FUNCTION_ARGS)
 
        for (i = 0; i < count; i++)
        {
-               JsonbValue key, val;
+               JsonbValue      key,
+                                       val;
 
                key.estSize = sizeof(JEntry);
                key.type = jbvString;
@@ -1507,7 +1509,8 @@ hstore_to_jsonb_loose(PG_FUNCTION_ARGS)
                        {
                                val.type = jbvNumeric;
                                val.val.numeric = DatumGetNumeric(
-                                       DirectFunctionCall3(numeric_in, CStringGetDatum(tmp.data), 0, -1));
+                                                                                                 DirectFunctionCall3(numeric_in, CStringGetDatum(tmp.data), 0, -1));
+
                                val.estSize += VARSIZE_ANY(val.val.numeric) +sizeof(JEntry);
                        }
                        else
contrib/intarray/_int_bool.c
index 566a6c25056849e8591b99f664ed06d069da7ded..c3c39d194bc89e9bf49139f9cd5819b066c66d2f 100644 (file)
@@ -345,7 +345,7 @@ gin_bool_consistent(QUERYTYPE *query, bool *check)
                return FALSE;
 
        /*
-        * Set up data for checkcondition_gin.  This must agree with the query
+        * Set up data for checkcondition_gin.  This must agree with the query
         * extraction code in ginint4_queryextract.
         */
        gcv.first = items;
contrib/intarray/_int_gist.c
index d64a81a5595e26f6289f4edcd45152ad3e987013..53abcc45a5ffe952cec62a4bee58d2a106e78fd8 100644 (file)
@@ -472,7 +472,7 @@ g_int_picksplit(PG_FUNCTION_ARGS)
        qsort((void *) costvector, maxoff, sizeof(SPLITCOST), comparecost);
 
        /*
-        * Now split up the regions between the two seeds.      An important property
+        * Now split up the regions between the two seeds.  An important property
         * of this split algorithm is that the split vector v has the indices of
         * items to be split in order in its left and right vectors.  We exploit
         * this property by doing a merge in the code that actually splits the
@@ -490,7 +490,7 @@ g_int_picksplit(PG_FUNCTION_ARGS)
 
                /*
                 * If we've already decided where to place this item, just put it on
-                * the right list.      Otherwise, we need to figure out which page needs
+                * the right list.  Otherwise, we need to figure out which page needs
                 * the least enlargement in order to store the item.
                 */
 
contrib/intarray/_int_tool.c
index fc462b20766054cff68fb6e0b345db58a77eacc8..511c7acb54e811c5b3612230149253309e9f3f2e 100644 (file)
@@ -184,7 +184,7 @@ rt__int_size(ArrayType *a, float *size)
        *size = (float) ARRNELEMS(a);
 }
 
-/* Sort the given data (len >= 2).     Return true if any duplicates found */
+/* Sort the given data (len >= 2).  Return true if any duplicates found */
 bool
 isort(int32 *a, int len)
 {
@@ -196,7 +196,7 @@ isort(int32 *a, int len)
        bool            r = FALSE;
 
        /*
-        * We use a simple insertion sort.      While this is O(N^2) in the worst
+        * We use a simple insertion sort.  While this is O(N^2) in the worst
         * case, it's quite fast if the input is already sorted or nearly so.
         * Also, for not-too-large inputs it's faster than more complex methods
         * anyhow.
contrib/intarray/bench/bench.pl
index 76f66288c0fc8c9425cc3b528ff8604714288fd4..87462911147451428e9aa7c3ee016fc40570e1d6 100755 (executable)
@@ -80,7 +80,7 @@ else
        $outf = ($opt{u}) ? 'distinct( message.mid )' : 'message.mid';
 }
 my $sql =
-       "select $outf from "
+    "select $outf from "
   . join(', ', keys %table)
   . " where "
   . join(' AND ', @where) . ';';
contrib/ltree/ltree_op.c
index 992106f05ac18f04b48f2f38a7f3763ff4a92527..4561073fa0933043bee915ed211e8b518fe78641 100644 (file)
@@ -593,7 +593,7 @@ ltreeparentsel(PG_FUNCTION_ARGS)
                /*
                 * If the histogram is large enough, see what fraction of it the
                 * constant is "<@" to, and assume that's representative of the
-                * non-MCV population.  Otherwise use the default selectivity for the
+                * non-MCV population.  Otherwise use the default selectivity for the
                 * non-MCV population.
                 */
                selec = histogram_selectivity(&vardata, &contproc,
contrib/oid2name/oid2name.c
index d8565941f90997726efe0dc5e141e6c957be0878..e5eeec21c153f6138d58ae77cb2d9cb824f6bba2 100644 (file)
@@ -407,7 +407,7 @@ sql_exec(PGconn *conn, const char *todo, bool quiet)
 }
 
 /*
- * Dump all databases. There are no system objects to worry about.
+ * Dump all databases.  There are no system objects to worry about.
  */
 void
 sql_exec_dumpalldbs(PGconn *conn, struct options * opts)
@@ -503,20 +503,20 @@ sql_exec_searchtables(PGconn *conn, struct options * opts)
 
        /* now build the query */
        todo = psprintf(
-                        "SELECT pg_catalog.pg_relation_filenode(c.oid) as \"Filenode\", relname as \"Table Name\" %s\n"
-                        "FROM pg_catalog.pg_class c \n"
-                "      LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace \n"
-                        "      LEFT JOIN pg_catalog.pg_database d ON d.datname = pg_catalog.current_database(),\n"
-                        "      pg_catalog.pg_tablespace t \n"
-                        "WHERE relkind IN ('r', 'm', 'i', 'S', 't') AND \n"
-                        "              t.oid = CASE\n"
-                        "                      WHEN reltablespace <> 0 THEN reltablespace\n"
-                        "                      ELSE dattablespace\n"
-                        "              END AND \n"
-                        "  (%s) \n"
-                        "ORDER BY relname\n",
-                        opts->extended ? addfields : "",
-                        qualifiers);
+                                       "SELECT pg_catalog.pg_relation_filenode(c.oid) as \"Filenode\", relname as \"Table Name\" %s\n"
+                                       "FROM pg_catalog.pg_class c \n"
+                                       "       LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace \n"
+                                       "       LEFT JOIN pg_catalog.pg_database d ON d.datname = pg_catalog.current_database(),\n"
+                                       "       pg_catalog.pg_tablespace t \n"
+                                       "WHERE relkind IN ('r', 'm', 'i', 'S', 't') AND \n"
+                                       "               t.oid = CASE\n"
+                                       "                       WHEN reltablespace <> 0 THEN reltablespace\n"
+                                       "                       ELSE dattablespace\n"
+                                       "               END AND \n"
+                                       "  (%s) \n"
+                                       "ORDER BY relname\n",
+                                       opts->extended ? addfields : "",
+                                       qualifiers);
 
        free(qualifiers);
 
index 66c88b17afa600e3581bc9e03a25594724d5e65c..cc66fc8b04d9907da2ee10a8382e38980090f475 100644 (file)
@@ -208,7 +208,8 @@ page_header(PG_FUNCTION_ARGS)
        /* pageinspect >= 1.2 uses pg_lsn instead of text for the LSN field. */
        if (tupdesc->attrs[0]->atttypid == TEXTOID)
        {
-               char    lsnchar[64];
+               char            lsnchar[64];
+
                snprintf(lsnchar, sizeof(lsnchar), "%X/%X",
                                 (uint32) (lsn >> 32), (uint32) lsn);
                values[0] = CStringGetTextDatum(lsnchar);
index 39348c6db107ccee2caa0441d9d9a0171c204d13..212b267fcfa6cdd4b406570e325c54effcf704bc 100644 (file)
@@ -113,7 +113,7 @@ CleanupPriorWALFiles(void)
 
                        /*
                         * We ignore the timeline part of the XLOG segment identifiers in
-                        * deciding whether a segment is still needed.  This ensures that
+                        * deciding whether a segment is still needed.  This ensures that
                         * we won't prematurely remove a segment from a parent timeline.
                         * We could probably be a little more proactive about removing
                         * segments of non-parent timelines, but that would be a whole lot
@@ -140,7 +140,7 @@ CleanupPriorWALFiles(void)
                                {
                                        /*
                                         * Prints the name of the file to be removed and skips the
-                                        * actual removal.      The regular printout is so that the
+                                        * actual removal.  The regular printout is so that the
                                         * user can pipe the output into some other program.
                                         */
                                        printf("%s\n", WALFilePath);
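
A minimal sketch of the comparison this comment describes, assuming the usual 24-character WAL segment names (8 hex digits of timeline followed by 16 hex digits identifying the segment); the function name is illustrative and this is not the pg_archivecleanup code itself.

#include <stdbool.h>
#include <string.h>

/* True if "walfile" sorts before the oldest file we must keep, ignoring the
 * first 8 characters (the timeline ID) of both 24-character names. */
static bool
segment_is_removable(const char *walfile, const char *oldest_kept)
{
    return strncmp(walfile + 8, oldest_kept + 8, 16) < 0;
}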
index d50726d109ba7a7c7031c89060b4780e0a012a95..df20e888eff08d927475d535312a114a6e0ae260 100644 (file)
@@ -45,7 +45,7 @@ static char blockbuffer[BLCKSZ];
  *
  * The first argument is the relation to be prewarmed; the second controls
  * how prewarming is done; legal options are 'prefetch', 'read', and 'buffer'.
- * The third is the name of the relation fork to be prewarmed. The fourth
+ * The third is the name of the relation fork to be prewarmed.  The fourth
  * and fifth arguments specify the first and last block to be prewarmed.
  * If the fourth argument is NULL, it will be taken as 0; if the fifth argument
  * is NULL, it will be taken as the number of blocks in the relation.  The
index de82ef5a1f5fcf5c5be82694d95864093e07e275..07f09e1e94100fec16e84f50e95dce4f7569c9c7 100644 (file)
@@ -4,7 +4,7 @@
  *             Track statement execution times across a whole database cluster.
  *
  * Execution costs are totalled for each distinct source query, and kept in
- * a shared hashtable. (We track only as many distinct queries as will fit
+ * a shared hashtable.  (We track only as many distinct queries as will fit
  * in the designated amount of shared memory.)
  *
  * As of Postgres 9.2, this module normalizes query entries.  Normalization
@@ -15,7 +15,7 @@
  *
  * Normalization is implemented by fingerprinting queries, selectively
  * serializing those fields of each query tree's nodes that are judged to be
- * essential to the query.     This is referred to as a query jumble.  This is
+ * essential to the query.  This is referred to as a query jumble.  This is
  * distinct from a regular serialization in that various extraneous
  * information is ignored as irrelevant or not essential to the query, such
  * as the collations of Vars and, most notably, the values of constants.
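
A toy sketch of the "query jumble" idea described above: append only shape-identifying fields to a buffer, deliberately skipping constant values and Var collations, then hash the accumulated bytes. The node fields, buffer type, and function names here are placeholders, not the pg_stat_statements implementation.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

typedef struct { uint8_t buf[1024]; size_t len; } Jumble;

/* Append raw bytes to the fingerprint buffer (silently stops when full). */
static void
jumble_bytes(Jumble *j, const void *p, size_t n)
{
    if (j->len + n <= sizeof(j->buf))
    {
        memcpy(j->buf + j->len, p, n);
        j->len += n;
    }
}

/* A Var contributes which relation/column it refers to... */
static void
jumble_var(Jumble *j, int relindex, int attno)
{
    jumble_bytes(j, &relindex, sizeof(relindex));
    jumble_bytes(j, &attno, sizeof(attno));
    /* ...but its collation is deliberately ignored. */
}

/* A Const contributes only its type, never its value, so "bar = 1" and
 * "bar = -2" produce the same fingerprint. */
static void
jumble_const(Jumble *j, int consttype)
{
    jumble_bytes(j, &consttype, sizeof(consttype));
}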
@@ -615,7 +615,7 @@ pgss_shmem_startup(void)
         * because we remove that file on startup; it acts inversely to
         * PGSS_DUMP_FILE, in that it is only supposed to be around when the
         * server is running, whereas PGSS_DUMP_FILE is only supposed to be around
-        * when the server is not running.      Leaving the file creates no danger of
+        * when the server is not running.  Leaving the file creates no danger of
         * a newly restored database having a spurious record of execution costs,
         * which is what we're really concerned about here.
         */
@@ -702,7 +702,7 @@ pgss_shmem_shutdown(int code, Datum arg)
 
        /*
         * When serializing to disk, we store query texts immediately after their
-        * entry data.  Any orphaned query texts are thereby excluded.
+        * entry data.  Any orphaned query texts are thereby excluded.
         */
        hash_seq_init(&hash_seq, pgss_hash);
        while ((entry = hash_seq_search(&hash_seq)) != NULL)
@@ -1363,9 +1363,9 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo,
 
        /*
         * We'd like to load the query text file (if needed) while not holding any
-        * lock on pgss->lock.  In the worst case we'll have to do this again
+        * lock on pgss->lock.  In the worst case we'll have to do this again
         * after we have the lock, but it's unlikely enough to make this a win
-        * despite occasional duplicated work.  We need to reload if anybody
+        * despite occasional duplicated work.  We need to reload if anybody
         * writes to the file (either a retail qtext_store(), or a garbage
         * collection) between this point and where we've gotten shared lock.  If
         * a qtext_store is actually in progress when we look, we might as well
@@ -1572,7 +1572,7 @@ pgss_memsize(void)
  * would be difficult to demonstrate this even under artificial conditions.)
  *
  * Note: despite needing exclusive lock, it's not an error for the target
- * entry to already exist.     This is because pgss_store releases and
+ * entry to already exist.  This is because pgss_store releases and
  * reacquires lock after failing to find a match; so someone else could
  * have made the entry while we waited to get exclusive lock.
  */
@@ -1692,13 +1692,13 @@ entry_dealloc(void)
  * have it handy, so we require them to pass it too.
  *
  * If successful, returns true, and stores the new entry's offset in the file
- * into *query_offset. Also, if gc_count isn't NULL, *gc_count is set to the
+ * into *query_offset.  Also, if gc_count isn't NULL, *gc_count is set to the
  * number of garbage collections that have occurred so far.
  *
  * On failure, returns false.
  *
  * At least a shared lock on pgss->lock must be held by the caller, so as
- * to prevent a concurrent garbage collection. Share-lock-holding callers
+ * to prevent a concurrent garbage collection.  Share-lock-holding callers
  * should pass a gc_count pointer to obtain the number of garbage collections,
  * so that they can recheck the count after obtaining exclusive lock to
  * detect whether a garbage collection occurred (and removed this entry).
@@ -1940,7 +1940,7 @@ gc_qtexts(void)
        /*
         * When called from pgss_store, some other session might have proceeded
         * with garbage collection in the no-lock-held interim of lock strength
-        * escalation.  Check once more that this is actually necessary.
+        * escalation.  Check once more that this is actually necessary.
         */
        if (!need_gc_qtexts())
                return;
@@ -2005,7 +2005,7 @@ gc_qtexts(void)
        }
 
        /*
-        * Truncate away any now-unused space.  If this fails for some odd reason,
+        * Truncate away any now-unused space.  If this fails for some odd reason,
         * we log it, but there's no need to fail.
         */
        if (ftruncate(fileno(qfile), extent) != 0)
@@ -2258,7 +2258,7 @@ JumbleRangeTable(pgssJumbleState *jstate, List *rtable)
  *
  * Note: the reason we don't simply use expression_tree_walker() is that the
  * point of that function is to support tree walkers that don't care about
- * most tree node types, but here we care about all types.     We should complain
+ * most tree node types, but here we care about all types.  We should complain
  * about any unrecognized node type.
  */
 static void
@@ -2772,7 +2772,7 @@ generate_normalized_query(pgssJumbleState *jstate, const char *query,
  * a problem.
  *
  * Duplicate constant pointers are possible, and will have their lengths
- * marked as '-1', so that they are later ignored.     (Actually, we assume the
+ * marked as '-1', so that they are later ignored.  (Actually, we assume the
  * lengths were initialized as -1 to start with, and don't change them here.)
  *
  * N.B. There is an assumption that a '-' character at a Const location begins
@@ -2841,7 +2841,7 @@ fill_in_constant_lengths(pgssJumbleState *jstate, const char *query)
                                         * adjustment of location to that of the leading '-'
                                         * operator in the event of a negative constant.  It is
                                         * also useful for our purposes to start from the minus
-                                        * symbol.      In this way, queries like "select * from foo
+                                        * symbol.  In this way, queries like "select * from foo
                                         * where bar = 1" and "select * from foo where bar = -2"
                                         * will have identical normalized query strings.
                                         */
index 682821a8d787392fc4fa9ffa99e00d2cb8b77067..842295ae3d97681e24cf903f99be767b9620a7ca 100644 (file)
@@ -369,12 +369,13 @@ test_sync(int writes_per_op)
                {
                        for (writes = 0; writes < writes_per_op; writes++)
                                if (write(tmpfile, buf, XLOG_BLCKSZ) != XLOG_BLCKSZ)
-                               /*
-                                * This can generate write failures if the filesystem
-                                * has a large block size, e.g. 4k, and there is no
-                                * support for O_DIRECT writes smaller than the
-                                * file system block size, e.g. XFS.
-                                */
+
+                                       /*
+                                        * This can generate write failures if the filesystem has
+                                        * a large block size, e.g. 4k, and there is no support
+                                        * for O_DIRECT writes smaller than the file system block
+                                        * size, e.g. XFS.
+                                        */
                                        die("write failed");
                        if (lseek(tmpfile, 0, SEEK_SET) == -1)
                                die("seek failed");
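
For illustration, a standalone sketch of a write that satisfies the usual Linux O_DIRECT constraints (aligned buffer, length a multiple of the filesystem block size); the 4096-byte block size is an assumption, and a shorter or unaligned write is exactly what can fail in the way this comment warns about.

#define _GNU_SOURCE             /* for O_DIRECT on Linux */
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

/* Write one filesystem-block-sized chunk with O_DIRECT.  Both the buffer
 * address and the write length normally must be multiples of the filesystem
 * block size (assumed 4096 here); a smaller write can fail with EINVAL. */
static int
direct_write_block(const char *path)
{
    const size_t blksz = 4096;
    void       *buf;
    int         fd;
    ssize_t     written;

    if (posix_memalign(&buf, blksz, blksz) != 0)
        return -1;
    memset(buf, 0, blksz);

    fd = open(path, O_WRONLY | O_CREAT | O_DIRECT, 0600);
    if (fd < 0)
    {
        free(buf);
        return -1;
    }
    written = write(fd, buf, blksz);
    close(fd);
    free(buf);
    return (written == (ssize_t) blksz) ? 0 : -1;
}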
index ac18c7b80f18ffdd72723b62726ff16da8e941d7..c59925c5750b270eb58b2777708a0aec19b0826d 100644 (file)
@@ -114,7 +114,7 @@ gin_extract_query_trgm(PG_FUNCTION_ARGS)
                        {
                                /*
                                 * Successful regex processing: store NFA-like graph as
-                                * extra_data.  GIN API requires an array of nentries
+                                * extra_data.  GIN API requires an array of nentries
                                 * Pointers, but we just put the same value in each element.
                                 */
                                trglen = ARRNELEM(trg);
index ede969f424cd78479c778a2f6f194ea42d1a8fae..69dc7f71f07069810b01e1484095c89abed97969 100644 (file)
@@ -386,7 +386,7 @@ gtrgm_consistent(PG_FUNCTION_ARGS)
 
                                        /*
                                         * GETBIT() tests may give false positives, due to limited
-                                        * size of the sign array.      But since trigramsMatchGraph()
+                                        * size of the sign array.  But since trigramsMatchGraph()
                                         * implements a monotone boolean function, false positives
                                         * in the check array can't lead to false negative answer.
                                         * So we can apply trigramsMatchGraph despite uncertainty,
index 9720408a025723ef7ea93468bcc26bea100dbd1a..9f050533c59853d487590dca194cea425ce583e2 100644 (file)
@@ -62,7 +62,7 @@
  * In the 2nd stage, the automaton is transformed into a graph based on the
  * original NFA.  Each state in the expanded graph represents a state from
  * the original NFA, plus a prefix identifying the last two characters
- * (colors, to be precise) seen before entering the state.     There can be
+ * (colors, to be precise) seen before entering the state.  There can be
  * multiple states in the expanded graph for each state in the original NFA,
  * depending on what characters can precede it.  A prefix position can be
  * "unknown" if it's uncertain what the preceding character was, or "blank"
@@ -74,7 +74,7 @@
  * "enter key".
  *
  * Each arc of the expanded graph is labelled with a trigram that must be
- * present in the string to match.     We can construct this from an out-arc of
+ * present in the string to match.  We can construct this from an out-arc of
  * the underlying NFA state by combining the expanded state's prefix with the
  * color label of the underlying out-arc, if neither prefix position is
  * "unknown".  But note that some of the colors in the trigram might be
  *
  * When building the graph, if the number of states or arcs exceed pre-defined
  * limits, we give up and simply mark any states not yet processed as final
- * states.     Roughly speaking, that means that we make use of some portion from
+ * states.  Roughly speaking, that means that we make use of some portion from
  * the beginning of the regexp.  Also, any colors that have too many member
  * characters are treated as "unknown", so that we can't derive trigrams
  * from them.
  * 1) Create state 1 with enter key (UNKNOWN, UNKNOWN, 1).
  * 2) Add key (UNKNOWN, "a", 2) to state 1.
  * 3) Add key ("a", "b", 3) to state 1.
- * 4) Create new state 2 with enter key ("b", "c", 4). Add an arc
+ * 4) Create new state 2 with enter key ("b", "c", 4).  Add an arc
  *       from state 1 to state 2 with label trigram "abc".
  * 5) Mark state 2 final because state 4 of source NFA is marked as final.
- * 6) Create new state 3 with enter key ("b", "d", 5). Add an arc
+ * 6) Create new state 3 with enter key ("b", "d", 5).  Add an arc
  *       from state 1 to state 3 with label trigram "abd".
  * 7) Mark state 3 final because state 5 of source NFA is marked as final.
  *
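
An illustrative sketch of how an expanded-graph arc gets its trigram label, per the description above: the state's two-color prefix plus the out-arc's color, provided neither prefix position is unknown. The types and names below are placeholders, not the pg_trgm structures.

#include <stdbool.h>

#define COLOR_UNKNOWN (-1)

typedef struct { int colors[2]; } Prefix;       /* last two colors seen */
typedef struct { int c1, c2, c3; } ColorTrgm;   /* trigram of colors */

/* Combine the expanded state's prefix with an out-arc's color to form the
 * trigram that labels the expanded arc; an ambiguous prefix yields no
 * trigram (the real code adds an enter key to the destination instead). */
static bool
make_arc_trigram(const Prefix *prefix, int arc_color, ColorTrgm *out)
{
    if (prefix->colors[0] == COLOR_UNKNOWN ||
        prefix->colors[1] == COLOR_UNKNOWN)
        return false;

    out->c1 = prefix->colors[0];
    out->c2 = prefix->colors[1];
    out->c3 = arc_color;
    return true;
}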
@@ -273,10 +273,10 @@ typedef struct
  *
  * We call a prefix ambiguous if at least one of its colors is unknown.  It's
  * fully ambiguous if both are unknown, partially ambiguous if only the first
- * is unknown. (The case of first color known, second unknown is not valid.)
+ * is unknown.  (The case of first color known, second unknown is not valid.)
  *
  * Wholly- or partly-blank prefixes are mostly handled the same as regular
- * color prefixes.     This allows us to generate appropriate partly-blank
+ * color prefixes.  This allows us to generate appropriate partly-blank
  * trigrams when the NFA requires word character(s) to appear adjacent to
  * non-word character(s).
  */
@@ -302,7 +302,7 @@ typedef struct
 
 /*
  * Key identifying a state of our expanded graph: color prefix, and number
- * of the corresponding state in the underlying regex NFA.     The color prefix
+ * of the corresponding state in the underlying regex NFA.  The color prefix
  * shows how we reached the regex state (to the extent that we know it).
  */
 typedef struct
@@ -437,7 +437,7 @@ struct TrgmPackedGraph
         * colorTrigramsCount and colorTrigramsGroups contain information about
         * how trigrams are grouped into color trigrams.  "colorTrigramsCount" is
         * the count of color trigrams and "colorTrigramGroups" contains number of
-        * simple trigrams for each color trigram.      The array of simple trigrams
+        * simple trigrams for each color trigram.  The array of simple trigrams
         * (stored separately from this struct) is ordered so that the simple
         * trigrams for each color trigram are consecutive, and they're in order
         * by color trigram number.
@@ -524,7 +524,7 @@ createTrgmNFA(text *text_re, Oid collation,
        /*
         * This processing generates a great deal of cruft, which we'd like to
         * clean up before returning (since this function may be called in a
-        * query-lifespan memory context).      Make a temp context we can work in so
+        * query-lifespan memory context).  Make a temp context we can work in so
         * that cleanup is easy.
         */
        tmpcontext = AllocSetContextCreate(CurrentMemoryContext,
@@ -840,7 +840,7 @@ convertPgWchar(pg_wchar c, trgm_mb_char *result)
 
        /*
         * We can ignore the NUL character, since it can never appear in a PG text
-        * string.      This avoids the need for various special cases when
+        * string.  This avoids the need for various special cases when
         * reconstructing trigrams.
         */
        if (c == 0)
@@ -851,7 +851,7 @@ convertPgWchar(pg_wchar c, trgm_mb_char *result)
        pg_wchar2mb_with_len(&c, s, 1);
 
        /*
-        * In IGNORECASE mode, we can ignore uppercase characters.      We assume that
+        * In IGNORECASE mode, we can ignore uppercase characters.  We assume that
         * the regex engine generated both uppercase and lowercase equivalents
         * within each color, since we used the REG_ICASE option; so there's no
         * need to process the uppercase version.
@@ -933,7 +933,7 @@ transformGraph(TrgmNFA *trgmNFA)
 
        /*
         * Recursively build the expanded graph by processing queue of states
-        * (breadth-first search).      getState already put initstate in the queue.
+        * (breadth-first search).  getState already put initstate in the queue.
         */
        while (trgmNFA->queue != NIL)
        {
@@ -942,7 +942,7 @@ transformGraph(TrgmNFA *trgmNFA)
                trgmNFA->queue = list_delete_first(trgmNFA->queue);
 
                /*
-                * If we overflowed then just mark state as final.      Otherwise do
+                * If we overflowed then just mark state as final.  Otherwise do
                 * actual processing.
                 */
                if (trgmNFA->overflowed)
@@ -968,7 +968,7 @@ processState(TrgmNFA *trgmNFA, TrgmState *state)
 
        /*
         * Add state's own key, and then process all keys added to keysQueue until
-        * queue is empty.      But we can quit if the state gets marked final.
+        * queue is empty.  But we can quit if the state gets marked final.
         */
        addKey(trgmNFA, state, &state->stateKey);
        while (trgmNFA->keysQueue != NIL && !state->fin)
@@ -1022,7 +1022,7 @@ addKey(TrgmNFA *trgmNFA, TrgmState *state, TrgmStateKey *key)
 
        /*
         * Compare key to each existing enter key of the state to check for
-        * redundancy.  We can drop either old key(s) or the new key if we find
+        * redundancy.  We can drop either old key(s) or the new key if we find
         * redundancy.
         */
        prev = NULL;
@@ -1096,7 +1096,7 @@ addKey(TrgmNFA *trgmNFA, TrgmState *state, TrgmStateKey *key)
                else if (pg_reg_colorisend(trgmNFA->regex, arc->co))
                {
                        /*
-                        * End of line/string ($).      We must consider this arc as a
+                        * End of line/string ($).  We must consider this arc as a
                         * transition that doesn't read anything.  The reason for adding
                         * this enter key to the state is that if the arc leads to the
                         * NFA's final state, we must mark this expanded state as final.
@@ -1141,7 +1141,7 @@ addKey(TrgmNFA *trgmNFA, TrgmState *state, TrgmStateKey *key)
                                         * We can reach the arc destination after reading a word
                                         * character, but the prefix is not something that addArc
                                         * will accept, so no trigram arc can get made for this
-                                        * transition.  We must make an enter key to show that the
+                                        * transition.  We must make an enter key to show that the
                                         * arc destination is reachable.  The prefix for the enter
                                         * key should reflect the info we have for this arc.
                                         */
@@ -1154,9 +1154,9 @@ addKey(TrgmNFA *trgmNFA, TrgmState *state, TrgmStateKey *key)
                        else
                        {
                                /*
-                                * Unexpandable color.  Add enter key with ambiguous prefix,
+                                * Unexpandable color.  Add enter key with ambiguous prefix,
                                 * showing we can reach the destination from this state, but
-                                * the preceding colors will be uncertain.      (We do not set the
+                                * the preceding colors will be uncertain.  (We do not set the
                                 * first prefix color to key->prefix.colors[1], because a
                                 * prefix of known followed by unknown is invalid.)
                                 */
@@ -1345,9 +1345,9 @@ validArcLabel(TrgmStateKey *key, TrgmColor co)
                return false;
 
        /*
-        * We also reject nonblank-blank-anything.      The nonblank-blank-nonblank
+        * We also reject nonblank-blank-anything.  The nonblank-blank-nonblank
         * case doesn't correspond to any trigram the trigram extraction code
-        * would make.  The nonblank-blank-blank case is also not possible with
+        * would make.  The nonblank-blank-blank case is also not possible with
         * RPADDING = 1.  (Note that in many cases we'd fail to generate such a
         * trigram even if it were valid, for example processing "foo bar" will
         * not result in considering the trigram "o  ".  So if you want to support
@@ -1557,7 +1557,7 @@ selectColorTrigrams(TrgmNFA *trgmNFA)
 
        /*
         * Remove color trigrams from the graph so long as total penalty of color
-        * trigrams exceeds WISH_TRGM_PENALTY.  (If we fail to get down to
+        * trigrams exceeds WISH_TRGM_PENALTY.  (If we fail to get down to
         * WISH_TRGM_PENALTY, it's OK so long as total count is no more than
         * MAX_TRGM_COUNT.)  We prefer to remove color trigrams with higher
         * penalty, since those are the most promising for reducing the total
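
A greedy sketch of the selection loop this comment outlines, assuming the color trigrams have already been sorted by descending penalty; the constant name and data layout are illustrative, not the pg_trgm definitions.

#include <stdbool.h>

/* Drop the most expensive color trigrams until the total penalty is under
 * the target; returns how many remain.  The caller would still enforce a
 * hard cap on the total simple-trigram count. */
static int
trim_by_penalty(const double *penalty_desc, int ntrgms, double wish_penalty,
                bool *removed)
{
    double total = 0.0;
    int     kept = ntrgms;

    for (int i = 0; i < ntrgms; i++)
    {
        total += penalty_desc[i];
        removed[i] = false;
    }

    for (int i = 0; i < ntrgms && total > wish_penalty; i++)
    {
        removed[i] = true;      /* highest-penalty trigrams go first */
        total -= penalty_desc[i];
        kept--;
    }
    return kept;
}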
index d22b6d38e291da113c796833ec651ed57fbfc7fa..20b117bba772479a680d8965aebe01d89f2ea6f7 100644 (file)
@@ -278,7 +278,7 @@ check_cluster_versions(void)
        /* Only current PG version is supported as a target */
        if (GET_MAJOR_VERSION(new_cluster.major_version) != GET_MAJOR_VERSION(PG_VERSION_NUM))
                pg_fatal("This utility can only upgrade to PostgreSQL version %s.\n",
-                          PG_MAJORVERSION);
+                                PG_MAJORVERSION);
 
        /*
         * We can't allow downgrading because we use the target pg_dumpall, and
@@ -316,17 +316,17 @@ check_cluster_compatibility(bool live_check)
        if (GET_MAJOR_VERSION(new_cluster.major_version) == 900 &&
                new_cluster.controldata.cat_ver < TABLE_SPACE_SUBDIRS_CAT_VER)
                pg_fatal("This utility can only upgrade to PostgreSQL version 9.0 after 2010-01-11\n"
-                          "because of backend API changes made during development.\n");
+                                "because of backend API changes made during development.\n");
 
        /* We read the real port number for PG >= 9.1 */
        if (live_check && GET_MAJOR_VERSION(old_cluster.major_version) < 901 &&
                old_cluster.port == DEF_PGUPORT)
                pg_fatal("When checking a pre-PG 9.1 live old server, "
-                          "you must specify the old server's port number.\n");
+                                "you must specify the old server's port number.\n");
 
        if (live_check && old_cluster.port == new_cluster.port)
                pg_fatal("When checking a live server, "
-                          "the old and new port numbers must be different.\n");
+                                "the old and new port numbers must be different.\n");
 }
 
 
@@ -438,7 +438,7 @@ equivalent_locale(const char *loca, const char *locb)
                return (pg_strcasecmp(loca, locb) == 0);
 
        /*
-        * Compare the encoding parts.  Windows tends to use code page numbers for
+        * Compare the encoding parts.  Windows tends to use code page numbers for
         * the encoding part, which equivalent_encoding() won't like, so accept if
         * the strings are case-insensitive equal; otherwise use
         * equivalent_encoding() to compare.
@@ -499,7 +499,7 @@ check_new_cluster_is_empty(void)
                        /* pg_largeobject and its index should be skipped */
                        if (strcmp(rel_arr->rels[relnum].nspname, "pg_catalog") != 0)
                                pg_fatal("New cluster database \"%s\" is not empty\n",
-                                          new_cluster.dbarr.dbs[dbnum].db_name);
+                                                new_cluster.dbarr.dbs[dbnum].db_name);
                }
        }
 
@@ -526,7 +526,7 @@ create_script_for_cluster_analyze(char **analyze_script_file_name)
 
        if ((script = fopen_priv(*analyze_script_file_name, "w")) == NULL)
                pg_fatal("Could not open file \"%s\": %s\n",
-                          *analyze_script_file_name, getErrorText(errno));
+                                *analyze_script_file_name, getErrorText(errno));
 
 #ifndef WIN32
        /* add shebang header */
@@ -581,7 +581,7 @@ create_script_for_cluster_analyze(char **analyze_script_file_name)
 #ifndef WIN32
        if (chmod(*analyze_script_file_name, S_IRWXU) != 0)
                pg_fatal("Could not add execute permission to file \"%s\": %s\n",
-                          *analyze_script_file_name, getErrorText(errno));
+                                *analyze_script_file_name, getErrorText(errno));
 #endif
 
        if (os_info.user_specified)
@@ -632,7 +632,7 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name)
 
        if ((script = fopen_priv(*deletion_script_file_name, "w")) == NULL)
                pg_fatal("Could not open file \"%s\": %s\n",
-                          *deletion_script_file_name, getErrorText(errno));
+                                *deletion_script_file_name, getErrorText(errno));
 
 #ifndef WIN32
        /* add shebang header */
@@ -668,7 +668,7 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name)
                }
                else
                {
-                       char    *suffix_path = pg_strdup(old_cluster.tablespace_suffix);
+                       char       *suffix_path = pg_strdup(old_cluster.tablespace_suffix);
 
                        /*
                         * Simply delete the tablespace directory, which might be ".old"
@@ -686,7 +686,7 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name)
 #ifndef WIN32
        if (chmod(*deletion_script_file_name, S_IRWXU) != 0)
                pg_fatal("Could not add execute permission to file \"%s\": %s\n",
-                          *deletion_script_file_name, getErrorText(errno));
+                                *deletion_script_file_name, getErrorText(errno));
 #endif
 
        check_ok();
@@ -714,7 +714,7 @@ check_is_super_user(ClusterInfo *cluster)
 
        if (PQntuples(res) != 1 || strcmp(PQgetvalue(res, 0, 0), "t") != 0)
                pg_fatal("database user \"%s\" is not a superuser\n",
-                          os_info.user);
+                                os_info.user);
 
        cluster->install_role_oid = atooid(PQgetvalue(res, 0, 1));
 
@@ -757,7 +757,7 @@ check_for_prepared_transactions(ClusterInfo *cluster)
 
        if (PQntuples(res) != 0)
                pg_fatal("The %s cluster contains prepared transactions\n",
-                          CLUSTER_NAME(cluster));
+                                CLUSTER_NAME(cluster));
 
        PQclear(res);
 
@@ -822,7 +822,7 @@ check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster)
                        found = true;
                        if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
                                pg_fatal("Could not open file \"%s\": %s\n",
-                                          output_path, getErrorText(errno));
+                                                output_path, getErrorText(errno));
                        if (!db_used)
                        {
                                fprintf(script, "Database: %s\n", active_db->db_name);
@@ -847,10 +847,10 @@ check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster)
                pg_fatal("Your installation contains \"contrib/isn\" functions which rely on the\n"
                  "bigint data type.  Your old and new clusters pass bigint values\n"
                "differently so this cluster cannot currently be upgraded.  You can\n"
-                          "manually upgrade databases that use \"contrib/isn\" facilities and remove\n"
-                          "\"contrib/isn\" from the old cluster and restart the upgrade.  A list of\n"
-                          "the problem functions is in the file:\n"
-                          "    %s\n\n", output_path);
+                                "manually upgrade databases that use \"contrib/isn\" facilities and remove\n"
+                                "\"contrib/isn\" from the old cluster and restart the upgrade.  A list of\n"
+                                "the problem functions is in the file:\n"
+                                "    %s\n\n", output_path);
        }
        else
                check_ok();
@@ -926,7 +926,7 @@ check_for_reg_data_type_usage(ClusterInfo *cluster)
                        found = true;
                        if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
                                pg_fatal("Could not open file \"%s\": %s\n",
-                                          output_path, getErrorText(errno));
+                                                output_path, getErrorText(errno));
                        if (!db_used)
                        {
                                fprintf(script, "Database: %s\n", active_db->db_name);
@@ -952,9 +952,9 @@ check_for_reg_data_type_usage(ClusterInfo *cluster)
                pg_fatal("Your installation contains one of the reg* data types in user tables.\n"
                 "These data types reference system OIDs that are not preserved by\n"
                "pg_upgrade, so this cluster cannot currently be upgraded.  You can\n"
-                          "remove the problem tables and restart the upgrade.  A list of the problem\n"
-                          "columns is in the file:\n"
-                          "    %s\n\n", output_path);
+                                "remove the problem tables and restart the upgrade.  A list of the problem\n"
+                                "columns is in the file:\n"
+                                "    %s\n\n", output_path);
        }
        else
                check_ok();
@@ -975,7 +975,7 @@ get_bin_version(ClusterInfo *cluster)
        if ((output = popen(cmd, "r")) == NULL ||
                fgets(cmd_output, sizeof(cmd_output), output) == NULL)
                pg_fatal("Could not get pg_ctl version data using %s: %s\n",
-                          cmd, getErrorText(errno));
+                                cmd, getErrorText(errno));
 
        pclose(output);
 
index 476c6be276e0efd317be6e98f30b8f42050dfb2b..2906ccbf8c8f7c8074c8f738c97a1d86c5107ff1 100644 (file)
@@ -27,7 +27,7 @@
  * pg_control data.  pg_resetxlog cannot be run while the server is running
  * so we use pg_controldata;  pg_controldata doesn't provide all the fields
  * we need to actually perform the upgrade, but it provides enough for
- * check mode. We do not implement pg_resetxlog -n because it is hard to
+ * check mode.  We do not implement pg_resetxlog -n because it is hard to
  * return valid xid data for a running server.
  */
 void
@@ -119,7 +119,7 @@ get_control_data(ClusterInfo *cluster, bool live_check)
 
        if ((output = popen(cmd, "r")) == NULL)
                pg_fatal("Could not get control data using %s: %s\n",
-                          cmd, getErrorText(errno));
+                                cmd, getErrorText(errno));
 
        /* Only pre-8.4 has these so if they are not set below we will check later */
        cluster->controldata.lc_collate = NULL;
@@ -156,8 +156,8 @@ get_control_data(ClusterInfo *cluster, bool live_check)
                        for (p = bufin; *p; p++)
                                if (!isascii(*p))
                                        pg_fatal("The 8.3 cluster's pg_controldata is incapable of outputting ASCII, even\n"
-                                                  "with LANG=C.  You must upgrade this cluster to a newer version of PostgreSQL\n"
-                                                  "8.3 to fix this bug.  PostgreSQL 8.3.7 and later are known to work properly.\n");
+                                                        "with LANG=C.  You must upgrade this cluster to a newer version of PostgreSQL\n"
+                                                        "8.3 to fix this bug.  PostgreSQL 8.3.7 and later are known to work properly.\n");
                }
 #endif
 
@@ -606,8 +606,8 @@ check_control_data(ControlData *oldctrl,
                 * This is a common 8.3 -> 8.4 upgrade problem, so we are more verbose
                 */
                pg_fatal("You will need to rebuild the new server with configure option\n"
-                          "--disable-integer-datetimes or get server binaries built with those\n"
-                          "options.\n");
+                                "--disable-integer-datetimes or get server binaries built with those\n"
+                                "options.\n");
        }
 
        /*
index 9b6bf4d0ede31ee05950d95ef8fb17d26ab7a569..6c7661049c7be5925343effacd7141ebc13e4cc3 100644 (file)
@@ -34,8 +34,8 @@ generate_old_dump(void)
 
        /*
         * Set umask for this function, all functions it calls, and all
-        * subprocesses/threads it creates.      We can't use fopen_priv()
-        * as Windows uses threads and umask is process-global.
+        * subprocesses/threads it creates.  We can't use fopen_priv() as Windows
+        * uses threads and umask is process-global.
         */
        old_umask = umask(S_IRWXG | S_IRWXO);
 
index 91e66e6ecf465766ef88558754d3989384d4aeae..6c217c902d86cc97f111ee7ba0c32a4b5aa0f725 100644 (file)
@@ -52,7 +52,7 @@ exec_prog(const char *log_file, const char *opt_log_file,
        va_list         ap;
 
 #ifdef WIN32
-static DWORD       mainThreadId = 0;
+       static DWORD mainThreadId = 0;
 
        /* We assume we are called from the primary thread first */
        if (mainThreadId == 0)
@@ -73,14 +73,15 @@ static DWORD       mainThreadId = 0;
        pg_log(PG_VERBOSE, "%s\n", cmd);
 
 #ifdef WIN32
+
        /*
-        * For some reason, Windows issues a file-in-use error if we write data
-        * to the log file from a non-primary thread just before we create a
-        * subprocess that also writes to the same log file.  One fix is to
-        * sleep for 100ms.  A cleaner fix is to write to the log file _after_
-        * the subprocess has completed, so we do this only when writing from
-        * a non-primary thread.  fflush(), running system() twice, and
-        * pre-creating the file do not see to help.
+        * For some reason, Windows issues a file-in-use error if we write data to
+        * the log file from a non-primary thread just before we create a
+        * subprocess that also writes to the same log file.  One fix is to sleep
+        * for 100ms.  A cleaner fix is to write to the log file _after_ the
+        * subprocess has completed, so we do this only when writing from a
+        * non-primary thread.  fflush(), running system() twice, and pre-creating
+        * the file do not see to help.
         */
        if (mainThreadId != GetCurrentThreadId())
                result = system(cmd);
@@ -101,7 +102,7 @@ static DWORD       mainThreadId = 0;
 
                for (iter = 0; iter < 4 && log == NULL; iter++)
                {
-                       pg_usleep(1000000);             /* 1 sec */
+                       pg_usleep(1000000); /* 1 sec */
                        log = fopen(log_file, "a");
                }
        }
@@ -154,11 +155,12 @@ static DWORD       mainThreadId = 0;
        }
 
 #ifndef WIN32
+
        /*
         * We can't do this on Windows because it will keep the "pg_ctl start"
         * output filename open until the server stops, so we do the \n\n above on
         * that platform.  We use a unique filename for "pg_ctl start" that is
-        * never reused while the server is running, so it works fine.  We could
+        * never reused while the server is running, so it works fine.  We could
         * log these commands to a third file, but that just adds complexity.
         */
        if ((log = fopen(log_file, "a")) == NULL)
@@ -189,7 +191,7 @@ pid_lock_file_exists(const char *datadir)
                /* ENOTDIR means we will throw a more useful error later */
                if (errno != ENOENT && errno != ENOTDIR)
                        pg_fatal("could not open file \"%s\" for reading: %s\n",
-                                  path, getErrorText(errno));
+                                        path, getErrorText(errno));
 
                return false;
        }
@@ -238,7 +240,7 @@ win32_check_directory_write_permissions(void)
        int                     fd;
 
        /*
-        * We open a file we would normally create anyway.      We do this even in
+        * We open a file we would normally create anyway.  We do this even in
         * 'check' mode, which isn't ideal, but this is the best we can do.
         */
        if ((fd = open(GLOBALS_DUMP_FILE, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR)) < 0)
@@ -255,7 +257,7 @@ win32_check_directory_write_permissions(void)
  *
  *     This function validates the given cluster directory - we search for a
  *     small set of subdirectories that we expect to find in a valid $PGDATA
- *     directory.      If any of the subdirectories are missing (or secured against
+ *     directory.  If any of the subdirectories are missing (or secured against
  *     us) we display an error message and exit()
  *
  */
@@ -295,7 +297,7 @@ check_data_dir(const char *pg_data)
  * check_bin_dir()
  *
  *     This function searches for the executables that we expect to find
- *     in the binaries directory.      If we find that a required executable
+ *     in the binaries directory.  If we find that a required executable
  *     is missing (or secured against us), we display an error message and
  *     exit().
  */
@@ -349,10 +351,10 @@ validate_exec(const char *dir, const char *cmdName)
         */
        if (stat(path, &buf) < 0)
                pg_fatal("check for \"%s\" failed: %s\n",
-                          path, getErrorText(errno));
+                                path, getErrorText(errno));
        else if (!S_ISREG(buf.st_mode))
                pg_fatal("check for \"%s\" failed: not an executable file\n",
-                          path);
+                                path);
 
        /*
         * Ensure that the file is both executable and readable (required for
@@ -364,7 +366,7 @@ validate_exec(const char *dir, const char *cmdName)
        if ((buf.st_mode & S_IRUSR) == 0)
 #endif
                pg_fatal("check for \"%s\" failed: cannot read file (permission denied)\n",
-                          path);
+                                path);
 
 #ifndef WIN32
        if (access(path, X_OK) != 0)
@@ -372,5 +374,5 @@ validate_exec(const char *dir, const char *cmdName)
        if ((buf.st_mode & S_IXUSR) == 0)
 #endif
                pg_fatal("check for \"%s\" failed: cannot execute (permission denied)\n",
-                          path);
+                                path);
 }
index 04d948cfd7c4b29f5f2b37f1d9868e0f262ee386..ab9d1edcb6d86f26efb9d018ce8848403fd3e8a6 100644 (file)
@@ -214,8 +214,8 @@ check_hard_link(void)
        if (pg_link_file(existing_file, new_link_file) == -1)
        {
                pg_fatal("Could not create hard link between old and new data directories: %s\n"
-                          "In link mode the old and new data directories must be on the same file system volume.\n",
-                          getErrorText(errno));
+                                "In link mode the old and new data directories must be on the same file system volume.\n",
+                                getErrorText(errno));
        }
        unlink(new_link_file);
 }
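
A minimal sketch of the same-volume probe that check_hard_link() above performs: try to hard-link a scratch file and treat failure (typically EXDEV for a cross-filesystem attempt) as "different volumes". The paths and function name are illustrative.

#include <stdbool.h>
#include <unistd.h>

/* True if the two paths appear to be on the same filesystem volume, i.e.
 * a hard link between them can be created (and is cleaned up again). */
static bool
same_volume(const char *existing_file, const char *new_link_file)
{
    if (link(existing_file, new_link_file) == -1)
        return false;           /* EXDEV is the typical cross-volume errno */
    unlink(new_link_file);
    return true;
}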
index 5300fd603553ba6db3e73f6854d5b141fa1c461c..f2cd4716c72958bf3f28a2385bc4bb11a81cd977 100644 (file)
@@ -296,7 +296,7 @@ check_loadable_libraries(void)
                 * plpython2u language was created with library name plpython2.so as a
                 * symbolic link to plpython.so.  In Postgres 9.1, only the
                 * plpython2.so library was created, and both plpythonu and plpython2u
-                * pointing to it.      For this reason, any reference to library name
+                * pointing to it.  For this reason, any reference to library name
                 * "plpython" in an old PG <= 9.1 cluster must look for "plpython2" in
                 * the new cluster.
                 *
@@ -327,7 +327,7 @@ check_loadable_libraries(void)
 
                        if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
                                pg_fatal("Could not open file \"%s\": %s\n",
-                                          output_path, getErrorText(errno));
+                                                output_path, getErrorText(errno));
                        fprintf(script, "Could not load library \"%s\"\n%s\n",
                                        lib,
                                        PQerrorMessage(conn));
@@ -343,10 +343,10 @@ check_loadable_libraries(void)
                fclose(script);
                pg_log(PG_REPORT, "fatal\n");
                pg_fatal("Your installation references loadable libraries that are missing from the\n"
-                          "new installation.  You can add these libraries to the new installation,\n"
-                          "or remove the functions using them from the old installation.  A list of\n"
-                          "problem libraries is in the file:\n"
-                          "    %s\n\n", output_path);
+                                "new installation.  You can add these libraries to the new installation,\n"
+                                "or remove the functions using them from the old installation.  A list of\n"
+                                "problem libraries is in the file:\n"
+                                "    %s\n\n", output_path);
        }
        else
                check_ok();
index fd083de821f1c4bdef611a1fc3fd9f197a65cae6..d2968b479a930673fa61093970ddccf9a1e86483 100644 (file)
@@ -52,7 +52,7 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db,
 
                if (old_rel->reloid != new_rel->reloid)
                        pg_fatal("Mismatch of relation OID in database \"%s\": old OID %d, new OID %d\n",
-                                  old_db->db_name, old_rel->reloid, new_rel->reloid);
+                                        old_db->db_name, old_rel->reloid, new_rel->reloid);
 
                /*
                 * TOAST table names initially match the heap pg_class oid. In
@@ -69,9 +69,9 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db,
                          strcmp(old_rel->nspname, "pg_toast") != 0) &&
                         strcmp(old_rel->relname, new_rel->relname) != 0))
                        pg_fatal("Mismatch of relation names in database \"%s\": "
-                                  "old name \"%s.%s\", new name \"%s.%s\"\n",
-                                  old_db->db_name, old_rel->nspname, old_rel->relname,
-                                  new_rel->nspname, new_rel->relname);
+                                        "old name \"%s.%s\", new name \"%s.%s\"\n",
+                                        old_db->db_name, old_rel->nspname, old_rel->relname,
+                                        new_rel->nspname, new_rel->relname);
 
                create_rel_filename_map(old_pgdata, new_pgdata, old_db, new_db,
                                                                old_rel, new_rel, maps + num_maps);
@@ -84,7 +84,7 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db,
         */
        if (old_db->rel_arr.nrels != new_db->rel_arr.nrels)
                pg_fatal("old and new databases \"%s\" have a different number of relations\n",
-                          old_db->db_name);
+                                old_db->db_name);
 
        *nmaps = num_maps;
        return maps;
@@ -270,7 +270,8 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
                                i_relfilenode,
                                i_reltablespace;
        char            query[QUERY_ALLOC];
-       char       *last_namespace = NULL, *last_tablespace = NULL;
+       char       *last_namespace = NULL,
+                          *last_tablespace = NULL;
 
        /*
         * pg_largeobject contains user data that does not appear in pg_dumpall
@@ -322,7 +323,7 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
                                                          "SELECT reltoastrelid "
                                                          "FROM info_rels i JOIN pg_catalog.pg_class c "
                                                          "             ON i.reloid = c.oid "
-                                                         "             AND c.reltoastrelid != %u", InvalidOid));
+                                                 "             AND c.reltoastrelid != %u", InvalidOid));
        PQclear(executeQueryOrDie(conn,
                                                          "INSERT INTO info_rels "
                                                          "SELECT indexrelid "
@@ -373,9 +374,9 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo)
                curr->nsp_alloc = false;
 
                /*
-                * Many of the namespace and tablespace strings are identical,
-                * so we try to reuse the allocated string pointers where possible
-                * to reduce memory consumption.
+                * Many of the namespace and tablespace strings are identical, so we
+                * try to reuse the allocated string pointers where possible to reduce
+                * memory consumption.
                 */
                /* Can we reuse the previous string allocation? */
                if (last_namespace && strcmp(nspname, last_namespace) == 0)
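
A small sketch of the pointer-reuse idea described in this comment; the real code uses pg_strdup, so plain strdup appears here only to keep the example self-contained, and the function name is illustrative.

#include <stdbool.h>
#include <string.h>

/* Reuse the previous allocation when consecutive rows carry the same string;
 * "alloc" records whether this row owns its copy, so only one row per run of
 * duplicates frees it later. */
static const char *
intern_run(const char *value, const char **last, bool *alloc)
{
    if (*last && strcmp(value, *last) == 0)
    {
        *alloc = false;         /* share the previous row's pointer */
        return *last;
    }
    *last = strdup(value);
    *alloc = true;              /* this row owns the new allocation */
    return *last;
}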
index acec083c0f0d565d0ce54c0fe65d8349904115ea..b81010a813083b199b88162f2713b7c927aa0b8e 100644 (file)
@@ -188,7 +188,7 @@ parseCommandLine(int argc, char *argv[])
 
                        default:
                                pg_fatal("Try \"%s --help\" for more information.\n",
-                                          os_info.progname);
+                                                os_info.progname);
                                break;
                }
        }
@@ -211,8 +211,9 @@ parseCommandLine(int argc, char *argv[])
        /* Turn off read-only mode;  add prefix to PGOPTIONS? */
        if (getenv("PGOPTIONS"))
        {
-               char *pgoptions = psprintf("%s %s", FIX_DEFAULT_READ_ONLY,
-                                                                       getenv("PGOPTIONS"));
+               char       *pgoptions = psprintf("%s %s", FIX_DEFAULT_READ_ONLY,
+                                                                                getenv("PGOPTIONS"));
+
                pg_putenv("PGOPTIONS", pgoptions);
                pfree(pgoptions);
        }
@@ -319,8 +320,8 @@ check_required_directory(char **dirpath, char **configpath,
                }
                else
                        pg_fatal("You must identify the directory where the %s.\n"
-                                  "Please use the %s command-line option or the %s environment variable.\n",
-                                  description, cmdLineOption, envVarName);
+                                        "Please use the %s command-line option or the %s environment variable.\n",
+                                        description, cmdLineOption, envVarName);
        }
 
        /*
@@ -373,7 +374,7 @@ adjust_data_dir(ClusterInfo *cluster)
 
        /*
         * We don't have a data directory yet, so we can't check the PG version,
-        * so this might fail --- only works for PG 9.2+.       If this fails,
+        * so this might fail --- only works for PG 9.2+.   If this fails,
         * pg_upgrade will fail anyway because the data files will not be found.
         */
        snprintf(cmd, sizeof(cmd), "\"%s/postmaster\" -D \"%s\" -C data_directory",
@@ -382,7 +383,7 @@ adjust_data_dir(ClusterInfo *cluster)
        if ((output = popen(cmd, "r")) == NULL ||
                fgets(cmd_output, sizeof(cmd_output), output) == NULL)
                pg_fatal("Could not get data directory using %s: %s\n",
-                          cmd, getErrorText(errno));
+                                cmd, getErrorText(errno));
 
        pclose(output);
 
index 9f88818c475bfdca5470a251c407d7cd00d01a1f..6354cec2b0f0b22a3dfb4f0e868d96e6e74b35bf 100644 (file)
@@ -30,7 +30,7 @@ static pageCnvCtx *loadConverterPlugin(
  *     the PageLayoutVersion of the new cluster.  If the versions differ, this
  *     function loads a converter plugin and returns a pointer to a pageCnvCtx
  *     object (in *result) that knows how to convert pages from the old format
- *     to the new format.      If the versions are identical, this function just
+ *     to the new format.  If the versions are identical, this function just
  *     returns a NULL pageCnvCtx pointer to indicate that page-by-page conversion
  *     is not required.
  */
@@ -110,7 +110,7 @@ getPageVersion(uint16 *version, const char *pathName)
  *     This function loads a page-converter plugin library and grabs a
  *     pointer to each of the (interesting) functions provided by that
  *     plugin.  The name of the plugin library is derived from the given
- *     newPageVersion and oldPageVersion.      If a plugin is found, this
+ *     newPageVersion and oldPageVersion.  If a plugin is found, this
  *     function returns a pointer to a pageCnvCtx object (which will contain
  *     a collection of plugin function pointers). If the required plugin
  *     is not found, this function returns NULL.
index f4201e112145e69fb11a020a9bfa304120449be1..5d2565d44100e5b7d75fe8ce1a9d73704a0d4f23 100644 (file)
@@ -339,10 +339,10 @@ reap_child(bool wait_for_child)
                thread_handles[thread_num] = thread_handles[parallel_jobs - 1];
 
                /*
-                * Move last active thead arg struct into the now-dead slot,
-                * and the now-dead slot to the end for reuse by the next thread.
-                * Though the thread struct is in use by another thread, we can
-                * safely swap the struct pointers within the array.
+                * Move last active thead arg struct into the now-dead slot, and the
+                * now-dead slot to the end for reuse by the next thread. Though the
+                * thread struct is in use by another thread, we can safely swap the
+                * struct pointers within the array.
                 */
                tmp_args = cur_thread_args[thread_num];
                cur_thread_args[thread_num] = cur_thread_args[parallel_jobs - 1];
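
A compact sketch of the swap-with-last compaction this comment describes; the array element type and function name are illustrative, not the pg_upgrade structures.

/* The finished slot receives the last active pointer, and the freed pointer
 * is parked at the end of the array so the next worker can reuse it. */
static void
reap_slot(void **args, int finished, int *active)
{
    void *tmp = args[finished];

    args[finished] = args[*active - 1];   /* keep active entries contiguous */
    args[*active - 1] = tmp;              /* freed struct moves to the end */
    (*active)--;
}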
index 5e21028506029bd42a4577a675e634f323e7688b..773bb07e04eb9c2a3384ff68fc9398fe5cbd7313 100644 (file)
@@ -125,7 +125,7 @@ main(int argc, char **argv)
 
        /*
         * Most failures happen in create_new_objects(), which has completed at
-        * this point.  We do this here because it is just before linking, which
+        * this point.  We do this here because it is just before linking, which
         * will link the old and new cluster data files, preventing the old
         * cluster from being safely started once the new cluster is started.
         */
@@ -193,7 +193,7 @@ setup(char *argv0, bool *live_check)
        {
                /*
                 * If we have a postmaster.pid file, try to start the server.  If it
-                * starts, the pid file was stale, so stop the server.  If it doesn't
+                * starts, the pid file was stale, so stop the server.  If it doesn't
                 * start, assume the server is running.  If the pid file is left over
                 * from a server crash, this also allows any committed transactions
                 * stored in the WAL to be replayed so they are not lost, because WAL
@@ -205,7 +205,7 @@ setup(char *argv0, bool *live_check)
                {
                        if (!user_opts.check)
                                pg_fatal("There seems to be a postmaster servicing the old cluster.\n"
-                                          "Please shutdown that postmaster and try again.\n");
+                                                "Please shutdown that postmaster and try again.\n");
                        else
                                *live_check = true;
                }
@@ -218,7 +218,7 @@ setup(char *argv0, bool *live_check)
                        stop_postmaster(false);
                else
                        pg_fatal("There seems to be a postmaster servicing the new cluster.\n"
-                                  "Please shutdown that postmaster and try again.\n");
+                                        "Please shutdown that postmaster and try again.\n");
        }
 
        /* get path to pg_upgrade executable */
@@ -279,8 +279,8 @@ prepare_new_databases(void)
 
        /*
         * Install support functions in the global-object restore database to
-        * preserve pg_authid.oid.      pg_dumpall uses 'template0' as its template
-        * database so objects we add into 'template1' are not propogated.      They
+        * preserve pg_authid.oid.  pg_dumpall uses 'template0' as its template
+        * database so objects we add into 'template1' are not propogated.  They
         * are removed on pg_upgrade exit.
         */
        install_support_functions_in_new_db("template1");
index 14039b4d3dea11f1797fca65c87ce7222e0ad2c4..33be33d64062eb5833bb4fb6ca67781be07894d7 100644 (file)
@@ -142,10 +142,10 @@ typedef struct
  */
 typedef struct
 {
-       const char              *old_tablespace;
-       const char              *new_tablespace;
-       const char              *old_tablespace_suffix;
-       const char              *new_tablespace_suffix;
+       const char *old_tablespace;
+       const char *new_tablespace;
+       const char *old_tablespace_suffix;
+       const char *new_tablespace_suffix;
        Oid                     old_db_oid;
        Oid                     new_db_oid;
 
@@ -167,7 +167,8 @@ typedef struct
 {
        Oid                     db_oid;                 /* oid of the database */
        char       *db_name;            /* database name */
-       char            db_tablespace[MAXPGPATH]; /* database default tablespace path */
+       char            db_tablespace[MAXPGPATH];               /* database default tablespace
+                                                                                                * path */
        RelInfoArr      rel_arr;                /* array of all user relinfos */
 } DbInfo;
 
@@ -454,7 +455,7 @@ pg_log(eLogType type, const char *fmt,...)
 __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
 void
 pg_fatal(const char *fmt,...)
-__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2),noreturn));
+__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2), noreturn));
 void           end_progress_output(void);
 void
 prep_status(const char *fmt,...)
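
As an aside, the prototypes in this hunk attach GCC-style attributes so the compiler can type-check the printf-style arguments and treat pg_fatal() as never returning. A minimal standalone sketch of that idiom, using a hypothetical my_fatal() wrapper rather than pg_upgrade's actual code, might look like this:

    #include <stdarg.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* format(printf, 1, 2): argument 1 is the format string and the varargs
     * start at argument 2; noreturn: control never comes back after the call. */
    static void my_fatal(const char *fmt, ...)
    __attribute__((format(printf, 1, 2), noreturn));

    static void
    my_fatal(const char *fmt, ...)
    {
        va_list     ap;

        va_start(ap, fmt);
        vfprintf(stderr, fmt, ap);
        va_end(ap);
        exit(1);
    }

    int
    main(void)
    {
        my_fatal("unexpected value: %d\n", 42);
    }

The comma-joined attribute list on a single declaration is the spelling the reindented line above settles on.
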
index dccc86d25a2fc0238bf50d4e9bef9dfdb81fd120..aa6aafde5e97d70ff1673599a56197fed388c038 100644 (file)
@@ -37,7 +37,7 @@ transfer_all_new_tablespaces(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
        /*
        * Transferring files by tablespace is tricky because a single database can
         * use multiple tablespaces.  For non-parallel mode, we just pass a NULL
-        * tablespace path, which matches all tablespaces.      In parallel mode, we
+        * tablespace path, which matches all tablespaces.  In parallel mode, we
         * pass the default tablespace and all user-created tablespaces and let
         * those operations happen in parallel.
         */
@@ -108,7 +108,7 @@ transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
 
                if (new_dbnum >= new_db_arr->ndbs)
                        pg_fatal("old database \"%s\" not found in the new cluster\n",
-                                  old_db->db_name);
+                                        old_db->db_name);
 
                n_maps = 0;
                mappings = gen_db_file_maps(old_db, new_db, &n_maps, old_pgdata,
@@ -135,7 +135,7 @@ transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
 /*
  * get_pg_database_relfilenode()
  *
- *     Retrieves the relfilenode for a few system-catalog tables.      We need these
+ *     Retrieves the relfilenode for a few system-catalog tables.  We need these
  *     relfilenodes later in the upgrade process.
  */
 void
@@ -259,8 +259,8 @@ transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map,
                                        return;
                                else
                                        pg_fatal("error while checking for file existence \"%s.%s\" (\"%s\" to \"%s\"): %s\n",
-                                                  map->nspname, map->relname, old_file, new_file,
-                                                  getErrorText(errno));
+                                                        map->nspname, map->relname, old_file, new_file,
+                                                        getErrorText(errno));
                        }
                        close(fd);
                }
@@ -272,7 +272,7 @@ transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map,
 
                if ((user_opts.transfer_mode == TRANSFER_MODE_LINK) && (pageConverter != NULL))
                        pg_fatal("This upgrade requires page-by-page conversion, "
-                                  "you must use copy mode instead of link mode.\n");
+                                        "you must use copy mode instead of link mode.\n");
 
                if (user_opts.transfer_mode == TRANSFER_MODE_COPY)
                {
@@ -280,7 +280,7 @@ transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map,
 
                        if ((msg = copyAndUpdateFile(pageConverter, old_file, new_file, true)) != NULL)
                                pg_fatal("error while copying relation \"%s.%s\" (\"%s\" to \"%s\"): %s\n",
-                                          map->nspname, map->relname, old_file, new_file, msg);
+                                                map->nspname, map->relname, old_file, new_file, msg);
                }
                else
                {
@@ -288,7 +288,7 @@ transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map,
 
                        if ((msg = linkAndUpdateFile(pageConverter, old_file, new_file)) != NULL)
                                pg_fatal("error while creating link for relation \"%s.%s\" (\"%s\" to \"%s\"): %s\n",
-                                          map->nspname, map->relname, old_file, new_file, msg);
+                                                map->nspname, map->relname, old_file, new_file, msg);
                }
        }
 
index 705510320ef7f6c776f554f7af015f689700f7e3..5f4b5307cbad79ef5e9a68109b2c271b1fe4329f 100644 (file)
@@ -240,28 +240,26 @@ start_postmaster(ClusterInfo *cluster, bool throw_error)
                return false;
 
        /*
-        * We set this here to make sure atexit() shuts down the server,
-        * but only if we started the server successfully.  We do it
-        * before checking for connectivity in case the server started but
-        * there is a connectivity failure.  If pg_ctl did not return success,
-        * we will exit below.
+        * We set this here to make sure atexit() shuts down the server, but only
+        * if we started the server successfully.  We do it before checking for
+        * connectivity in case the server started but there is a connectivity
+        * failure.  If pg_ctl did not return success, we will exit below.
         *
         * Pre-9.1 servers do not have PQping(), so we could be leaving the server
-        * running if authentication was misconfigured, so someday we might want to
-        * be more aggressive about doing server shutdowns even if pg_ctl fails,
-        * but now (2013-08-14) it seems prudent to be cautious.  We don't want to
-        * shutdown a server that might have been accidentally started during the
-        * upgrade.
+        * running if authentication was misconfigured, so someday we might want
+        * to be more aggressive about doing server shutdowns even if pg_ctl
+        * fails, but now (2013-08-14) it seems prudent to be cautious.  We don't
+        * want to shutdown a server that might have been accidentally started
+        * during the upgrade.
         */
        if (pg_ctl_return)
                os_info.running_cluster = cluster;
 
        /*
-        * pg_ctl -w might have failed because the server couldn't be started,
-        * or there might have been a connection problem in _checking_ if the
-        * server has started.  Therefore, even if pg_ctl failed, we continue
-        * and test for connectivity in case we get a connection reason for the
-        * failure.
+        * pg_ctl -w might have failed because the server couldn't be started, or
+        * there might have been a connection problem in _checking_ if the server
+        * has started.  Therefore, even if pg_ctl failed, we continue and test
+        * for connectivity in case we get a connection reason for the failure.
         */
        if ((conn = get_db_conn(cluster, "template1")) == NULL ||
                PQstatus(conn) != CONNECTION_OK)
@@ -271,18 +269,19 @@ start_postmaster(ClusterInfo *cluster, bool throw_error)
                if (conn)
                        PQfinish(conn);
                pg_fatal("could not connect to %s postmaster started with the command:\n"
-                          "%s\n",
-                          CLUSTER_NAME(cluster), cmd);
+                                "%s\n",
+                                CLUSTER_NAME(cluster), cmd);
        }
        PQfinish(conn);
 
        /*
         * If pg_ctl failed, and the connection didn't fail, and throw_error is
-        * enabled, fail now.  This could happen if the server was already running.
+        * enabled, fail now.  This could happen if the server was already
+        * running.
         */
        if (!pg_ctl_return)
                pg_fatal("pg_ctl failed to start the %s server, or connection failed\n",
-                          CLUSTER_NAME(cluster));
+                                CLUSTER_NAME(cluster));
 
        return true;
 }
@@ -340,7 +339,7 @@ check_pghost_envvar(void)
                                (strcmp(value, "localhost") != 0 && strcmp(value, "127.0.0.1") != 0 &&
                                 strcmp(value, "::1") != 0 && value[0] != '/'))
                                pg_fatal("libpq environment variable %s has a non-local server value: %s\n",
-                                          option->envvar, value);
+                                                option->envvar, value);
                }
        }
 
index 94bba087bb7f8ecb4647b9dbc69f56d9db94e06a..68e9cb241c75dbc0bb42a976733238d7e77de717 100644 (file)
@@ -28,7 +28,7 @@ init_tablespaces(void)
        if (os_info.num_old_tablespaces > 0 &&
        strcmp(old_cluster.tablespace_suffix, new_cluster.tablespace_suffix) == 0)
                pg_fatal("Cannot upgrade to/from the same system catalog version when\n"
-                          "using tablespaces.\n");
+                                "using tablespaces.\n");
 }
 
 
@@ -78,10 +78,9 @@ get_tablespace_paths(void)
                 * Effectively, this is checking only for tables/indexes in
                 * non-existent tablespace directories.  Databases located in
                 * non-existent tablespaces already throw a backend error.
-                * Non-existent tablespace directories can occur when a data
-                * directory that contains user tablespaces is moved as part
-                * of pg_upgrade preparation and the symbolic links are not
-                * updated.
+                * Non-existent tablespace directories can occur when a data directory
+                * that contains user tablespaces is moved as part of pg_upgrade
+                * preparation and the symbolic links are not updated.
                 */
                if (stat(os_info.old_tablespaces[tblnum], &statBuf) != 0)
                {
@@ -91,13 +90,13 @@ get_tablespace_paths(void)
                                                          os_info.old_tablespaces[tblnum]);
                        else
                                report_status(PG_FATAL,
-                                                         "cannot stat() tablespace directory \"%s\": %s\n",
-                                                         os_info.old_tablespaces[tblnum], getErrorText(errno));
+                                                  "cannot stat() tablespace directory \"%s\": %s\n",
+                                          os_info.old_tablespaces[tblnum], getErrorText(errno));
                }
                if (!S_ISDIR(statBuf.st_mode))
-                               report_status(PG_FATAL,
-                                                         "tablespace path \"%s\" is not a directory\n",
-                                                         os_info.old_tablespaces[tblnum]);
+                       report_status(PG_FATAL,
+                                                 "tablespace path \"%s\" is not a directory\n",
+                                                 os_info.old_tablespaces[tblnum]);
        }
 
        PQclear(res);
index 7f4458c0acef2c3b1f43695406af1b538b872368..3b94057696d0ad7cae3ead2521af10c39e25c2b1 100644 (file)
@@ -82,7 +82,7 @@ prep_status(const char *fmt,...)
 
 
 static
- __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 0)))
+__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 0)))
 void
 pg_log_v(eLogType type, const char *fmt, va_list ap)
 {
@@ -280,7 +280,7 @@ pg_putenv(const char *var, const char *val)
 
                /*
                 * Do not free envstr because it becomes part of the environment on
-                * some operating systems.      See port/unsetenv.c::unsetenv.
+                * some operating systems.  See port/unsetenv.c::unsetenv.
                 */
 #else
                SetEnvironmentVariableA(var, val);
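
The comment in this hunk points at the POSIX putenv() rule behind the "do not free" warning: the string handed to putenv() becomes part of the environment itself, so it must stay allocated for the life of the process. A small, self-contained illustration (the PGPORT value is just an example, not pg_upgrade code):

    #include <stdio.h>
    #include <stdlib.h>

    int
    main(void)
    {
        char       *envstr = malloc(64);

        if (envstr == NULL)
            return 1;
        snprintf(envstr, 64, "PGPORT=%d", 50432);
        putenv(envstr);             /* the environment now points at envstr */
        /* free(envstr) here would leave getenv("PGPORT") pointing at freed memory */
        printf("PGPORT=%s\n", getenv("PGPORT"));
        return 0;
    }
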
index f58f74511f756d5ab2ee86d74765bf19bc744646..07e79bd609a5f613e93bf94d3c2d9a3e16c5cd6c 100644 (file)
@@ -98,10 +98,10 @@ old_8_3_check_for_name_data_type_usage(ClusterInfo *cluster)
                pg_log(PG_REPORT, "fatal\n");
                pg_fatal("Your installation contains the \"name\" data type in user tables.  This\n"
                "data type changed its internal alignment between your old and new\n"
-                          "clusters so this cluster cannot currently be upgraded.  You can remove\n"
+                                "clusters so this cluster cannot currently be upgraded.  You can remove\n"
                "the problem tables and restart the upgrade.  A list of the problem\n"
-                          "columns is in the file:\n"
-                          "    %s\n\n", output_path);
+                                "columns is in the file:\n"
+                                "    %s\n\n", output_path);
        }
        else
                check_ok();
@@ -187,11 +187,11 @@ old_8_3_check_for_tsquery_usage(ClusterInfo *cluster)
        {
                pg_log(PG_REPORT, "fatal\n");
                pg_fatal("Your installation contains the \"tsquery\" data type.    This data type\n"
-                          "added a new internal field between your old and new clusters so this\n"
+                                "added a new internal field between your old and new clusters so this\n"
                "cluster cannot currently be upgraded.  You can remove the problem\n"
-                          "columns and restart the upgrade.  A list of the problem columns is in the\n"
-                          "file:\n"
-                          "    %s\n\n", output_path);
+                                "columns and restart the upgrade.  A list of the problem columns is in the\n"
+                                "file:\n"
+                                "    %s\n\n", output_path);
        }
        else
                check_ok();
@@ -242,7 +242,7 @@ old_8_3_check_ltree_usage(ClusterInfo *cluster)
                        found = true;
                        if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
                                pg_fatal("Could not open file \"%s\": %s\n",
-                                          output_path, getErrorText(errno));
+                                                output_path, getErrorText(errno));
                        if (!db_used)
                        {
                                fprintf(script, "Database: %s\n", active_db->db_name);
@@ -265,12 +265,12 @@ old_8_3_check_ltree_usage(ClusterInfo *cluster)
        {
                pg_log(PG_REPORT, "fatal\n");
                pg_fatal("Your installation contains the \"ltree\" data type.  This data type\n"
-                          "changed its internal storage format between your old and new clusters so this\n"
-                          "cluster cannot currently be upgraded.  You can manually upgrade databases\n"
-                          "that use \"contrib/ltree\" facilities and remove \"contrib/ltree\" from the old\n"
-                          "cluster and restart the upgrade.  A list of the problem functions is in the\n"
-                          "file:\n"
-                          "    %s\n\n", output_path);
+                                "changed its internal storage format between your old and new clusters so this\n"
+                                "cluster cannot currently be upgraded.  You can manually upgrade databases\n"
+                                "that use \"contrib/ltree\" facilities and remove \"contrib/ltree\" from the old\n"
+                                "cluster and restart the upgrade.  A list of the problem functions is in the\n"
+                                "file:\n"
+                                "    %s\n\n", output_path);
        }
        else
                check_ok();
index 9c9b68a7a9789419870c67c705d28aa85c627c16..6ca7012fd906bc80b4d2e1da7dc2537b8acefd83 100644 (file)
@@ -41,7 +41,7 @@ timestamptz_to_time_t(TimestampTz t)
 
 /*
  * Stopgap implementation of timestamptz_to_str that doesn't depend on backend
- * infrastructure.     This will work for timestamps that are within the range
+ * infrastructure.  This will work for timestamps that are within the range
  * of the platform time_t type.  (pg_time_t is compatible except for possibly
  * being wider.)
  *
index 31b5d7589aa3a25766b048fa871e1e7f267cb32b..824b8c393c9f601ccd7539b4347de9196c3546ff 100644 (file)
@@ -704,7 +704,7 @@ main(int argc, char **argv)
                                break;
                        else
                        {
-                               pg_usleep(1000000L);            /* 1 second */
+                               pg_usleep(1000000L);    /* 1 second */
                                continue;
                        }
                }
index 7c1e59e4c4a0d34158cce59e834ddecb8cb205ad..e399d9a68e271d6c3ee9ca289f3b412bf7e4d1d0 100644 (file)
@@ -162,9 +162,11 @@ bool               use_log;                        /* log transaction latencies to a file */
 bool           use_quiet;                      /* quiet logging onto stderr */
 int                    agg_interval;           /* log aggregates instead of individual
                                                                 * transactions */
-int                    progress = 0;       /* thread progress report every this seconds */
-int         progress_nclients = 0; /* number of clients for progress report */
-int                    progress_nthreads = 0; /* number of threads for progress report */
+int                    progress = 0;           /* thread progress report every this seconds */
+int                    progress_nclients = 0;          /* number of clients for progress
+                                                                                * report */
+int                    progress_nthreads = 0;          /* number of threads for progress
+                                                                                * report */
 bool           is_connect;                     /* establish connection for each transaction */
 bool           is_latencies;           /* report per-command latencies */
 int                    main_pid;                       /* main process id used in log filename */
@@ -201,7 +203,7 @@ typedef struct
        int                     listen;                 /* 0 indicates that an async query has been
                                                                 * sent */
        int                     sleeping;               /* 1 indicates that the client is napping */
-       bool        throttling;     /* whether nap is for throttling */
+       bool            throttling;             /* whether nap is for throttling */
        int64           until;                  /* napping until (usec) */
        Variable   *variables;          /* array of variable definitions */
        int                     nvariables;
@@ -227,9 +229,9 @@ typedef struct
        instr_time *exec_elapsed;       /* time spent executing cmds (per Command) */
        int                *exec_count;         /* number of cmd executions (per Command) */
        unsigned short random_state[3];         /* separate randomness for each thread */
-       int64       throttle_trigger;   /* previous/next throttling (us) */
-       int64       throttle_lag;               /* total transaction lag behind throttling */
-       int64       throttle_lag_max;   /* max transaction lag */
+       int64           throttle_trigger;               /* previous/next throttling (us) */
+       int64           throttle_lag;   /* total transaction lag behind throttling */
+       int64           throttle_lag_max;               /* max transaction lag */
 } TState;
 
 #define INVALID_THREAD         ((pthread_t) 0)
@@ -240,8 +242,8 @@ typedef struct
        int                     xacts;
        int64           latencies;
        int64           sqlats;
-       int64       throttle_lag;
-       int64       throttle_lag_max;
+       int64           throttle_lag;
+       int64           throttle_lag_max;
 } TResult;
 
 /*
@@ -343,20 +345,20 @@ usage(void)
                   "\nInitialization options:\n"
                   "  -i, --initialize         invokes initialization mode\n"
                   "  -F, --fillfactor=NUM     set fill factor\n"
-                  "  -n, --no-vacuum          do not run VACUUM after initialization\n"
-                  "  -q, --quiet              quiet logging (one message each 5 seconds)\n"
+               "  -n, --no-vacuum          do not run VACUUM after initialization\n"
+       "  -q, --quiet              quiet logging (one message each 5 seconds)\n"
                   "  -s, --scale=NUM          scaling factor\n"
                   "  --foreign-keys           create foreign key constraints between tables\n"
                   "  --index-tablespace=TABLESPACE\n"
-                  "                           create indexes in the specified tablespace\n"
-                  "  --tablespace=TABLESPACE  create tables in the specified tablespace\n"
+       "                           create indexes in the specified tablespace\n"
+        "  --tablespace=TABLESPACE  create tables in the specified tablespace\n"
                   "  --unlogged-tables        create tables as unlogged tables\n"
                   "\nBenchmarking options:\n"
                   "  -c, --client=NUM         number of concurrent database clients (default: 1)\n"
                   "  -C, --connect            establish new connection for each transaction\n"
                   "  -D, --define=VARNAME=VALUE\n"
-                  "                           define variable for use by custom script\n"
-                  "  -f, --file=FILENAME      read transaction script from FILENAME\n"
+         "                           define variable for use by custom script\n"
+                "  -f, --file=FILENAME      read transaction script from FILENAME\n"
                   "  -j, --jobs=NUM           number of threads (default: 1)\n"
                   "  -l, --log                write transaction times to log file\n"
                   "  -M, --protocol=simple|extended|prepared\n"
@@ -365,20 +367,20 @@ usage(void)
                   "  -N, --skip-some-updates  skip updates of pgbench_tellers and pgbench_branches\n"
                   "  -P, --progress=NUM       show thread progress report every NUM seconds\n"
                   "  -r, --report-latencies   report average latency per command\n"
-                  "  -R, --rate=NUM           target rate in transactions per second\n"
+               "  -R, --rate=NUM           target rate in transactions per second\n"