* For the raison d'etre of this file, check the comment above the definition
* of the PGAC_C_INLINE macro in config/c-compiler.m4.
*/
-static inline int fun () { return 0; }
+static inline int
+fun()
+{
+ return 0;
+}
DefineCustomBoolVariable("auto_explain.log_triggers",
"Include trigger statistics in plans.",
- "This has no effect unless log_analyze is also set.",
+ "This has no effect unless log_analyze is also set.",
&auto_explain_log_triggers,
false,
PGC_SUSET,
/*
* INTERVALSIZE should be the actual size-on-disk of an Interval, as shown
- * in pg_type. This might be less than sizeof(Interval) if the compiler
+ * in pg_type. This might be less than sizeof(Interval) if the compiler
* insists on adding alignment padding at the end of the struct.
*/
#define INTERVALSIZE 16
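/*
 * Illustrative sketch (not in the original source): the relationship
 * described above can be checked at compile time. StaticAssertStmt is
 * assumed available, as in recent c.h.
 */
StaticAssertStmt(INTERVALSIZE <= sizeof(Interval),
				 "INTERVALSIZE must not exceed sizeof(Interval)");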
** Auxiliary functions
*/
static double distance_1D(double a1, double a2, double b1, double b2);
-static bool cube_is_point_internal(NDBOX *cube);
+static bool cube_is_point_internal(NDBOX *cube);
/*****************************************************************************
rt_cube_size(datum_r, &size_r);
/*
- * Now split up the regions between the two seeds. An important property
+ * Now split up the regions between the two seeds. An important property
* of this split algorithm is that the split vector v has the indices of
* items to be split in order in its left and right vectors. We exploit
* this property by doing a merge in the code that actually splits the
{
/*
* If we've already decided where to place this item, just put it on
- * the right list. Otherwise, we need to figure out which page needs
+ * the right list. Otherwise, we need to figure out which page needs
* the least enlargement in order to store the item.
*/
SET_VARSIZE(result, size);
SET_DIM(result, dim);
- /* First compute the union of the dimensions present in both args */
+ /* First compute the union of the dimensions present in both args */
for (i = 0; i < DIM(b); i++)
{
result->x[i] = Min(
- Min(LL_COORD(a, i), UR_COORD(a, i)),
- Min(LL_COORD(b, i), UR_COORD(b, i))
- );
+ Min(LL_COORD(a, i), UR_COORD(a, i)),
+ Min(LL_COORD(b, i), UR_COORD(b, i))
+ );
result->x[i + DIM(a)] = Max(
- Max(LL_COORD(a, i), UR_COORD(a, i)),
- Max(LL_COORD(b, i), UR_COORD(b, i))
- );
+ Max(LL_COORD(a, i), UR_COORD(a, i)),
+ Max(LL_COORD(b, i), UR_COORD(b, i))
+ );
}
/* continue on the higher dimensions only present in 'a' */
for (; i < DIM(a); i++)
{
result->x[i] = Min(0,
- Min(LL_COORD(a, i), UR_COORD(a, i))
- );
+ Min(LL_COORD(a, i), UR_COORD(a, i))
+ );
result->x[i + dim] = Max(0,
- Max(LL_COORD(a, i), UR_COORD(a, i))
- );
+ Max(LL_COORD(a, i), UR_COORD(a, i))
+ );
}
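/*
 * Worked example (values invented): the union of the 1-D cube '(1),(3)'
 * and the 2-D cube '(2,2),(4,4)' is '(1,0),(4,4)' -- the common dimension
 * takes Min/Max over both arguments, and the extra dimension of the
 * higher-dimensional cube is unioned with 0.
 */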
/*
if (DIM(a) < DIM(b))
{
NDBOX *tmp = b;
+
b = a;
a = tmp;
swapped = true;
SET_VARSIZE(result, size);
SET_DIM(result, dim);
- /* First compute intersection of the dimensions present in both args */
+ /* First compute intersection of the dimensions present in both args */
for (i = 0; i < DIM(b); i++)
{
result->x[i] = Max(
- Min(LL_COORD(a, i), UR_COORD(a, i)),
- Min(LL_COORD(b, i), UR_COORD(b, i))
- );
+ Min(LL_COORD(a, i), UR_COORD(a, i)),
+ Min(LL_COORD(b, i), UR_COORD(b, i))
+ );
result->x[i + DIM(a)] = Min(
- Max(LL_COORD(a, i), UR_COORD(a, i)),
- Max(LL_COORD(b, i), UR_COORD(b, i))
- );
+ Max(LL_COORD(a, i), UR_COORD(a, i)),
+ Max(LL_COORD(b, i), UR_COORD(b, i))
+ );
}
/* continue on the higher dimensions only present in 'a' */
for (; i < DIM(a); i++)
{
result->x[i] = Max(0,
- Min(LL_COORD(a, i), UR_COORD(a, i))
- );
+ Min(LL_COORD(a, i), UR_COORD(a, i))
+ );
result->x[i + DIM(a)] = Min(0,
- Max(LL_COORD(a, i), UR_COORD(a, i))
- );
+ Max(LL_COORD(a, i), UR_COORD(a, i))
+ );
}
/*
/* compute within the dimensions of (b) */
for (i = 0; i < DIM(b); i++)
{
- d = distance_1D(LL_COORD(a,i), UR_COORD(a,i), LL_COORD(b,i), UR_COORD(b,i));
+ d = distance_1D(LL_COORD(a, i), UR_COORD(a, i), LL_COORD(b, i), UR_COORD(b, i));
distance += d * d;
}
/* compute distance to zero for those dimensions in (a) absent in (b) */
for (i = DIM(b); i < DIM(a); i++)
{
- d = distance_1D(LL_COORD(a,i), UR_COORD(a,i), 0.0, 0.0);
+ d = distance_1D(LL_COORD(a, i), UR_COORD(a, i), 0.0, 0.0);
distance += d * d;
}
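/*
 * For reference, a plausible shape for the distance_1D helper declared
 * earlier (a sketch under that assumption, not necessarily the exact
 * original): the gap between two 1-D intervals, or zero if they overlap.
 */
static double
distance_1D(double a1, double a2, double b1, double b2)
{
	/* interval (a) is entirely to the left of interval (b) */
	if ((a1 <= b1) && (a2 <= b1) && (a1 <= b2) && (a2 <= b2))
		return (Min(b1, b2) - Max(a1, a2));

	/* interval (a) is entirely to the right of interval (b) */
	if ((a1 > b1) && (a2 > b1) && (a1 > b2) && (a2 > b2))
		return (Min(a1, a2) - Max(b1, b2));

	/* the intervals overlap */
	return 0.0;
}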
return true;
/*
- * Even if the point-flag is not set, all the lower-left coordinates
- * might match the upper-right coordinates, so that the value is in
- * fact a point. Such values don't arise with current code - the point
- * flag is always set if appropriate - but they might be present on-disk
- * in clusters upgraded from pre-9.4 versions.
+ * Even if the point-flag is not set, all the lower-left coordinates might
+ * match the upper-right coordinates, so that the value is in fact a
+ * point. Such values don't arise with current code - the point flag is
+ * always set if appropriate - but they might be present on-disk in
+ * clusters upgraded from pre-9.4 versions.
*/
for (i = 0; i < DIM(cube); i++)
{
{
NDBOX *c = PG_GETARG_NDBOX(0);
int dim = DIM(c);
+
PG_FREE_IF_COPY(c, 0);
PG_RETURN_INT32(dim);
}
double result;
if (DIM(c) >= n && n > 0)
- result = Min(LL_COORD(c, n-1), UR_COORD(c, n-1));
+ result = Min(LL_COORD(c, n - 1), UR_COORD(c, n - 1));
else
result = 0;
double result;
if (DIM(c) >= n && n > 0)
- result = Max(LL_COORD(c, n-1), UR_COORD(c, n-1));
+ result = Max(LL_COORD(c, n - 1), UR_COORD(c, n - 1));
else
result = 0;
for (i = 0, j = dim; i < DIM(a); i++, j++)
{
- if (LL_COORD(a,i) >= UR_COORD(a,i))
+ if (LL_COORD(a, i) >= UR_COORD(a, i))
{
- result->x[i] = UR_COORD(a,i) - r;
- result->x[j] = LL_COORD(a,i) + r;
+ result->x[i] = UR_COORD(a, i) - r;
+ result->x[j] = LL_COORD(a, i) + r;
}
else
{
- result->x[i] = LL_COORD(a,i) - r;
- result->x[j] = UR_COORD(a,i) + r;
+ result->x[i] = LL_COORD(a, i) - r;
+ result->x[j] = UR_COORD(a, i) + r;
}
if (result->x[i] > result->x[j])
{
result->x[DIM(result) + i] = cube->x[DIM(cube) + i];
}
result->x[DIM(result) - 1] = x;
- result->x[2*DIM(result) - 1] = x;
+ result->x[2 * DIM(result) - 1] = x;
}
PG_FREE_IF_COPY(cube, 0);
int size;
int i;
- if (IS_POINT(cube) && (x1 == x2)){
+ if (IS_POINT(cube) && (x1 == x2))
+ {
size = POINT_SIZE((DIM(cube) + 1));
result = (NDBOX *) palloc0(size);
SET_VARSIZE(result, size);
*
* Following information is stored:
*
- * bits 0-7 : number of cube dimensions;
- * bits 8-30 : unused, initialize to zero;
- * bit 31 : point flag. If set, the upper right coordinates are not
+ * bits 0-7 : number of cube dimensions;
+ * bits 8-30 : unused, initialize to zero;
+ * bit 31 : point flag. If set, the upper right coordinates are not
* stored, and are implicitly the same as the lower left
* coordinates.
*----------
} NDBOX;
#define POINT_BIT 0x80000000
-#define DIM_MASK 0x7fffffff
+#define DIM_MASK 0x7fffffff
#define IS_POINT(cube) ( ((cube)->header & POINT_BIT) != 0 )
-#define SET_POINT_BIT(cube) ( (cube)->header |= POINT_BIT )
+#define SET_POINT_BIT(cube) ( (cube)->header |= POINT_BIT )
#define DIM(cube) ( (cube)->header & DIM_MASK )
-#define SET_DIM(cube, _dim) ( (cube)->header = ((cube)->header & ~DIM_MASK) | (_dim) )
+#define SET_DIM(cube, _dim) ( (cube)->header = ((cube)->header & ~DIM_MASK) | (_dim) )
#define LL_COORD(cube, i) ( (cube)->x[i] )
#define UR_COORD(cube, i) ( IS_POINT(cube) ? (cube)->x[i] : (cube)->x[(i) + DIM(cube)] )
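/*
 * Illustrative use of the macros above (hypothetical snippet, not from the
 * original source): build a 3-D point, whose upper-right coordinates are
 * implicit per the point flag.
 */
NDBOX	   *box = (NDBOX *) palloc0(POINT_SIZE(3));

SET_VARSIZE(box, POINT_SIZE(3));
SET_DIM(box, 3);
SET_POINT_BIT(box);
Assert(DIM(box) == 3 && IS_POINT(box));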
* Build sql statement to look up tuple of interest, ie, the one matching
* src_pkattvals. We used to use "SELECT *" here, but it's simpler to
* generate a result tuple that matches the table's physical structure,
- * with NULLs for any dropped columns. Otherwise we have to deal with two
+ * with NULLs for any dropped columns. Otherwise we have to deal with two
* different tupdescs and everything's very confusing.
*/
appendStringInfoString(&buf, "SELECT ");
}
/*
- * For non-superusers, insist that the connstr specify a password. This
+ * For non-superusers, insist that the connstr specify a password. This
* prevents a password from being picked up from .pgpass, a service file,
* the environment, etc. We don't want the postgres user's passwords
* to be accessible to non-superusers.
* distance between the points in miles on earth's surface
*
* If float8 is passed-by-value, the oldstyle version-0 calling convention
- * is unportable, so we use version-1. However, if it's passed-by-reference,
+ * is unportable, so we use version-1. However, if it's passed-by-reference,
* continue to use oldstyle. This is just because we'd like earthdistance
* to serve as a canary for any unintentional breakage of version-0 functions
* with float8 results.
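/*
 * Sketch of the idiom described above (the conditional shape is an
 * assumption): request the version-1 convention only when float8 is
 * pass-by-value.
 */
#ifdef USE_FLOAT8_BYVAL
PG_FUNCTION_INFO_V1(geo_distance);
#endif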
{"encoding", ForeignTableRelationId},
{"force_not_null", AttributeRelationId},
{"force_null", AttributeRelationId},
+
/*
* force_quote is not supported by file_fdw because it's for COPY TO.
*/
errmsg("conflicting or redundant options")));
filename = defGetString(def);
}
+
/*
* force_not_null is a boolean option; after validation we can discard
* it - it will be retrieved later in get_file_fdw_attribute_options()
List *fnncolumns = NIL;
List *fncolumns = NIL;
- List *options = NIL;
+ List *options = NIL;
rel = heap_open(relid, AccessShareLock);
tupleDesc = RelationGetDescr(rel);
heap_close(rel, AccessShareLock);
- /* Return DefElem only when some column(s) have force_not_null / force_null options set */
+ /*
+ * Return DefElem only when some column(s) have force_not_null /
+ * force_null options set
+ */
if (fnncolumns != NIL)
options = lappend(options, makeDefElem("force_not_null", (Node *) fnncolumns));
if (fncolumns != NIL)
- options = lappend(options,makeDefElem("force_null", (Node *) fncolumns));
+ options = lappend(options, makeDefElem("force_null", (Node *) fncolumns));
return options;
}
&startup_cost, &total_cost);
/*
- * Create a ForeignPath node and add it as only possible path. We use the
+ * Create a ForeignPath node and add it as only possible path. We use the
* fdw_private list of the path to carry the convert_selectively option;
* it will be propagated into the fdw_private list of the Plan node.
*/
 * planner's idea of the relation width, which is bogus if not all
* columns are being read, not to mention that the text representation
* of a row probably isn't the same size as its internal
- * representation. Possibly we could do something better, but the
+ * representation. Possibly we could do something better, but the
* real answer to anyone who complains is "ANALYZE" ...
*/
int tuple_width;
* which must have at least targrows entries.
* The actual number of rows selected is returned as the function result.
* We also count the total number of rows in the file and return it into
- * *totalrows. Note that *totaldeadrows is always set to 0.
+ * *totalrows. Note that *totaldeadrows is always set to 0.
*
* Note that the returned list of rows is not always in order by physical
* position in the file. Therefore, correlation estimates derived later
* array.
*
* If max_d >= 0, we only need to provide an accurate answer when that answer
- * is less than or equal to the bound. From any cell in the matrix, there is
+ * is less than or equal to the bound. From any cell in the matrix, there is
 * a theoretical "minimum residual distance" from that cell to the last column
* of the final row. This minimum residual distance is zero when the
* untransformed portions of the strings are of equal length (because we might
stop_column = m + 1;
/*
- * If max_d >= 0, determine whether the bound is impossibly tight. If so,
+ * If max_d >= 0, determine whether the bound is impossibly tight. If so,
* return max_d + 1 immediately. Otherwise, determine whether it's tight
* enough to limit the computation we must perform. If so, figure out
* initial stop column.
* need to fill in. If the string is growing, the theoretical
* minimum distance already incorporates the cost of deleting the
* number of characters necessary to make the two strings equal in
- * length. Each additional deletion forces another insertion, so
+ * length. Each additional deletion forces another insertion, so
* the best-case total cost increases by ins_c + del_c. If the
* string is shrinking, the minimum theoretical cost assumes no
* excess deletions; that is, we're starting no further right than
/*
* The main loop fills in curr, but curr[0] needs a special case: to
* transform the first 0 characters of s into the first j characters
- * of t, we must perform j insertions. However, if start_column > 0,
+ * of t, we must perform j insertions. However, if start_column > 0,
* this special case does not apply.
*/
if (start_column == 0)
* HEntry: there is one of these for each key _and_ value in an hstore
*
* the position offset points to the _end_ so that we can get the length
- * by subtraction from the previous entry. the ISFIRST flag lets us tell
+ * by subtraction from the previous entry. the ISFIRST flag lets us tell
* whether there is a previous entry.
*/
typedef struct
/*
* When using a GIN index for hstore, we choose to index both keys and values.
* The storage format is "text" values, with K, V, or N prepended to the string
- * to indicate key, value, or null values. (As of 9.1 it might be better to
+ * to indicate key, value, or null values. (As of 9.1 it might be better to
* store null values as nulls, but we'll keep it this way for on-disk
* compatibility.)
*/
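/*
 * Sketch of the encoding described above (buffer-building shape assumed,
 * and keydata/keylen are placeholders, not the original helper's names):
 * a key "foo" is indexed as the text value "Kfoo".
 */
text	   *item = (text *) palloc(VARHDRSZ + keylen + 1);

SET_VARSIZE(item, VARHDRSZ + keylen + 1);
*VARDATA(item) = 'K';			/* 'V' for values, 'N' for nulls */
memcpy(VARDATA(item) + 1, keydata, keylen);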
{
/*
* Index doesn't have information about correspondence of keys and
- * values, so we need recheck. However, if not all the keys are
+ * values, so we need recheck. However, if not all the keys are
* present, we can fail at once.
*/
*recheck = true;
dst;
if (count == 0)
- PG_RETURN_TEXT_P(cstring_to_text_with_len("{}",2));
+ PG_RETURN_TEXT_P(cstring_to_text_with_len("{}", 2));
initStringInfo(&tmp);
initStringInfo(&dst);
dst;
if (count == 0)
- PG_RETURN_TEXT_P(cstring_to_text_with_len("{}",2));
+ PG_RETURN_TEXT_P(cstring_to_text_with_len("{}", 2));
initStringInfo(&tmp);
initStringInfo(&dst);
for (i = 0; i < count; i++)
{
- JsonbValue key, val;
+ JsonbValue key,
+ val;
key.estSize = sizeof(JEntry);
key.type = jbvString;
JsonbParseState *state = NULL;
JsonbValue *res;
StringInfoData tmp;
- bool is_number;
+ bool is_number;
initStringInfo(&tmp);
for (i = 0; i < count; i++)
{
- JsonbValue key, val;
+ JsonbValue key,
+ val;
key.estSize = sizeof(JEntry);
key.type = jbvString;
{
val.type = jbvNumeric;
val.val.numeric = DatumGetNumeric(
- DirectFunctionCall3(numeric_in, CStringGetDatum(tmp.data), 0, -1));
+ DirectFunctionCall3(numeric_in, CStringGetDatum(tmp.data), 0, -1));
+
 val.estSize += VARSIZE_ANY(val.val.numeric) + sizeof(JEntry);
}
else
return FALSE;
/*
- * Set up data for checkcondition_gin. This must agree with the query
+ * Set up data for checkcondition_gin. This must agree with the query
* extraction code in ginint4_queryextract.
*/
gcv.first = items;
qsort((void *) costvector, maxoff, sizeof(SPLITCOST), comparecost);
/*
- * Now split up the regions between the two seeds. An important property
+ * Now split up the regions between the two seeds. An important property
* of this split algorithm is that the split vector v has the indices of
* items to be split in order in its left and right vectors. We exploit
* this property by doing a merge in the code that actually splits the
/*
* If we've already decided where to place this item, just put it on
- * the right list. Otherwise, we need to figure out which page needs
+ * the right list. Otherwise, we need to figure out which page needs
* the least enlargement in order to store the item.
*/
*size = (float) ARRNELEMS(a);
}
-/* Sort the given data (len >= 2). Return true if any duplicates found */
+/* Sort the given data (len >= 2). Return true if any duplicates found */
bool
isort(int32 *a, int len)
{
bool r = FALSE;
/*
- * We use a simple insertion sort. While this is O(N^2) in the worst
+ * We use a simple insertion sort. While this is O(N^2) in the worst
* case, it's quite fast if the input is already sorted or nearly so.
* Also, for not-too-large inputs it's faster than more complex methods
* anyhow.
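/*
 * Minimal sketch of the insertion sort described above (assumed shape;
 * the flag r is set when equal neighbors are found while shifting).
 */
int			i;

for (i = 1; i < len; i++)
{
	int32		cur = a[i];
	int			j;

	for (j = i; j > 0 && a[j - 1] >= cur; j--)
	{
		if (a[j - 1] == cur)
			r = TRUE;
		a[j] = a[j - 1];
	}
	a[j] = cur;
}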
$outf = ($opt{u}) ? 'distinct( message.mid )' : 'message.mid';
}
my $sql =
- "select $outf from "
+ "select $outf from "
. join(', ', keys %table)
. " where "
. join(' AND ', @where) . ';';
/*
* If the histogram is large enough, see what fraction of it the
* constant is "<@" to, and assume that's representative of the
- * non-MCV population. Otherwise use the default selectivity for the
+ * non-MCV population. Otherwise use the default selectivity for the
* non-MCV population.
*/
selec = histogram_selectivity(&vardata, &contproc,
}
/*
- * Dump all databases. There are no system objects to worry about.
+ * Dump all databases. There are no system objects to worry about.
*/
void
sql_exec_dumpalldbs(PGconn *conn, struct options * opts)
/* now build the query */
todo = psprintf(
- "SELECT pg_catalog.pg_relation_filenode(c.oid) as \"Filenode\", relname as \"Table Name\" %s\n"
- "FROM pg_catalog.pg_class c \n"
- " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace \n"
- " LEFT JOIN pg_catalog.pg_database d ON d.datname = pg_catalog.current_database(),\n"
- " pg_catalog.pg_tablespace t \n"
- "WHERE relkind IN ('r', 'm', 'i', 'S', 't') AND \n"
- " t.oid = CASE\n"
- " WHEN reltablespace <> 0 THEN reltablespace\n"
- " ELSE dattablespace\n"
- " END AND \n"
- " (%s) \n"
- "ORDER BY relname\n",
- opts->extended ? addfields : "",
- qualifiers);
+ "SELECT pg_catalog.pg_relation_filenode(c.oid) as \"Filenode\", relname as \"Table Name\" %s\n"
+ "FROM pg_catalog.pg_class c \n"
+ " LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace \n"
+ " LEFT JOIN pg_catalog.pg_database d ON d.datname = pg_catalog.current_database(),\n"
+ " pg_catalog.pg_tablespace t \n"
+ "WHERE relkind IN ('r', 'm', 'i', 'S', 't') AND \n"
+ " t.oid = CASE\n"
+ " WHEN reltablespace <> 0 THEN reltablespace\n"
+ " ELSE dattablespace\n"
+ " END AND \n"
+ " (%s) \n"
+ "ORDER BY relname\n",
+ opts->extended ? addfields : "",
+ qualifiers);
free(qualifiers);
/* pageinspect >= 1.2 uses pg_lsn instead of text for the LSN field. */
if (tupdesc->attrs[0]->atttypid == TEXTOID)
{
- char lsnchar[64];
+ char lsnchar[64];
+
snprintf(lsnchar, sizeof(lsnchar), "%X/%X",
(uint32) (lsn >> 32), (uint32) lsn);
values[0] = CStringGetTextDatum(lsnchar);
/*
* We ignore the timeline part of the XLOG segment identifiers in
- * deciding whether a segment is still needed. This ensures that
+ * deciding whether a segment is still needed. This ensures that
* we won't prematurely remove a segment from a parent timeline.
* We could probably be a little more proactive about removing
* segments of non-parent timelines, but that would be a whole lot
{
/*
* Prints the name of the file to be removed and skips the
- * actual removal. The regular printout is so that the
+ * actual removal. The regular printout is so that the
* user can pipe the output into some other program.
*/
printf("%s\n", WALFilePath);
*
* The first argument is the relation to be prewarmed; the second controls
* how prewarming is done; legal options are 'prefetch', 'read', and 'buffer'.
- * The third is the name of the relation fork to be prewarmed. The fourth
+ * The third is the name of the relation fork to be prewarmed. The fourth
* and fifth arguments specify the first and last block to be prewarmed.
* If the fourth argument is NULL, it will be taken as 0; if the fifth argument
* is NULL, it will be taken as the number of blocks in the relation. The
* Track statement execution times across a whole database cluster.
*
* Execution costs are totalled for each distinct source query, and kept in
- * a shared hashtable. (We track only as many distinct queries as will fit
+ * a shared hashtable. (We track only as many distinct queries as will fit
* in the designated amount of shared memory.)
*
* As of Postgres 9.2, this module normalizes query entries. Normalization
*
* Normalization is implemented by fingerprinting queries, selectively
* serializing those fields of each query tree's nodes that are judged to be
- * essential to the query. This is referred to as a query jumble. This is
+ * essential to the query. This is referred to as a query jumble. This is
* distinct from a regular serialization in that various extraneous
* information is ignored as irrelevant or not essential to the query, such
* as the collations of Vars and, most notably, the values of constants.
* because we remove that file on startup; it acts inversely to
* PGSS_DUMP_FILE, in that it is only supposed to be around when the
* server is running, whereas PGSS_DUMP_FILE is only supposed to be around
- * when the server is not running. Leaving the file creates no danger of
+ * when the server is not running. Leaving the file creates no danger of
* a newly restored database having a spurious record of execution costs,
* which is what we're really concerned about here.
*/
/*
* When serializing to disk, we store query texts immediately after their
- * entry data. Any orphaned query texts are thereby excluded.
+ * entry data. Any orphaned query texts are thereby excluded.
*/
hash_seq_init(&hash_seq, pgss_hash);
while ((entry = hash_seq_search(&hash_seq)) != NULL)
/*
* We'd like to load the query text file (if needed) while not holding any
- * lock on pgss->lock. In the worst case we'll have to do this again
+ * lock on pgss->lock. In the worst case we'll have to do this again
* after we have the lock, but it's unlikely enough to make this a win
- * despite occasional duplicated work. We need to reload if anybody
+ * despite occasional duplicated work. We need to reload if anybody
* writes to the file (either a retail qtext_store(), or a garbage
* collection) between this point and where we've gotten shared lock. If
* a qtext_store is actually in progress when we look, we might as well
* would be difficult to demonstrate this even under artificial conditions.)
*
* Note: despite needing exclusive lock, it's not an error for the target
- * entry to already exist. This is because pgss_store releases and
+ * entry to already exist. This is because pgss_store releases and
* reacquires lock after failing to find a match; so someone else could
* have made the entry while we waited to get exclusive lock.
*/
* have it handy, so we require them to pass it too.
*
* If successful, returns true, and stores the new entry's offset in the file
- * into *query_offset. Also, if gc_count isn't NULL, *gc_count is set to the
+ * into *query_offset. Also, if gc_count isn't NULL, *gc_count is set to the
* number of garbage collections that have occurred so far.
*
* On failure, returns false.
*
* At least a shared lock on pgss->lock must be held by the caller, so as
- * to prevent a concurrent garbage collection. Share-lock-holding callers
+ * to prevent a concurrent garbage collection. Share-lock-holding callers
* should pass a gc_count pointer to obtain the number of garbage collections,
* so that they can recheck the count after obtaining exclusive lock to
* detect whether a garbage collection occurred (and removed this entry).
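/*
 * Hypothetical caller pattern for the contract above (norm_query and
 * query_len are placeholders): store under shared lock, then recheck
 * gc_count after upgrading to exclusive lock before trusting the offset.
 */
Size		query_offset;
int			gc_count;

if (qtext_store(norm_query, query_len, &query_offset, &gc_count))
{
	/* upgrade lock; if gc_count changed, the entry may have been removed */
}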
/*
* When called from pgss_store, some other session might have proceeded
* with garbage collection in the no-lock-held interim of lock strength
- * escalation. Check once more that this is actually necessary.
+ * escalation. Check once more that this is actually necessary.
*/
if (!need_gc_qtexts())
return;
}
/*
- * Truncate away any now-unused space. If this fails for some odd reason,
+ * Truncate away any now-unused space. If this fails for some odd reason,
* we log it, but there's no need to fail.
*/
if (ftruncate(fileno(qfile), extent) != 0)
*
* Note: the reason we don't simply use expression_tree_walker() is that the
* point of that function is to support tree walkers that don't care about
- * most tree node types, but here we care about all types. We should complain
+ * most tree node types, but here we care about all types. We should complain
* about any unrecognized node type.
*/
static void
* a problem.
*
* Duplicate constant pointers are possible, and will have their lengths
- * marked as '-1', so that they are later ignored. (Actually, we assume the
+ * marked as '-1', so that they are later ignored. (Actually, we assume the
* lengths were initialized as -1 to start with, and don't change them here.)
*
* N.B. There is an assumption that a '-' character at a Const location begins
* adjustment of location to that of the leading '-'
* operator in the event of a negative constant. It is
* also useful for our purposes to start from the minus
- * symbol. In this way, queries like "select * from foo
+ * symbol. In this way, queries like "select * from foo
* where bar = 1" and "select * from foo where bar = -2"
* will have identical normalized query strings.
*/
{
for (writes = 0; writes < writes_per_op; writes++)
if (write(tmpfile, buf, XLOG_BLCKSZ) != XLOG_BLCKSZ)
- /*
- * This can generate write failures if the filesystem
- * has a large block size, e.g. 4k, and there is no
- * support for O_DIRECT writes smaller than the
- * file system block size, e.g. XFS.
- */
+
+ /*
+ * This can generate write failures if the filesystem has
+ * a large block size, e.g. 4k, and there is no support
+ * for O_DIRECT writes smaller than the file system block
+ * size, e.g. XFS.
+ */
die("write failed");
if (lseek(tmpfile, 0, SEEK_SET) == -1)
die("seek failed");
{
/*
* Successful regex processing: store NFA-like graph as
- * extra_data. GIN API requires an array of nentries
+ * extra_data. GIN API requires an array of nentries
* Pointers, but we just put the same value in each element.
*/
trglen = ARRNELEM(trg);
/*
* GETBIT() tests may give false positives, due to limited
- * size of the sign array. But since trigramsMatchGraph()
+ * size of the sign array. But since trigramsMatchGraph()
* implements a monotone boolean function, false positives
* in the check array can't lead to false negative answer.
* So we can apply trigramsMatchGraph despite uncertainty,
* In the 2nd stage, the automaton is transformed into a graph based on the
* original NFA. Each state in the expanded graph represents a state from
* the original NFA, plus a prefix identifying the last two characters
- * (colors, to be precise) seen before entering the state. There can be
+ * (colors, to be precise) seen before entering the state. There can be
* multiple states in the expanded graph for each state in the original NFA,
* depending on what characters can precede it. A prefix position can be
* "unknown" if it's uncertain what the preceding character was, or "blank"
* "enter key".
*
* Each arc of the expanded graph is labelled with a trigram that must be
- * present in the string to match. We can construct this from an out-arc of
+ * present in the string to match. We can construct this from an out-arc of
* the underlying NFA state by combining the expanded state's prefix with the
* color label of the underlying out-arc, if neither prefix position is
* "unknown". But note that some of the colors in the trigram might be
*
 * When building the graph, if the number of states or arcs exceeds pre-defined
* limits, we give up and simply mark any states not yet processed as final
- * states. Roughly speaking, that means that we make use of some portion from
+ * states. Roughly speaking, that means that we make use of some portion from
* the beginning of the regexp. Also, any colors that have too many member
* characters are treated as "unknown", so that we can't derive trigrams
* from them.
* 1) Create state 1 with enter key (UNKNOWN, UNKNOWN, 1).
* 2) Add key (UNKNOWN, "a", 2) to state 1.
* 3) Add key ("a", "b", 3) to state 1.
- * 4) Create new state 2 with enter key ("b", "c", 4). Add an arc
+ * 4) Create new state 2 with enter key ("b", "c", 4). Add an arc
* from state 1 to state 2 with label trigram "abc".
* 5) Mark state 2 final because state 4 of source NFA is marked as final.
- * 6) Create new state 3 with enter key ("b", "d", 5). Add an arc
+ * 6) Create new state 3 with enter key ("b", "d", 5). Add an arc
* from state 1 to state 3 with label trigram "abd".
* 7) Mark state 3 final because state 5 of source NFA is marked as final.
*
*
* We call a prefix ambiguous if at least one of its colors is unknown. It's
* fully ambiguous if both are unknown, partially ambiguous if only the first
- * is unknown. (The case of first color known, second unknown is not valid.)
+ * is unknown. (The case of first color known, second unknown is not valid.)
*
* Wholly- or partly-blank prefixes are mostly handled the same as regular
- * color prefixes. This allows us to generate appropriate partly-blank
+ * color prefixes. This allows us to generate appropriate partly-blank
* trigrams when the NFA requires word character(s) to appear adjacent to
* non-word character(s).
*/
/*
* Key identifying a state of our expanded graph: color prefix, and number
- * of the corresponding state in the underlying regex NFA. The color prefix
+ * of the corresponding state in the underlying regex NFA. The color prefix
* shows how we reached the regex state (to the extent that we know it).
*/
typedef struct
* colorTrigramsCount and colorTrigramsGroups contain information about
* how trigrams are grouped into color trigrams. "colorTrigramsCount" is
* the count of color trigrams and "colorTrigramGroups" contains number of
- * simple trigrams for each color trigram. The array of simple trigrams
+ * simple trigrams for each color trigram. The array of simple trigrams
* (stored separately from this struct) is ordered so that the simple
* trigrams for each color trigram are consecutive, and they're in order
* by color trigram number.
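/*
 * Illustrative layout (numbers invented): with colorTrigramsCount = 2 and
 * colorTrigramGroups[] = {3, 1}, simple trigrams 0..2 form the first color
 * trigram and simple trigram 3 the second, stored consecutively in color
 * trigram order as described above.
 */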
/*
* This processing generates a great deal of cruft, which we'd like to
* clean up before returning (since this function may be called in a
- * query-lifespan memory context). Make a temp context we can work in so
+ * query-lifespan memory context). Make a temp context we can work in so
* that cleanup is easy.
*/
tmpcontext = AllocSetContextCreate(CurrentMemoryContext,
/*
* We can ignore the NUL character, since it can never appear in a PG text
- * string. This avoids the need for various special cases when
+ * string. This avoids the need for various special cases when
* reconstructing trigrams.
*/
if (c == 0)
pg_wchar2mb_with_len(&c, s, 1);
/*
- * In IGNORECASE mode, we can ignore uppercase characters. We assume that
+ * In IGNORECASE mode, we can ignore uppercase characters. We assume that
* the regex engine generated both uppercase and lowercase equivalents
* within each color, since we used the REG_ICASE option; so there's no
* need to process the uppercase version.
/*
* Recursively build the expanded graph by processing queue of states
- * (breadth-first search). getState already put initstate in the queue.
+ * (breadth-first search). getState already put initstate in the queue.
*/
while (trgmNFA->queue != NIL)
{
trgmNFA->queue = list_delete_first(trgmNFA->queue);
/*
- * If we overflowed then just mark state as final. Otherwise do
+ * If we overflowed then just mark state as final. Otherwise do
* actual processing.
*/
if (trgmNFA->overflowed)
/*
* Add state's own key, and then process all keys added to keysQueue until
- * queue is empty. But we can quit if the state gets marked final.
+ * queue is empty. But we can quit if the state gets marked final.
*/
addKey(trgmNFA, state, &state->stateKey);
while (trgmNFA->keysQueue != NIL && !state->fin)
/*
* Compare key to each existing enter key of the state to check for
- * redundancy. We can drop either old key(s) or the new key if we find
+ * redundancy. We can drop either old key(s) or the new key if we find
* redundancy.
*/
prev = NULL;
else if (pg_reg_colorisend(trgmNFA->regex, arc->co))
{
/*
- * End of line/string ($). We must consider this arc as a
+ * End of line/string ($). We must consider this arc as a
* transition that doesn't read anything. The reason for adding
* this enter key to the state is that if the arc leads to the
* NFA's final state, we must mark this expanded state as final.
* We can reach the arc destination after reading a word
* character, but the prefix is not something that addArc
* will accept, so no trigram arc can get made for this
- * transition. We must make an enter key to show that the
+ * transition. We must make an enter key to show that the
* arc destination is reachable. The prefix for the enter
* key should reflect the info we have for this arc.
*/
else
{
/*
- * Unexpandable color. Add enter key with ambiguous prefix,
+ * Unexpandable color. Add enter key with ambiguous prefix,
* showing we can reach the destination from this state, but
- * the preceding colors will be uncertain. (We do not set the
+ * the preceding colors will be uncertain. (We do not set the
* first prefix color to key->prefix.colors[1], because a
* prefix of known followed by unknown is invalid.)
*/
return false;
/*
- * We also reject nonblank-blank-anything. The nonblank-blank-nonblank
+ * We also reject nonblank-blank-anything. The nonblank-blank-nonblank
* case doesn't correspond to any trigram the trigram extraction code
- * would make. The nonblank-blank-blank case is also not possible with
+ * would make. The nonblank-blank-blank case is also not possible with
* RPADDING = 1. (Note that in many cases we'd fail to generate such a
* trigram even if it were valid, for example processing "foo bar" will
* not result in considering the trigram "o ". So if you want to support
/*
* Remove color trigrams from the graph so long as total penalty of color
- * trigrams exceeds WISH_TRGM_PENALTY. (If we fail to get down to
+ * trigrams exceeds WISH_TRGM_PENALTY. (If we fail to get down to
* WISH_TRGM_PENALTY, it's OK so long as total count is no more than
* MAX_TRGM_COUNT.) We prefer to remove color trigrams with higher
* penalty, since those are the most promising for reducing the total
/* Only current PG version is supported as a target */
if (GET_MAJOR_VERSION(new_cluster.major_version) != GET_MAJOR_VERSION(PG_VERSION_NUM))
pg_fatal("This utility can only upgrade to PostgreSQL version %s.\n",
- PG_MAJORVERSION);
+ PG_MAJORVERSION);
/*
* We can't allow downgrading because we use the target pg_dumpall, and
if (GET_MAJOR_VERSION(new_cluster.major_version) == 900 &&
new_cluster.controldata.cat_ver < TABLE_SPACE_SUBDIRS_CAT_VER)
pg_fatal("This utility can only upgrade to PostgreSQL version 9.0 after 2010-01-11\n"
- "because of backend API changes made during development.\n");
+ "because of backend API changes made during development.\n");
/* We read the real port number for PG >= 9.1 */
if (live_check && GET_MAJOR_VERSION(old_cluster.major_version) < 901 &&
old_cluster.port == DEF_PGUPORT)
pg_fatal("When checking a pre-PG 9.1 live old server, "
- "you must specify the old server's port number.\n");
+ "you must specify the old server's port number.\n");
if (live_check && old_cluster.port == new_cluster.port)
pg_fatal("When checking a live server, "
- "the old and new port numbers must be different.\n");
+ "the old and new port numbers must be different.\n");
}
return (pg_strcasecmp(loca, locb) == 0);
/*
- * Compare the encoding parts. Windows tends to use code page numbers for
+ * Compare the encoding parts. Windows tends to use code page numbers for
* the encoding part, which equivalent_encoding() won't like, so accept if
* the strings are case-insensitive equal; otherwise use
* equivalent_encoding() to compare.
/* pg_largeobject and its index should be skipped */
if (strcmp(rel_arr->rels[relnum].nspname, "pg_catalog") != 0)
pg_fatal("New cluster database \"%s\" is not empty\n",
- new_cluster.dbarr.dbs[dbnum].db_name);
+ new_cluster.dbarr.dbs[dbnum].db_name);
}
}
if ((script = fopen_priv(*analyze_script_file_name, "w")) == NULL)
pg_fatal("Could not open file \"%s\": %s\n",
- *analyze_script_file_name, getErrorText(errno));
+ *analyze_script_file_name, getErrorText(errno));
#ifndef WIN32
/* add shebang header */
#ifndef WIN32
if (chmod(*analyze_script_file_name, S_IRWXU) != 0)
pg_fatal("Could not add execute permission to file \"%s\": %s\n",
- *analyze_script_file_name, getErrorText(errno));
+ *analyze_script_file_name, getErrorText(errno));
#endif
if (os_info.user_specified)
if ((script = fopen_priv(*deletion_script_file_name, "w")) == NULL)
pg_fatal("Could not open file \"%s\": %s\n",
- *deletion_script_file_name, getErrorText(errno));
+ *deletion_script_file_name, getErrorText(errno));
#ifndef WIN32
/* add shebang header */
}
else
{
- char *suffix_path = pg_strdup(old_cluster.tablespace_suffix);
+ char *suffix_path = pg_strdup(old_cluster.tablespace_suffix);
/*
* Simply delete the tablespace directory, which might be ".old"
#ifndef WIN32
if (chmod(*deletion_script_file_name, S_IRWXU) != 0)
pg_fatal("Could not add execute permission to file \"%s\": %s\n",
- *deletion_script_file_name, getErrorText(errno));
+ *deletion_script_file_name, getErrorText(errno));
#endif
check_ok();
if (PQntuples(res) != 1 || strcmp(PQgetvalue(res, 0, 0), "t") != 0)
pg_fatal("database user \"%s\" is not a superuser\n",
- os_info.user);
+ os_info.user);
cluster->install_role_oid = atooid(PQgetvalue(res, 0, 1));
if (PQntuples(res) != 0)
pg_fatal("The %s cluster contains prepared transactions\n",
- CLUSTER_NAME(cluster));
+ CLUSTER_NAME(cluster));
PQclear(res);
found = true;
if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
pg_fatal("Could not open file \"%s\": %s\n",
- output_path, getErrorText(errno));
+ output_path, getErrorText(errno));
if (!db_used)
{
fprintf(script, "Database: %s\n", active_db->db_name);
pg_fatal("Your installation contains \"contrib/isn\" functions which rely on the\n"
"bigint data type. Your old and new clusters pass bigint values\n"
"differently so this cluster cannot currently be upgraded. You can\n"
- "manually upgrade databases that use \"contrib/isn\" facilities and remove\n"
- "\"contrib/isn\" from the old cluster and restart the upgrade. A list of\n"
- "the problem functions is in the file:\n"
- " %s\n\n", output_path);
+ "manually upgrade databases that use \"contrib/isn\" facilities and remove\n"
+ "\"contrib/isn\" from the old cluster and restart the upgrade. A list of\n"
+ "the problem functions is in the file:\n"
+ " %s\n\n", output_path);
}
else
check_ok();
found = true;
if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
pg_fatal("Could not open file \"%s\": %s\n",
- output_path, getErrorText(errno));
+ output_path, getErrorText(errno));
if (!db_used)
{
fprintf(script, "Database: %s\n", active_db->db_name);
pg_fatal("Your installation contains one of the reg* data types in user tables.\n"
"These data types reference system OIDs that are not preserved by\n"
"pg_upgrade, so this cluster cannot currently be upgraded. You can\n"
- "remove the problem tables and restart the upgrade. A list of the problem\n"
- "columns is in the file:\n"
- " %s\n\n", output_path);
+ "remove the problem tables and restart the upgrade. A list of the problem\n"
+ "columns is in the file:\n"
+ " %s\n\n", output_path);
}
else
check_ok();
if ((output = popen(cmd, "r")) == NULL ||
fgets(cmd_output, sizeof(cmd_output), output) == NULL)
pg_fatal("Could not get pg_ctl version data using %s: %s\n",
- cmd, getErrorText(errno));
+ cmd, getErrorText(errno));
pclose(output);
* pg_control data. pg_resetxlog cannot be run while the server is running
* so we use pg_controldata; pg_controldata doesn't provide all the fields
* we need to actually perform the upgrade, but it provides enough for
- * check mode. We do not implement pg_resetxlog -n because it is hard to
+ * check mode. We do not implement pg_resetxlog -n because it is hard to
* return valid xid data for a running server.
*/
void
if ((output = popen(cmd, "r")) == NULL)
pg_fatal("Could not get control data using %s: %s\n",
- cmd, getErrorText(errno));
+ cmd, getErrorText(errno));
/* Only pre-8.4 has these so if they are not set below we will check later */
cluster->controldata.lc_collate = NULL;
for (p = bufin; *p; p++)
if (!isascii(*p))
pg_fatal("The 8.3 cluster's pg_controldata is incapable of outputting ASCII, even\n"
- "with LANG=C. You must upgrade this cluster to a newer version of PostgreSQL\n"
- "8.3 to fix this bug. PostgreSQL 8.3.7 and later are known to work properly.\n");
+ "with LANG=C. You must upgrade this cluster to a newer version of PostgreSQL\n"
+ "8.3 to fix this bug. PostgreSQL 8.3.7 and later are known to work properly.\n");
}
#endif
* This is a common 8.3 -> 8.4 upgrade problem, so we are more verbose
*/
pg_fatal("You will need to rebuild the new server with configure option\n"
- "--disable-integer-datetimes or get server binaries built with those\n"
- "options.\n");
+ "--disable-integer-datetimes or get server binaries built with those\n"
+ "options.\n");
}
/*
/*
* Set umask for this function, all functions it calls, and all
- * subprocesses/threads it creates. We can't use fopen_priv()
- * as Windows uses threads and umask is process-global.
+ * subprocesses/threads it creates. We can't use fopen_priv() as Windows
+ * uses threads and umask is process-global.
*/
old_umask = umask(S_IRWXG | S_IRWXO);
va_list ap;
#ifdef WIN32
-static DWORD mainThreadId = 0;
+ static DWORD mainThreadId = 0;
/* We assume we are called from the primary thread first */
if (mainThreadId == 0)
pg_log(PG_VERBOSE, "%s\n", cmd);
#ifdef WIN32
+
/*
- * For some reason, Windows issues a file-in-use error if we write data
- * to the log file from a non-primary thread just before we create a
- * subprocess that also writes to the same log file. One fix is to
- * sleep for 100ms. A cleaner fix is to write to the log file _after_
- * the subprocess has completed, so we do this only when writing from
- * a non-primary thread. fflush(), running system() twice, and
- * pre-creating the file do not see to help.
+ * For some reason, Windows issues a file-in-use error if we write data to
+ * the log file from a non-primary thread just before we create a
+ * subprocess that also writes to the same log file. One fix is to sleep
+ * for 100ms. A cleaner fix is to write to the log file _after_ the
+ * subprocess has completed, so we do this only when writing from a
+ * non-primary thread. fflush(), running system() twice, and pre-creating
+ * the file do not seem to help.
*/
if (mainThreadId != GetCurrentThreadId())
result = system(cmd);
for (iter = 0; iter < 4 && log == NULL; iter++)
{
- pg_usleep(1000000); /* 1 sec */
+ pg_usleep(1000000); /* 1 sec */
log = fopen(log_file, "a");
}
}
}
#ifndef WIN32
+
/*
* We can't do this on Windows because it will keep the "pg_ctl start"
* output filename open until the server stops, so we do the \n\n above on
* that platform. We use a unique filename for "pg_ctl start" that is
- * never reused while the server is running, so it works fine. We could
+ * never reused while the server is running, so it works fine. We could
* log these commands to a third file, but that just adds complexity.
*/
if ((log = fopen(log_file, "a")) == NULL)
/* ENOTDIR means we will throw a more useful error later */
if (errno != ENOENT && errno != ENOTDIR)
pg_fatal("could not open file \"%s\" for reading: %s\n",
- path, getErrorText(errno));
+ path, getErrorText(errno));
return false;
}
int fd;
/*
- * We open a file we would normally create anyway. We do this even in
+ * We open a file we would normally create anyway. We do this even in
* 'check' mode, which isn't ideal, but this is the best we can do.
*/
if ((fd = open(GLOBALS_DUMP_FILE, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR)) < 0)
*
* This function validates the given cluster directory - we search for a
* small set of subdirectories that we expect to find in a valid $PGDATA
- * directory. If any of the subdirectories are missing (or secured against
+ * directory. If any of the subdirectories are missing (or secured against
* us) we display an error message and exit()
*
*/
* check_bin_dir()
*
* This function searches for the executables that we expect to find
- * in the binaries directory. If we find that a required executable
+ * in the binaries directory. If we find that a required executable
* is missing (or secured against us), we display an error message and
* exit().
*/
*/
if (stat(path, &buf) < 0)
pg_fatal("check for \"%s\" failed: %s\n",
- path, getErrorText(errno));
+ path, getErrorText(errno));
else if (!S_ISREG(buf.st_mode))
pg_fatal("check for \"%s\" failed: not an executable file\n",
- path);
+ path);
/*
* Ensure that the file is both executable and readable (required for
if ((buf.st_mode & S_IRUSR) == 0)
#endif
pg_fatal("check for \"%s\" failed: cannot read file (permission denied)\n",
- path);
+ path);
#ifndef WIN32
if (access(path, X_OK) != 0)
if ((buf.st_mode & S_IXUSR) == 0)
#endif
pg_fatal("check for \"%s\" failed: cannot execute (permission denied)\n",
- path);
+ path);
}
if (pg_link_file(existing_file, new_link_file) == -1)
{
pg_fatal("Could not create hard link between old and new data directories: %s\n"
- "In link mode the old and new data directories must be on the same file system volume.\n",
- getErrorText(errno));
+ "In link mode the old and new data directories must be on the same file system volume.\n",
+ getErrorText(errno));
}
unlink(new_link_file);
}
* plpython2u language was created with library name plpython2.so as a
* symbolic link to plpython.so. In Postgres 9.1, only the
* plpython2.so library was created, and both plpythonu and plpython2u
- * pointing to it. For this reason, any reference to library name
+ * point to it. For this reason, any reference to library name
* "plpython" in an old PG <= 9.1 cluster must look for "plpython2" in
* the new cluster.
*
if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
pg_fatal("Could not open file \"%s\": %s\n",
- output_path, getErrorText(errno));
+ output_path, getErrorText(errno));
fprintf(script, "Could not load library \"%s\"\n%s\n",
lib,
PQerrorMessage(conn));
fclose(script);
pg_log(PG_REPORT, "fatal\n");
pg_fatal("Your installation references loadable libraries that are missing from the\n"
- "new installation. You can add these libraries to the new installation,\n"
- "or remove the functions using them from the old installation. A list of\n"
- "problem libraries is in the file:\n"
- " %s\n\n", output_path);
+ "new installation. You can add these libraries to the new installation,\n"
+ "or remove the functions using them from the old installation. A list of\n"
+ "problem libraries is in the file:\n"
+ " %s\n\n", output_path);
}
else
check_ok();
if (old_rel->reloid != new_rel->reloid)
pg_fatal("Mismatch of relation OID in database \"%s\": old OID %d, new OID %d\n",
- old_db->db_name, old_rel->reloid, new_rel->reloid);
+ old_db->db_name, old_rel->reloid, new_rel->reloid);
/*
* TOAST table names initially match the heap pg_class oid. In
strcmp(old_rel->nspname, "pg_toast") != 0) &&
strcmp(old_rel->relname, new_rel->relname) != 0))
pg_fatal("Mismatch of relation names in database \"%s\": "
- "old name \"%s.%s\", new name \"%s.%s\"\n",
- old_db->db_name, old_rel->nspname, old_rel->relname,
- new_rel->nspname, new_rel->relname);
+ "old name \"%s.%s\", new name \"%s.%s\"\n",
+ old_db->db_name, old_rel->nspname, old_rel->relname,
+ new_rel->nspname, new_rel->relname);
create_rel_filename_map(old_pgdata, new_pgdata, old_db, new_db,
old_rel, new_rel, maps + num_maps);
*/
if (old_db->rel_arr.nrels != new_db->rel_arr.nrels)
pg_fatal("old and new databases \"%s\" have a different number of relations\n",
- old_db->db_name);
+ old_db->db_name);
*nmaps = num_maps;
return maps;
i_relfilenode,
i_reltablespace;
char query[QUERY_ALLOC];
- char *last_namespace = NULL, *last_tablespace = NULL;
+ char *last_namespace = NULL,
+ *last_tablespace = NULL;
/*
* pg_largeobject contains user data that does not appear in pg_dumpall
"SELECT reltoastrelid "
"FROM info_rels i JOIN pg_catalog.pg_class c "
" ON i.reloid = c.oid "
- " AND c.reltoastrelid != %u", InvalidOid));
+ " AND c.reltoastrelid != %u", InvalidOid));
PQclear(executeQueryOrDie(conn,
"INSERT INTO info_rels "
"SELECT indexrelid "
curr->nsp_alloc = false;
/*
- * Many of the namespace and tablespace strings are identical,
- * so we try to reuse the allocated string pointers where possible
- * to reduce memory consumption.
+ * Many of the namespace and tablespace strings are identical, so we
+ * try to reuse the allocated string pointers where possible to reduce
+ * memory consumption.
*/
/* Can we reuse the previous string allocation? */
if (last_namespace && strcmp(nspname, last_namespace) == 0)
default:
pg_fatal("Try \"%s --help\" for more information.\n",
- os_info.progname);
+ os_info.progname);
break;
}
}
/* Turn off read-only mode; add prefix to PGOPTIONS? */
if (getenv("PGOPTIONS"))
{
- char *pgoptions = psprintf("%s %s", FIX_DEFAULT_READ_ONLY,
- getenv("PGOPTIONS"));
+ char *pgoptions = psprintf("%s %s", FIX_DEFAULT_READ_ONLY,
+ getenv("PGOPTIONS"));
+
pg_putenv("PGOPTIONS", pgoptions);
pfree(pgoptions);
}
}
else
pg_fatal("You must identify the directory where the %s.\n"
- "Please use the %s command-line option or the %s environment variable.\n",
- description, cmdLineOption, envVarName);
+ "Please use the %s command-line option or the %s environment variable.\n",
+ description, cmdLineOption, envVarName);
}
/*
/*
* We don't have a data directory yet, so we can't check the PG version,
- * so this might fail --- only works for PG 9.2+. If this fails,
+ * so this might fail --- only works for PG 9.2+. If this fails,
* pg_upgrade will fail anyway because the data files will not be found.
*/
snprintf(cmd, sizeof(cmd), "\"%s/postmaster\" -D \"%s\" -C data_directory",
if ((output = popen(cmd, "r")) == NULL ||
fgets(cmd_output, sizeof(cmd_output), output) == NULL)
pg_fatal("Could not get data directory using %s: %s\n",
- cmd, getErrorText(errno));
+ cmd, getErrorText(errno));
pclose(output);
* the PageLayoutVersion of the new cluster. If the versions differ, this
* function loads a converter plugin and returns a pointer to a pageCnvCtx
* object (in *result) that knows how to convert pages from the old format
- * to the new format. If the versions are identical, this function just
+ * to the new format. If the versions are identical, this function just
* returns a NULL pageCnvCtx pointer to indicate that page-by-page conversion
* is not required.
*/
* This function loads a page-converter plugin library and grabs a
* pointer to each of the (interesting) functions provided by that
* plugin. The name of the plugin library is derived from the given
- * newPageVersion and oldPageVersion. If a plugin is found, this
+ * newPageVersion and oldPageVersion. If a plugin is found, this
* function returns a pointer to a pageCnvCtx object (which will contain
* a collection of plugin function pointers). If the required plugin
* is not found, this function returns NULL.
thread_handles[thread_num] = thread_handles[parallel_jobs - 1];
/*
- * Move last active thead arg struct into the now-dead slot,
- * and the now-dead slot to the end for reuse by the next thread.
- * Though the thread struct is in use by another thread, we can
- * safely swap the struct pointers within the array.
+ * Move last active thread arg struct into the now-dead slot, and the
+ * now-dead slot to the end for reuse by the next thread. Though the
+ * thread struct is in use by another thread, we can safely swap the
+ * struct pointers within the array.
*/
tmp_args = cur_thread_args[thread_num];
cur_thread_args[thread_num] = cur_thread_args[parallel_jobs - 1];
/*
* Most failures happen in create_new_objects(), which has completed at
- * this point. We do this here because it is just before linking, which
+ * this point. We do this here because it is just before linking, which
* will link the old and new cluster data files, preventing the old
* cluster from being safely started once the new cluster is started.
*/
{
/*
* If we have a postmaster.pid file, try to start the server. If it
- * starts, the pid file was stale, so stop the server. If it doesn't
+ * starts, the pid file was stale, so stop the server. If it doesn't
* start, assume the server is running. If the pid file is left over
* from a server crash, this also allows any committed transactions
* stored in the WAL to be replayed so they are not lost, because WAL
{
if (!user_opts.check)
pg_fatal("There seems to be a postmaster servicing the old cluster.\n"
- "Please shutdown that postmaster and try again.\n");
+ "Please shutdown that postmaster and try again.\n");
else
*live_check = true;
}
stop_postmaster(false);
else
pg_fatal("There seems to be a postmaster servicing the new cluster.\n"
- "Please shutdown that postmaster and try again.\n");
+ "Please shutdown that postmaster and try again.\n");
}
/* get path to pg_upgrade executable */
/*
* Install support functions in the global-object restore database to
- * preserve pg_authid.oid. pg_dumpall uses 'template0' as its template
- * database so objects we add into 'template1' are not propogated. They
+ * preserve pg_authid.oid. pg_dumpall uses 'template0' as its template
+ * database so objects we add into 'template1' are not propagated. They
* are removed on pg_upgrade exit.
*/
install_support_functions_in_new_db("template1");
*/
typedef struct
{
- const char *old_tablespace;
- const char *new_tablespace;
- const char *old_tablespace_suffix;
- const char *new_tablespace_suffix;
+ const char *old_tablespace;
+ const char *new_tablespace;
+ const char *old_tablespace_suffix;
+ const char *new_tablespace_suffix;
Oid old_db_oid;
Oid new_db_oid;
{
Oid db_oid; /* oid of the database */
char *db_name; /* database name */
- char db_tablespace[MAXPGPATH]; /* database default tablespace path */
+ char db_tablespace[MAXPGPATH]; /* database default tablespace
+ * path */
RelInfoArr rel_arr; /* array of all user relinfos */
} DbInfo;
__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 3)));
void
pg_fatal(const char *fmt,...)
-__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2),noreturn));
+__attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2), noreturn));
void end_progress_output(void);
void
prep_status(const char *fmt,...)
/*
 * Transferring files by tablespace is tricky because a single database can
* use multiple tablespaces. For non-parallel mode, we just pass a NULL
- * tablespace path, which matches all tablespaces. In parallel mode, we
+ * tablespace path, which matches all tablespaces. In parallel mode, we
* pass the default tablespace and all user-created tablespaces and let
* those operations happen in parallel.
*/
if (new_dbnum >= new_db_arr->ndbs)
pg_fatal("old database \"%s\" not found in the new cluster\n",
- old_db->db_name);
+ old_db->db_name);
n_maps = 0;
mappings = gen_db_file_maps(old_db, new_db, &n_maps, old_pgdata,
/*
* get_pg_database_relfilenode()
*
- * Retrieves the relfilenode for a few system-catalog tables. We need these
+ * Retrieves the relfilenode for a few system-catalog tables. We need these
* relfilenodes later in the upgrade process.
*/
void
return;
else
pg_fatal("error while checking for file existence \"%s.%s\" (\"%s\" to \"%s\"): %s\n",
- map->nspname, map->relname, old_file, new_file,
- getErrorText(errno));
+ map->nspname, map->relname, old_file, new_file,
+ getErrorText(errno));
}
close(fd);
}
if ((user_opts.transfer_mode == TRANSFER_MODE_LINK) && (pageConverter != NULL))
pg_fatal("This upgrade requires page-by-page conversion, "
- "you must use copy mode instead of link mode.\n");
+ "you must use copy mode instead of link mode.\n");
if (user_opts.transfer_mode == TRANSFER_MODE_COPY)
{
if ((msg = copyAndUpdateFile(pageConverter, old_file, new_file, true)) != NULL)
pg_fatal("error while copying relation \"%s.%s\" (\"%s\" to \"%s\"): %s\n",
- map->nspname, map->relname, old_file, new_file, msg);
+ map->nspname, map->relname, old_file, new_file, msg);
}
else
{
if ((msg = linkAndUpdateFile(pageConverter, old_file, new_file)) != NULL)
pg_fatal("error while creating link for relation \"%s.%s\" (\"%s\" to \"%s\"): %s\n",
- map->nspname, map->relname, old_file, new_file, msg);
+ map->nspname, map->relname, old_file, new_file, msg);
}
}
return false;
/*
- * We set this here to make sure atexit() shuts down the server,
- * but only if we started the server successfully. We do it
- * before checking for connectivity in case the server started but
- * there is a connectivity failure. If pg_ctl did not return success,
- * we will exit below.
+ * We set this here to make sure atexit() shuts down the server, but only
+ * if we started the server successfully. We do it before checking for
+ * connectivity in case the server started but there is a connectivity
+ * failure. If pg_ctl did not return success, we will exit below.
*
* Pre-9.1 servers do not have PQping(), so we could be leaving the server
- * running if authentication was misconfigured, so someday we might went to
- * be more aggressive about doing server shutdowns even if pg_ctl fails,
- * but now (2013-08-14) it seems prudent to be cautious. We don't want to
- * shutdown a server that might have been accidentally started during the
- * upgrade.
+ * running if authentication was misconfigured, so someday we might want
+ * to be more aggressive about doing server shutdowns even if pg_ctl
+ * fails, but now (2013-08-14) it seems prudent to be cautious. We don't
+ * want to shut down a server that might have been accidentally started
+ * during the upgrade.
*/
if (pg_ctl_return)
os_info.running_cluster = cluster;
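The shutdown-at-exit machinery the comment leans on is the standard
register-once, act-on-flag pattern; a minimal sketch, with illustrative
names and a simplified stop command:

#include <stdbool.h>
#include <stdlib.h>

static bool server_running = false;     /* flipped only after a good start */

/* registered once via atexit(); runs on every exit path, pg_fatal included */
static void
stop_server_atexit(void)
{
    if (server_running)
        system("pg_ctl stop -m fast");  /* sketch; real code builds the
                                         * full command line */
}

main() would call atexit(stop_server_atexit) early, and set server_running
to true only once pg_ctl reports success, exactly the ordering the comment
above argues for.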
/*
- * pg_ctl -w might have failed because the server couldn't be started,
- * or there might have been a connection problem in _checking_ if the
- * server has started. Therefore, even if pg_ctl failed, we continue
- * and test for connectivity in case we get a connection reason for the
- * failure.
+ * pg_ctl -w might have failed because the server couldn't be started, or
+ * there might have been a connection problem in _checking_ if the server
+ * has started. Therefore, even if pg_ctl failed, we continue and test
+ * for connectivity in case we get a connection reason for the failure.
*/
if ((conn = get_db_conn(cluster, "template1")) == NULL ||
PQstatus(conn) != CONNECTION_OK)
if (conn)
PQfinish(conn);
pg_fatal("could not connect to %s postmaster started with the command:\n"
- "%s\n",
- CLUSTER_NAME(cluster), cmd);
+ "%s\n",
+ CLUSTER_NAME(cluster), cmd);
}
PQfinish(conn);
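For servers new enough to have it, PQping() can make the same
determination without completing authentication, which is why the pre-9.1
caveat above matters; a hedged sketch (the wrapper name is illustrative):

#include <stdbool.h>
#include "libpq-fe.h"

static bool
server_is_up(const char *conninfo)
{
    /*
     * PQping (libpq 9.1+) asks the postmaster whether it would accept a
     * connection, without actually authenticating.
     */
    return PQping(conninfo) == PQPING_OK;
}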
/*
* If pg_ctl failed, and the connection didn't fail, and throw_error is
- * enabled, fail now. This could happen if the server was already running.
+ * enabled, fail now. This could happen if the server was already
+ * running.
*/
if (!pg_ctl_return)
pg_fatal("pg_ctl failed to start the %s server, or connection failed\n",
- CLUSTER_NAME(cluster));
+ CLUSTER_NAME(cluster));
return true;
}
(strcmp(value, "localhost") != 0 && strcmp(value, "127.0.0.1") != 0 &&
strcmp(value, "::1") != 0 && value[0] != '/'))
pg_fatal("libpq environment variable %s has a non-local server value: %s\n",
- option->envvar, value);
+ option->envvar, value);
}
}
if (os_info.num_old_tablespaces > 0 &&
strcmp(old_cluster.tablespace_suffix, new_cluster.tablespace_suffix) == 0)
pg_fatal("Cannot upgrade to/from the same system catalog version when\n"
- "using tablespaces.\n");
+ "using tablespaces.\n");
}
* Effectively, this is checking only for tables/indexes in
* non-existent tablespace directories. Databases located in
* non-existent tablespaces already throw a backend error.
- * Non-existent tablespace directories can occur when a data
- * directory that contains user tablespaces is moved as part
- * of pg_upgrade preparation and the symbolic links are not
- * updated.
+ * Non-existent tablespace directories can occur when a data directory
+ * that contains user tablespaces is moved as part of pg_upgrade
+ * preparation and the symbolic links are not updated.
*/
if (stat(os_info.old_tablespaces[tblnum], &statBuf) != 0)
{
os_info.old_tablespaces[tblnum]);
else
report_status(PG_FATAL,
- "cannot stat() tablespace directory \"%s\": %s\n",
- os_info.old_tablespaces[tblnum], getErrorText(errno));
+ "cannot stat() tablespace directory \"%s\": %s\n",
+ os_info.old_tablespaces[tblnum], getErrorText(errno));
}
if (!S_ISDIR(statBuf.st_mode))
- report_status(PG_FATAL,
- "tablespace path \"%s\" is not a directory\n",
- os_info.old_tablespaces[tblnum]);
+ report_status(PG_FATAL,
+ "tablespace path \"%s\" is not a directory\n",
+ os_info.old_tablespaces[tblnum]);
}
PQclear(res);
static
- __attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 0)))
+__attribute__((format(PG_PRINTF_ATTRIBUTE, 2, 0)))
void
pg_log_v(eLogType type, const char *fmt, va_list ap)
{
/*
* Do not free envstr because it becomes part of the environment on
- * some operating systems. See port/unsetenv.c::unsetenv.
+ * some operating systems. See port/unsetenv.c::unsetenv.
*/
#else
SetEnvironmentVariableA(var, val);
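The non-Windows branch this comment refers to follows the classic putenv()
contract: the string handed to putenv() becomes part of the environment
itself, so it must stay allocated for the life of the process. A minimal
sketch of that pattern (the wrapper name is illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void
set_env_var(const char *var, const char *val)
{
    char       *envstr = malloc(strlen(var) + strlen(val) + 2);

    if (envstr == NULL)
        return;                 /* real code would report the failure */
    sprintf(envstr, "%s=%s", var, val);
    putenv(envstr);
    /* do NOT free(envstr): the environment now points into this memory */
}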
pg_log(PG_REPORT, "fatal\n");
pg_fatal("Your installation contains the \"name\" data type in user tables. This\n"
"data type changed its internal alignment between your old and new\n"
- "clusters so this cluster cannot currently be upgraded. You can remove\n"
+ "clusters so this cluster cannot currently be upgraded. You can remove\n"
"the problem tables and restart the upgrade. A list of the problem\n"
- "columns is in the file:\n"
- " %s\n\n", output_path);
+ "columns is in the file:\n"
+ " %s\n\n", output_path);
}
else
check_ok();
{
pg_log(PG_REPORT, "fatal\n");
pg_fatal("Your installation contains the \"tsquery\" data type. This data type\n"
- "added a new internal field between your old and new clusters so this\n"
+ "added a new internal field between your old and new clusters so this\n"
"cluster cannot currently be upgraded. You can remove the problem\n"
- "columns and restart the upgrade. A list of the problem columns is in the\n"
- "file:\n"
- " %s\n\n", output_path);
+ "columns and restart the upgrade. A list of the problem columns is in the\n"
+ "file:\n"
+ " %s\n\n", output_path);
}
else
check_ok();
found = true;
if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL)
pg_fatal("Could not open file \"%s\": %s\n",
- output_path, getErrorText(errno));
+ output_path, getErrorText(errno));
if (!db_used)
{
fprintf(script, "Database: %s\n", active_db->db_name);
{
pg_log(PG_REPORT, "fatal\n");
pg_fatal("Your installation contains the \"ltree\" data type. This data type\n"
- "changed its internal storage format between your old and new clusters so this\n"
- "cluster cannot currently be upgraded. You can manually upgrade databases\n"
- "that use \"contrib/ltree\" facilities and remove \"contrib/ltree\" from the old\n"
- "cluster and restart the upgrade. A list of the problem functions is in the\n"
- "file:\n"
- " %s\n\n", output_path);
+ "changed its internal storage format between your old and new clusters so this\n"
+ "cluster cannot currently be upgraded. You can manually upgrade databases\n"
+ "that use \"contrib/ltree\" facilities and remove \"contrib/ltree\" from the old\n"
+ "cluster and restart the upgrade. A list of the problem functions is in the\n"
+ "file:\n"
+ " %s\n\n", output_path);
}
else
check_ok();
/*
* Stopgap implementation of timestamptz_to_str that doesn't depend on backend
- * infrastructure. This will work for timestamps that are within the range
+ * infrastructure. This will work for timestamps that are within the range
* of the platform time_t type. (pg_time_t is compatible except for possibly
* being wider.)
*
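The range caveat is the whole trick: while the value fits in the
platform's time_t, plain C library calls are enough. A hedged sketch of
such a stopgap (the function name is illustrative):

#include <stdio.h>
#include <time.h>

static const char *
stopgap_timestamp_str(time_t t)
{
    static char buf[64];

    /*
     * localtime()/strftime() do the formatting; this only works inside
     * the platform time_t range, the limitation conceded above.
     */
    strftime(buf, sizeof(buf), "%Y-%m-%d %H:%M:%S %Z", localtime(&t));
    return buf;
}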
break;
else
{
- pg_usleep(1000000L); /* 1 second */
+ pg_usleep(1000000L); /* 1 second */
continue;
}
}
bool use_quiet; /* quiet logging onto stderr */
int agg_interval; /* log aggregates instead of individual
* transactions */
-int progress = 0; /* thread progress report every this seconds */
-int progress_nclients = 0; /* number of clients for progress report */
-int progress_nthreads = 0; /* number of threads for progress report */
+int progress = 0; /* seconds between thread progress reports */
+int progress_nclients = 0; /* number of clients for progress
+ * report */
+int progress_nthreads = 0; /* number of threads for progress
+ * report */
bool is_connect; /* establish connection for each transaction */
bool is_latencies; /* report per-command latencies */
int main_pid; /* main process id used in log filename */
int listen; /* 0 indicates that an async query has been
* sent */
int sleeping; /* 1 indicates that the client is napping */
- bool throttling; /* whether nap is for throttling */
+ bool throttling; /* whether nap is for throttling */
int64 until; /* napping until (usec) */
Variable *variables; /* array of variable definitions */
int nvariables;
instr_time *exec_elapsed; /* time spent executing cmds (per Command) */
int *exec_count; /* number of cmd executions (per Command) */
unsigned short random_state[3]; /* separate randomness for each thread */
- int64 throttle_trigger; /* previous/next throttling (us) */
- int64 throttle_lag; /* total transaction lag behind throttling */
- int64 throttle_lag_max; /* max transaction lag */
+ int64 throttle_trigger; /* previous/next throttling (us) */
+ int64 throttle_lag; /* total transaction lag behind throttling */
+ int64 throttle_lag_max; /* max transaction lag */
} TState;
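The throttle_trigger/throttle_lag fields implement rate limiting against a
randomized schedule; a sketch of the usual approach, assuming
exponentially distributed waits (Poisson arrivals) as pgbench's --rate
option uses, with the per-thread erand48() state matching random_state in
TState above (the function and parameter names are illustrative):

#include <math.h>
#include <stdint.h>
#include <stdlib.h>

static int64_t
next_throttle_wait(double throttle_delay_us, unsigned short random_state[3])
{
    /* 1.0 - erand48() lies in (0, 1], so log() never sees zero */
    double      uniform = 1.0 - erand48(random_state);

    /* exponential wait with mean throttle_delay_us microseconds */
    return (int64_t) (throttle_delay_us * -log(uniform));
}

Each wait is added to throttle_trigger to get the next scheduled start;
the gap between the scheduled and the actual start is what accumulates
into throttle_lag and throttle_lag_max.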
#define INVALID_THREAD ((pthread_t) 0)
int xacts;
int64 latencies;
int64 sqlats;
- int64 throttle_lag;
- int64 throttle_lag_max;
+ int64 throttle_lag;
+ int64 throttle_lag_max;
} TResult;
/*
"\nInitialization options:\n"
" -i, --initialize invokes initialization mode\n"
" -F, --fillfactor=NUM set fill factor\n"
- " -n, --no-vacuum do not run VACUUM after initialization\n"
- " -q, --quiet quiet logging (one message each 5 seconds)\n"
+ " -n, --no-vacuum do not run VACUUM after initialization\n"
+ " -q, --quiet quiet logging (one message each 5 seconds)\n"
" -s, --scale=NUM scaling factor\n"
" --foreign-keys create foreign key constraints between tables\n"
" --index-tablespace=TABLESPACE\n"
- " create indexes in the specified tablespace\n"
- " --tablespace=TABLESPACE create tables in the specified tablespace\n"
+ " create indexes in the specified tablespace\n"
+ " --tablespace=TABLESPACE create tables in the specified tablespace\n"
" --unlogged-tables create tables as unlogged tables\n"
"\nBenchmarking options:\n"
" -c, --client=NUM number of concurrent database clients (default: 1)\n"
" -C, --connect establish new connection for each transaction\n"
" -D, --define=VARNAME=VALUE\n"
- " define variable for use by custom script\n"
- " -f, --file=FILENAME read transaction script from FILENAME\n"
+ " define variable for use by custom script\n"
+ " -f, --file=FILENAME read transaction script from FILENAME\n"
" -j, --jobs=NUM number of threads (default: 1)\n"
" -l, --log write transaction times to log file\n"
" -M, --protocol=simple|extended|prepared\n"
" -N, --skip-some-updates skip updates of pgbench_tellers and pgbench_branches\n"
" -P, --progress=NUM show thread progress report every NUM seconds\n"
" -r, --report-latencies report average latency per command\n"
- " -R, --rate=NUM target rate in transactions per second\n"
+ " -R, --rate=NUM