DefineCustomRealVariable("auto_explain.sample_rate",
"Fraction of queries to process.",
- NULL,
- &auto_explain_sample_rate,
- 1.0,
- 0.0,
- 1.0,
- PGC_SUSET,
- 0,
- NULL,
- NULL,
- NULL);
+ NULL,
+ &auto_explain_sample_rate,
+ 1.0,
+ 0.0,
+ 1.0,
+ PGC_SUSET,
+ 0,
+ NULL,
+ NULL,
+ NULL);
EmitWarningsOnPlaceholders("auto_explain");
explain_ExecutorStart(QueryDesc *queryDesc, int eflags)
{
/*
- * For rate sampling, randomly choose top-level statement. Either
- * all nested statements will be explained or none will.
+ * For rate sampling, randomly choose top-level statement. Either all
+ * nested statements will be explained or none will.
*/
if (auto_explain_log_min_duration >= 0 && nesting_level == 0)
current_query_sampled = (random() < auto_explain_sample_rate *
- MAX_RANDOM_VALUE);
+ MAX_RANDOM_VALUE);
if (auto_explain_enabled() && current_query_sampled)
{
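A minimal standalone sketch of the sampling decision above (assumptions: MAX_RANDOM_VALUE mirrors PostgreSQL's 2^31 - 1 definition and random() returns a value in [0, MAX_RANDOM_VALUE]):

#include <stdbool.h>
#include <stdlib.h>

#define MAX_RANDOM_VALUE 0x7FFFFFFF		/* 2^31 - 1, as in PostgreSQL */

static double auto_explain_sample_rate = 1.0;	/* GUC, range 0.0 .. 1.0 */

/* Returns true for roughly sample_rate of all top-level statements. */
static bool
sample_this_query(void)
{
	return random() < auto_explain_sample_rate * MAX_RANDOM_VALUE;
}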
typedef struct
{
BloomState blstate; /* bloom index state */
- MemoryContext tmpCtx; /* temporary memory context reset after
- * each tuple */
+ MemoryContext tmpCtx; /* temporary memory context reset after each
+ * tuple */
char data[BLCKSZ]; /* cached page */
int64 count; /* number of tuples in cached page */
-} BloomBuildState;
+} BloomBuildState;
/*
* Flush page cached in BloomBuildState.
bloomBuildCallback, (void *) &buildstate);
/*
- * There are could be some items in cached page. Flush this page
- * if needed.
+ * There could be some items in the cached page. Flush this page if
+ * needed.
*/
if (buildstate.count > 0)
flushCachedPage(index, &buildstate);
/* Opaque for bloom pages */
typedef struct BloomPageOpaqueData
{
- OffsetNumber maxoff; /* number of index tuples on page */
- uint16 flags; /* see bit definitions below */
- uint16 unused; /* placeholder to force maxaligning of size
- * of BloomPageOpaqueData and to place
- * bloom_page_id exactly at the end of page
- */
- uint16 bloom_page_id; /* for identification of BLOOM indexes */
-} BloomPageOpaqueData;
+ OffsetNumber maxoff; /* number of index tuples on page */
+ uint16 flags; /* see bit definitions below */
+ uint16 unused; /* placeholder to force maxaligning of size of
+ * BloomPageOpaqueData and to place
+ * bloom_page_id exactly at the end of page */
+ uint16 bloom_page_id; /* for identification of BLOOM indexes */
+} BloomPageOpaqueData;
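A sketch of why the "unused" field exists (illustrative standalone C11; the real code relies on PostgreSQL's MAXALIGN): the pad lifts the struct to a maxaligned size, so when the opaque data sits at the end of a page, bloom_page_id occupies the page's last two bytes, where page-inspection code looks for a page identifier.

#include <stdint.h>

typedef struct
{
	uint16_t	maxoff;			/* number of index tuples on page */
	uint16_t	flags;
	uint16_t	unused;			/* pad: lifts size from 6 to 8 bytes */
	uint16_t	bloom_page_id;	/* lands in the page's last two bytes */
} OpaqueSketch;

_Static_assert(sizeof(OpaqueSketch) % 8 == 0, "opaque data must be maxaligned");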
typedef BloomPageOpaqueData *BloomPageOpaque;
{
int32 vl_len_; /* varlena header (do not touch directly!) */
int bloomLength; /* length of signature in words (not bits!) */
- int bitSize[INDEX_MAX_KEYS]; /* # of bits generated for each
- * index key */
-} BloomOptions;
+ int bitSize[INDEX_MAX_KEYS]; /* # of bits generated for
+ * each index key */
+} BloomOptions;
/*
* FreeBlockNumberArray - array of block numbers sized so that metadata fill
uint16 nEnd;
BloomOptions opts;
FreeBlockNumberArray notFullPage;
-} BloomMetaPageData;
+} BloomMetaPageData;
/* Magic number to distinguish bloom pages among others */
#define BLOOM_MAGICK_NUMBER (0xDBAC0DED)
* precompute it
*/
Size sizeOfBloomTuple;
-} BloomState;
+} BloomState;
#define BloomPageGetFreeSpace(state, page) \
(BLCKSZ - MAXALIGN(SizeOfPageHeaderData) \
{
ItemPointerData heapPtr;
BloomSignatureWord sign[FLEXIBLE_ARRAY_MEMBER];
-} BloomTuple;
+} BloomTuple;
#define BLOOMTUPLEHDRSZ offsetof(BloomTuple, sign)
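Given the flexible array member above, a tuple's size is the fixed header plus bloomLength signature words; this is what the precomputed sizeOfBloomTuple in BloomState amounts to. A hypothetical helper (not in the patch) restating that arithmetic:

#define BLOOM_TUPLE_SIZE(bloomLength) \
	(BLOOMTUPLEHDRSZ + sizeof(BloomSignatureWord) * (bloomLength))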
/* Opaque data structure for bloom index scan */
typedef struct BloomScanOpaqueData
{
- BloomSignatureWord *sign; /* Scan signature */
+ BloomSignatureWord *sign; /* Scan signature */
BloomState state;
-} BloomScanOpaqueData;
+} BloomScanOpaqueData;
typedef BloomScanOpaqueData *BloomScanOpaque;
/* blutils.c */
extern void _PG_init(void);
extern Datum blhandler(PG_FUNCTION_ARGS);
-extern void initBloomState(BloomState * state, Relation index);
+extern void initBloomState(BloomState *state, Relation index);
extern void BloomFillMetapage(Relation index, Page metaPage);
extern void BloomInitMetapage(Relation index);
extern void BloomInitPage(Page page, uint16 flags);
extern Buffer BloomNewBuffer(Relation index);
-extern void signValue(BloomState * state, BloomSignatureWord * sign, Datum value, int attno);
-extern BloomTuple *BloomFormTuple(BloomState * state, ItemPointer iptr, Datum *values, bool *isnull);
-extern bool BloomPageAddItem(BloomState * state, Page page, BloomTuple * tuple);
+extern void signValue(BloomState *state, BloomSignatureWord *sign, Datum value, int attno);
+extern BloomTuple *BloomFormTuple(BloomState *state, ItemPointer iptr, Datum *values, bool *isnull);
+extern bool BloomPageAddItem(BloomState *state, Page page, BloomTuple *tuple);
/* blvalidate.c */
extern bool blvalidate(Oid opclassoid);
/* Kind of relation options for bloom index */
static relopt_kind bl_relopt_kind;
+
/* parse table for fillRelOptions */
static relopt_parse_elt bl_relopt_tab[INDEX_MAX_KEYS + 1];
* October 1988, p. 1195.
*----------
*/
- int32 hi, lo, x;
+ int32 hi,
+ lo,
+ x;
/* Must be in [1, 0x7ffffffe] range at this point. */
hi = next / 127773;
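The hi/lo split above is Schrage's method for computing the Park-Miller generator next = (16807 * next) mod (2^31 - 1) without 32-bit overflow, using the decomposition 2147483647 = 16807 * 127773 + 2836 (CACM, October 1988). A standalone sketch of the complete step:

#include <stdint.h>

static int32_t
lehmer_step(int32_t next)		/* next must be in [1, 0x7ffffffe] */
{
	int32_t		hi = next / 127773;
	int32_t		lo = next % 127773;
	int32_t		x = 16807 * lo - 2836 * hi;

	/* 16807 * lo never exceeds INT32_MAX, so no overflow occurs */
	return (x > 0) ? x : x + 2147483647;
}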
/* Iterate over the tuples */
itup = itupPtr = BloomPageGetTuple(&state, page, FirstOffsetNumber);
itupEnd = BloomPageGetTuple(&state, page,
- OffsetNumberNext(BloomPageGetMaxOffset(page)));
+ OffsetNumberNext(BloomPageGetMaxOffset(page)));
while (itup < itupEnd)
{
/* Do we have to delete this tuple? */
}
Assert(itupPtr == BloomPageGetTuple(&state, page,
- OffsetNumberNext(BloomPageGetMaxOffset(page))));
+ OffsetNumberNext(BloomPageGetMaxOffset(page))));
/*
- * Add page to notFullPage list if we will not mark page as deleted and
- * there is a free space on it
+ * Add page to notFullPage list if we will not mark page as deleted
+ * and there is free space on it
*/
if (BloomPageGetMaxOffset(page) != 0 &&
BloomPageGetFreeSpace(&state, page) > state.sizeOfBloomTuple &&
AcquireSampleRowsFunc *func,
BlockNumber *totalpages);
static bool fileIsForeignScanParallelSafe(PlannerInfo *root, RelOptInfo *rel,
- RangeTblEntry *rte);
+ RangeTblEntry *rte);
/*
* Helper functions
/*
* fileIsForeignScanParallelSafe
- * Reading a file in a parallel worker should work just the same as
- * reading it in the leader, so mark scans safe.
+ * Reading a file in a parallel worker should work just the same as
+ * reading it in the leader, so mark scans safe.
*/
static bool
fileIsForeignScanParallelSafe(PlannerInfo *root, RelOptInfo *rel,
- RangeTblEntry *rte)
+ RangeTblEntry *rte)
{
return true;
}
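A sketch of how such a callback is advertised, assuming the PostgreSQL 9.6-era FDW API (file_fdw's real handler sets more callbacks than shown here):

#include "postgres.h"
#include "foreign/fdwapi.h"

Datum
file_fdw_handler(PG_FUNCTION_ARGS)
{
	FdwRoutine *routine = makeNode(FdwRoutine);

	/* advertise the parallel-safety callback defined above */
	routine->IsForeignScanParallelSafe = fileIsForeignScanParallelSafe;
	/* ... other callbacks elided ... */

	PG_RETURN_POINTER(routine);
}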
unsigned check;
/*
- * The number should come in this format: 978-0-000-00000-0
- * or may be an ISBN-13 number, 979-..., which does not have a short
- * representation. Do the short output version if possible.
+ * The number should come in this format: 978-0-000-00000-0 or may be an
+ * ISBN-13 number, 979-..., which does not have a short representation. Do
+ * the short output version if possible.
*/
if (strncmp("978-", isn, 4) == 0)
{
else
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
- errmsg("illegal character '%c' in t_bits string", str[off])));
+ errmsg("illegal character '%c' in t_bits string", str[off])));
if (off % 8 == 7)
bits[off / 8] = byte;
lp_offset == MAXALIGN(lp_offset) &&
lp_offset + lp_len <= raw_page_size)
{
- HeapTupleHeader tuphdr;
- bytea *tuple_data_bytea;
- int tuple_data_len;
+ HeapTupleHeader tuphdr;
+ bytea *tuple_data_bytea;
+ int tuple_data_len;
/* Extract information from the tuple header */
tuple_data_bytea = (bytea *) palloc(tuple_data_len + VARHDRSZ);
SET_VARSIZE(tuple_data_bytea, tuple_data_len + VARHDRSZ);
memcpy(VARDATA(tuple_data_bytea), (char *) tuphdr + tuphdr->t_hoff,
- tuple_data_len);
+ tuple_data_len);
values[13] = PointerGetDatum(tuple_data_bytea);
/*
*/
static Datum
tuple_data_split_internal(Oid relid, char *tupdata,
- uint16 tupdata_len, uint16 t_infomask,
- uint16 t_infomask2, bits8 *t_bits,
- bool do_detoast)
+ uint16 tupdata_len, uint16 t_infomask,
+ uint16 t_infomask2, bits8 *t_bits,
+ bool do_detoast)
{
- ArrayBuildState *raw_attrs;
- int nattrs;
- int i;
- int off = 0;
- Relation rel;
- TupleDesc tupdesc;
+ ArrayBuildState *raw_attrs;
+ int nattrs;
+ int i;
+ int off = 0;
+ Relation rel;
+ TupleDesc tupdesc;
/* Get tuple descriptor from relation OID */
rel = relation_open(relid, NoLock);
for (i = 0; i < nattrs; i++)
{
- Form_pg_attribute attr;
- bool is_null;
- bytea *attr_data = NULL;
+ Form_pg_attribute attr;
+ bool is_null;
+ bytea *attr_data = NULL;
attr = tupdesc->attrs[i];
is_null = (t_infomask & HEAP_HASNULL) && att_isnull(i, t_bits);
/*
- * Tuple header can specify less attributes than tuple descriptor
- * as ALTER TABLE ADD COLUMN without DEFAULT keyword does not
- * actually change tuples in pages, so attributes with numbers greater
- * than (t_infomask2 & HEAP_NATTS_MASK) should be treated as NULL.
+ * Tuple header can specify fewer attributes than the tuple descriptor,
+ * as ALTER TABLE ADD COLUMN without DEFAULT keyword does not actually
+ * change tuples in pages, so attributes with numbers greater than
+ * (t_infomask2 & HEAP_NATTS_MASK) should be treated as NULL.
*/
if (i >= (t_infomask2 & HEAP_NATTS_MASK))
is_null = true;
if (!is_null)
{
- int len;
+ int len;
if (attr->attlen == -1)
{
off = att_align_pointer(off, tupdesc->attrs[i]->attalign, -1,
tupdata + off);
+
/*
* As VARSIZE_ANY throws an exception if it can't properly
* detect the type of external storage in macros VARTAG_SIZE,
!VARATT_IS_EXTERNAL_ONDISK(tupdata + off) &&
!VARATT_IS_EXTERNAL_INDIRECT(tupdata + off))
ereport(ERROR,
- (errcode(ERRCODE_DATA_CORRUPTED),
- errmsg("first byte of varlena attribute is incorrect for attribute %d", i)));
+ (errcode(ERRCODE_DATA_CORRUPTED),
+ errmsg("first byte of varlena attribute is incorrect for attribute %d", i)));
len = VARSIZE_ANY(tupdata + off);
}
if (tupdata_len != off)
ereport(ERROR,
(errcode(ERRCODE_DATA_CORRUPTED),
- errmsg("end of tuple reached without looking at all its data")));
+ errmsg("end of tuple reached without looking at all its data")));
return makeArrayResult(raw_attrs, CurrentMemoryContext);
}
Datum
tuple_data_split(PG_FUNCTION_ARGS)
{
- Oid relid;
- bytea *raw_data;
- uint16 t_infomask;
- uint16 t_infomask2;
- char *t_bits_str;
- bool do_detoast = false;
- bits8 *t_bits = NULL;
- Datum res;
+ Oid relid;
+ bytea *raw_data;
+ uint16 t_infomask;
+ uint16 t_infomask2;
+ char *t_bits_str;
+ bool do_detoast = false;
+ bits8 *t_bits = NULL;
+ Datum res;
relid = PG_GETARG_OID(0);
raw_data = PG_ARGISNULL(1) ? NULL : PG_GETARG_BYTEA_P(1);
*/
if (t_infomask & HEAP_HASNULL)
{
- int bits_str_len;
- int bits_len;
+ int bits_str_len;
+ int bits_len;
bits_len = (t_infomask2 & HEAP_NATTS_MASK) / 8 + 1;
if (!t_bits_str)
Datum
gin_trgm_triconsistent(PG_FUNCTION_ARGS)
{
- GinTernaryValue *check = (GinTernaryValue *) PG_GETARG_POINTER(0);
+ GinTernaryValue *check = (GinTernaryValue *) PG_GETARG_POINTER(0);
StrategyNumber strategy = PG_GETARG_UINT16(1);
/* text *query = PG_GETARG_TEXT_P(2); */
int32 nkeys = PG_GETARG_INT32(3);
Pointer *extra_data = (Pointer *) PG_GETARG_POINTER(4);
- GinTernaryValue res = GIN_MAYBE;
+ GinTernaryValue res = GIN_MAYBE;
int32 i,
ntrue;
bool *boolcheck;
}
/*
- * See comment in gin_trgm_consistent() about * upper bound formula
+ * See comment in gin_trgm_consistent() about the upper bound
+ * formula
*/
res = (nkeys == 0)
? GIN_FALSE : (((((float4) ntrue) / ((float4) nkeys)) >= nlimit)
- ? GIN_MAYBE : GIN_FALSE);
+ ? GIN_MAYBE : GIN_FALSE);
break;
case ILikeStrategyNumber:
#ifndef IGNORECASE
else
{
/*
- * As trigramsMatchGraph implements a monotonic boolean function,
- * promoting all GIN_MAYBE keys to GIN_TRUE will give a
- * conservative result.
+ * As trigramsMatchGraph implements a monotonic boolean
+ * function, promoting all GIN_MAYBE keys to GIN_TRUE will
+ * give a conservative result.
*/
boolcheck = (bool *) palloc(sizeof(bool) * nkeys);
for (i = 0; i < nkeys; i++)
break;
default:
elog(ERROR, "unrecognized strategy number: %d", strategy);
- res = GIN_FALSE; /* keep compiler quiet */
+ res = GIN_FALSE; /* keep compiler quiet */
break;
}
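Restated as a helper (illustrative only; gin_trgm_triconsistent inlines the expression), the bound above says: if even the optimistic ratio ntrue/nkeys cannot reach nlimit, the key is definitely GIN_FALSE; otherwise it stays GIN_MAYBE, never GIN_TRUE, since matched trigrams alone cannot prove similarity. For example, ntrue = 6 of nkeys = 9 gives 0.67, which passes a 0.6 limit as GIN_MAYBE, while ntrue = 4 gives 0.44 and fails.

static GinTernaryValue
similarity_upper_bound(int32 ntrue, int32 nkeys, double nlimit)
{
	/* with no keys the query can never be satisfied */
	if (nkeys == 0)
		return GIN_FALSE;
	return ((((float4) ntrue) / ((float4) nkeys)) >= nlimit)
		? GIN_MAYBE : GIN_FALSE;
}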
if (GIST_LEAF(entry))
{ /* all leaves contain orig trgm */
+
/*
* Prevent gcc optimizing the tmpsml variable using volatile
* keyword. Otherwise comparison of nlimit and tmpsml may give
*recheck = strategy == WordDistanceStrategyNumber;
if (GIST_LEAF(entry))
{ /* all leaves contain orig trgm */
+
/*
* Prevent gcc optimizing the sml variable using volatile
* keyword. Otherwise res can differ from the
* word_similarity_dist_op() function.
*/
float4 volatile sml = cnt_sml(qtrg, key, *recheck);
+
res = 1.0 - sml;
}
else if (ISALLTRUE(key))
PG_MODULE_MAGIC;
/* GUC variables */
-double similarity_threshold = 0.3f;
-double word_similarity_threshold = 0.6f;
+double similarity_threshold = 0.3f;
+double word_similarity_threshold = 0.6f;
void _PG_init(void);
/* Trigram with position */
typedef struct
{
- trgm trg;
- int index;
+ trgm trg;
+ int index;
} pos_trgm;
/*
{
/* Define custom GUC variables. */
DefineCustomRealVariable("pg_trgm.similarity_threshold",
- "Sets the threshold used by the %% operator.",
- "Valid range is 0.0 .. 1.0.",
- &similarity_threshold,
- 0.3,
- 0.0,
- 1.0,
- PGC_USERSET,
- 0,
- NULL,
- NULL,
- NULL);
+ "Sets the threshold used by the %% operator.",
+ "Valid range is 0.0 .. 1.0.",
+ &similarity_threshold,
+ 0.3,
+ 0.0,
+ 1.0,
+ PGC_USERSET,
+ 0,
+ NULL,
+ NULL,
+ NULL);
DefineCustomRealVariable("pg_trgm.word_similarity_threshold",
- "Sets the threshold used by the <%% operator.",
- "Valid range is 0.0 .. 1.0.",
- &word_similarity_threshold,
- 0.6,
- 0.0,
- 1.0,
- PGC_USERSET,
- 0,
- NULL,
- NULL,
- NULL);
+ "Sets the threshold used by the <%% operator.",
+ "Valid range is 0.0 .. 1.0.",
+ &word_similarity_threshold,
+ 0.6,
+ 0.0,
+ 1.0,
+ PGC_USERSET,
+ 0,
+ NULL,
+ NULL,
+ NULL);
}
/*
* Make array of positional trigrams from two trigram arrays trg1 and trg2.
*
* trg1: trigram array of search pattern, of length len1. trg1 is the required
- * word which positions don't matter and replaced with -1.
+ * word, whose positions don't matter and are replaced with -1.
* trg2: trigram array of text, of length len2. trg2 is the haystack where we
- * search and have to store its positions.
+ * search and whose positions have to be stored.
*
* Returns concatenated trigram array.
*/
make_positional_trgm(trgm *trg1, int len1, trgm *trg2, int len2)
{
pos_trgm *result;
- int i, len = len1 + len2;
+ int i,
+ len = len1 + len2;
result = (pos_trgm *) palloc(sizeof(pos_trgm) * len);
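The fill loops implied by the comment above (a sketch consistent with it: pattern trigrams get index -1, text trigrams remember their position):

	for (i = 0; i < len1; i++)
	{
		memcpy(&result[i].trg, &trg1[i], sizeof(trgm));
		result[i].index = -1;			/* pattern position doesn't matter */
	}

	for (i = 0; i < len2; i++)
	{
		memcpy(&result[i + len1].trg, &trg2[i], sizeof(trgm));
		result[i + len1].index = i;		/* remember position in the text */
	}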
static int
comp_ptrgm(const void *v1, const void *v2)
{
- const pos_trgm *p1 = (const pos_trgm *)v1;
- const pos_trgm *p2 = (const pos_trgm *)v2;
- int cmp;
+ const pos_trgm *p1 = (const pos_trgm *) v1;
+ const pos_trgm *p2 = (const pos_trgm *) v2;
+ int cmp;
cmp = CMPTRGM(p1->trg, p2->trg);
if (cmp != 0)
* len2: length of array "trg2" and array "trg2indexes".
* len: length of the array "found".
* check_only: if true then only check existence of similar search pattern in
- * text.
+ * text.
*
* Returns word similarity.
*/
for (i = 0; i < len2; i++)
{
/* Get index of next trigram */
- int trgindex = trg2indexes[i];
+ int trgindex = trg2indexes[i];
/* Update last position of this trigram */
if (lower >= 0 || found[trgindex])
/* Adjust lower bound if this trigram is present in required substring */
if (found[trgindex])
{
- int prev_lower,
- tmp_ulen2,
- tmp_lower,
- tmp_count;
+ int prev_lower,
+ tmp_ulen2,
+ tmp_lower,
+ tmp_count;
upper = i;
if (lower == -1)
prev_lower = lower;
for (tmp_lower = lower; tmp_lower <= upper; tmp_lower++)
{
- float smlr_tmp = CALCSML(tmp_count, ulen1, tmp_ulen2);
- int tmp_trgindex;
+ float smlr_tmp = CALCSML(tmp_count, ulen1, tmp_ulen2);
+ int tmp_trgindex;
if (smlr_tmp > smlr_cur)
{
lower = tmp_lower;
count = tmp_count;
}
+
/*
* if we only check that word similarity is greater than
- * pg_trgm.word_similarity_threshold we do not need to calculate
- * a maximum similarity.
+ * pg_trgm.word_similarity_threshold we do not need to
+ * calculate a maximum similarity.
*/
if (check_only && smlr_cur >= word_similarity_threshold)
break;
}
smlr_max = Max(smlr_max, smlr_cur);
+
/*
* if we only check that word similarity is greater than
* pg_trgm.word_similarity_threshold we do not need to calculate a
for (tmp_lower = prev_lower; tmp_lower < lower; tmp_lower++)
{
- int tmp_trgindex;
+ int tmp_trgindex;
+
tmp_trgindex = trg2indexes[tmp_lower];
if (lastpos[tmp_trgindex] == tmp_lower)
lastpos[tmp_trgindex] = -1;
* str1: search pattern string, of length slen1 bytes.
* str2: text in which we are looking for a word, of length slen2 bytes.
* check_only: if true then only check existence of similar search pattern in
- * text.
+ * text.
*
* Returns word similarity.
*/
static float4
calc_word_similarity(char *str1, int slen1, char *str2, int slen2,
- bool check_only)
+ bool check_only)
{
bool *found;
pos_trgm *ptrg;
protect_out_of_mem(slen1 + slen2);
/* Make positional trigrams */
- trg1 = (trgm *) palloc(sizeof(trgm) * (slen1 / 2 + 1) * 3);
- trg2 = (trgm *) palloc(sizeof(trgm) * (slen2 / 2 + 1) * 3);
+ trg1 = (trgm *) palloc(sizeof(trgm) * (slen1 / 2 + 1) * 3);
+ trg2 = (trgm *) palloc(sizeof(trgm) * (slen2 / 2 + 1) * 3);
len1 = generate_trgm_only(trg1, str1, slen1);
len2 = generate_trgm_only(trg2, str2, slen2);
{
if (i > 0)
{
- int cmp = CMPTRGM(ptrg[i - 1].trg, ptrg[i].trg);
+ int cmp = CMPTRGM(ptrg[i - 1].trg, ptrg[i].trg);
+
if (cmp != 0)
{
if (found[j])
/* Run iterative procedure to find maximum similarity with word */
result = iterate_word_similarity(trg2indexes, found, ulen1, len2, len,
- check_only);
+ check_only);
pfree(trg2indexes);
pfree(found);
float4 res;
res = calc_word_similarity(VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1),
- VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2),
- false);
+ VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2),
+ false);
PG_FREE_IF_COPY(in1, 0);
PG_FREE_IF_COPY(in2, 1);
float4 res;
res = calc_word_similarity(VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1),
- VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2),
- true);
+ VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2),
+ true);
PG_FREE_IF_COPY(in1, 0);
PG_FREE_IF_COPY(in2, 1);
float4 res;
res = calc_word_similarity(VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2),
- VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1),
- true);
+ VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1),
+ true);
PG_FREE_IF_COPY(in1, 0);
PG_FREE_IF_COPY(in2, 1);
float4 res;
res = calc_word_similarity(VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1),
- VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2),
- false);
+ VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2),
+ false);
PG_FREE_IF_COPY(in1, 0);
PG_FREE_IF_COPY(in2, 1);
float4 res;
res = calc_word_similarity(VARDATA_ANY(in2), VARSIZE_ANY_EXHDR(in2),
- VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1),
- false);
+ VARDATA_ANY(in1), VARSIZE_ANY_EXHDR(in1),
+ false);
PG_FREE_IF_COPY(in1, 0);
PG_FREE_IF_COPY(in2, 1);
typedef struct vbits
{
- BlockNumber next;
- BlockNumber count;
+ BlockNumber next;
+ BlockNumber count;
uint8 bits[FLEXIBLE_ARRAY_MEMBER];
} vbits;
if (SRF_IS_FIRSTCALL())
{
Oid relid = PG_GETARG_OID(0);
- MemoryContext oldcontext;
+ MemoryContext oldcontext;
funcctx = SRF_FIRSTCALL_INIT();
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
if (SRF_IS_FIRSTCALL())
{
Oid relid = PG_GETARG_OID(0);
- MemoryContext oldcontext;
+ MemoryContext oldcontext;
funcctx = SRF_FIRSTCALL_INIT();
oldcontext = MemoryContextSwitchTo(funcctx->multi_call_memory_ctx);
{
Oid relid = PG_GETARG_OID(0);
Relation rel;
- BlockNumber nblocks;
- BlockNumber blkno;
+ BlockNumber nblocks;
+ BlockNumber blkno;
Buffer vmbuffer = InvalidBuffer;
int64 all_visible = 0;
int64 all_frozen = 0;
collect_visibility_data(Oid relid, bool include_pd)
{
Relation rel;
- BlockNumber nblocks;
+ BlockNumber nblocks;
vbits *info;
- BlockNumber blkno;
+ BlockNumber blkno;
Buffer vmbuffer = InvalidBuffer;
- BufferAccessStrategy bstrategy = GetAccessStrategy(BAS_BULKREAD);
+ BufferAccessStrategy bstrategy = GetAccessStrategy(BAS_BULKREAD);
rel = relation_open(relid, AccessShareLock);
nblocks = RelationGetNumberOfBlocks(rel);
- info = palloc0(offsetof(vbits, bits) + nblocks);
+ info = palloc0(offsetof(vbits, bits) + nblocks);
info->next = 0;
info->count = nblocks;
info->bits[blkno] |= (1 << 1);
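A sketch of the bit encoding used above (an assumption consistent with the two 9.6 visibility-map bits: bit 0 of info->bits[blkno] records all-visible, bit 1 all-frozen):

	if (VM_ALL_VISIBLE(rel, blkno, &vmbuffer))
		info->bits[blkno] |= (1 << 0);
	if (VM_ALL_FROZEN(rel, blkno, &vmbuffer))
		info->bits[blkno] |= (1 << 1);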
/*
- * Page-level data requires reading every block, so only get it if
- * the caller needs it. Use a buffer access strategy, too, to prevent
+ * Page-level data requires reading every block, so only get it if the
+ * caller needs it. Use a buffer access strategy, too, to prevent
* cache-thrashing.
*/
if (include_pd)
uint8 mode;
uint8 digest_algo;
uint8 salt[8];
- uint8 iter; /* encoded (one-octet) count */
+ uint8 iter; /* encoded (one-octet) count */
/* calculated: */
uint8 key[PGP_MAX_KEY];
uint8 key_len;
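The one-octet "iter" field above is the encoded iterated-S2K count from RFC 4880, section 3.7.1.3; a standalone sketch of the decoding:

#include <stdint.h>

/* Decode a one-octet S2K count: (16 + low nibble) << (high nibble + 6). */
static uint32_t
s2k_decode_count(uint8_t iter)
{
	return ((uint32_t) (16 + (iter & 15))) << ((iter >> 4) + 6);
}

/* e.g. iter = 0x60 decodes to 16 << 12 = 65536 iterations */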
for (;;)
{
- PGresult *res;
+ PGresult *res;
while (PQisBusy(conn))
{
- int wc;
+ int wc;
/* Sleep until there's something to do */
wc = WaitLatchOrSocket(MyLatch,
/*
* If a command has been submitted to the remote server by
* using an asynchronous execution function, the command
- * might not have yet completed. Check to see if a command
- * is still being processed by the remote server, and if so,
- * request cancellation of the command.
+ * might not have yet completed. Check to see if a
+ * command is still being processed by the remote server,
+ * and if so, request cancellation of the command.
*/
if (PQtransactionStatus(entry->conn) == PQTRANS_ACTIVE)
{
if (!PQcancel(cancel, errbuf, sizeof(errbuf)))
ereport(WARNING,
(errcode(ERRCODE_CONNECTION_FAILURE),
- errmsg("could not send cancel request: %s",
- errbuf)));
+ errmsg("could not send cancel request: %s",
+ errbuf)));
PQfreeCancel(cancel);
}
}
entry->have_error = true;
/*
- * If a command has been submitted to the remote server by using an
- * asynchronous execution function, the command might not have yet
- * completed. Check to see if a command is still being processed by
- * the remote server, and if so, request cancellation of the
- * command.
+ * If a command has been submitted to the remote server by using
+ * an asynchronous execution function, the command might not have
+ * yet completed. Check to see if a command is still being
+ * processed by the remote server, and if so, request cancellation
+ * of the command.
*/
if (PQtransactionStatus(entry->conn) == PQTRANS_ACTIVE)
{
/*
* All other system attributes are fetched as 0, except for table OID,
* which is fetched as the local table OID. However, we must be
- * careful; the table could be beneath an outer join, in which case
- * it must go to NULL whenever the rest of the row does.
+ * careful; the table could be beneath an outer join, in which case it
+ * must go to NULL whenever the rest of the row does.
*/
- Oid fetchval = 0;
+ Oid fetchval = 0;
if (varattno == TableOidAttributeNumber)
{
0 - FirstLowInvalidHeapAttributeNumber);
/*
- * In case the whole-row reference is under an outer join then it has to
- * go NULL whenver the rest of the row goes NULL. Deparsing a join query
- * would always involve multiple relations, thus qualify_col would be
- * true.
+ * In case the whole-row reference is under an outer join then it has
+ * to go NULL whenever the rest of the row goes NULL. Deparsing a join
+ * query would always involve multiple relations, thus qualify_col
+ * would be true.
*/
if (qualify_col)
{
/* Complete the CASE WHEN statement started above. */
if (qualify_col)
- appendStringInfo(buf," END");
+ appendStringInfo(buf, " END");
heap_close(rel, NoLock);
bms_free(attrs_used);
}
else if (strcmp(def->defname, "fetch_size") == 0)
{
- int fetch_size;
+ int fetch_size;
- fetch_size = strtol(defGetString(def), NULL,10);
+ fetch_size = strtol(defGetString(def), NULL, 10);
if (fetch_size <= 0)
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
/*
* Pull the other remote conditions from the joining relations into join
- * clauses or other remote clauses (remote_conds) of this relation wherever
- * possible. This avoids building subqueries at every join step, which is
- * not currently supported by the deparser logic.
+ * clauses or other remote clauses (remote_conds) of this relation
+ * wherever possible. This avoids building subqueries at every join step,
+ * which is not currently supported by the deparser logic.
*
* For an inner join, clauses from both the relations are added to the
- * other remote clauses. For LEFT and RIGHT OUTER join, the clauses from the
- * outer side are added to remote_conds since those can be evaluated after
- * the join is evaluated. The clauses from inner side are added to the
- * joinclauses, since they need to evaluated while constructing the join.
+ * other remote clauses. For LEFT and RIGHT OUTER join, the clauses from
+ * the outer side are added to remote_conds since those can be evaluated
+ * after the join is evaluated. The clauses from inner side are added to
+ * the joinclauses, since they need to be evaluated while constructing the
+ * join.
*
- * For a FULL OUTER JOIN, the other clauses from either relation can not be
- * added to the joinclauses or remote_conds, since each relation acts as an
- * outer relation for the other. Consider such full outer join as
+ * For a FULL OUTER JOIN, the other clauses from either relation can not
+ * be added to the joinclauses or remote_conds, since each relation acts
+ * as an outer relation for the other. Consider such full outer join as
* unshippable because of the reasons mentioned above in this comment.
*
* The joining sides can not have local conditions, thus no need to test
ForeignServer *server;
UserMapping *user; /* only set in use_remote_estimate mode */
- int fetch_size; /* fetch size for this remote table */
+ int fetch_size; /* fetch size for this remote table */
/*
* Name of the relation while EXPLAINing ForeignScan. It is used for join
List *targetAttrs, List *returningList,
List **retrieved_attrs);
extern void deparseDirectUpdateSql(StringInfo buf, PlannerInfo *root,
- Index rtindex, Relation rel,
- List *targetlist,
- List *targetAttrs,
- List *remote_conds,
- List **params_list,
- List *returningList,
- List **retrieved_attrs);
+ Index rtindex, Relation rel,
+ List *targetlist,
+ List *targetAttrs,
+ List *remote_conds,
+ List **params_list,
+ List *returningList,
+ List **retrieved_attrs);
extern void deparseDeleteSql(StringInfo buf, PlannerInfo *root,
Index rtindex, Relation rel,
List *returningList,
List **retrieved_attrs);
extern void deparseDirectDeleteSql(StringInfo buf, PlannerInfo *root,
- Index rtindex, Relation rel,
- List *remote_conds,
- List **params_list,
- List *returningList,
- List **retrieved_attrs);
+ Index rtindex, Relation rel,
+ List *remote_conds,
+ List **params_list,
+ List *returningList,
+ List **retrieved_attrs);
extern void deparseAnalyzeSizeSql(StringInfo buf, Relation rel);
extern void deparseAnalyzeSql(StringInfo buf, Relation rel,
List **retrieved_attrs);
if (nid == NID_undef)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("unknown OpenSSL extension in certificate at position %d",
- call_cntr)));
+ errmsg("unknown OpenSSL extension in certificate at position %d",
+ call_cntr)));
values[0] = CStringGetTextDatum(OBJ_nid2sn(nid));
nulls[0] = false;
static bool pg_decode_filter(LogicalDecodingContext *ctx,
RepOriginId origin_id);
static void pg_decode_message(LogicalDecodingContext *ctx,
- ReorderBufferTXN *txn, XLogRecPtr message_lsn,
- bool transactional, const char *prefix,
- Size sz, const char *message);
+ ReorderBufferTXN *txn, XLogRecPtr message_lsn,
+ bool transactional, const char *prefix,
+ Size sz, const char *message);
void
_PG_init(void)
{
XLogRecPtr lsn = record->EndRecPtr;
Buffer buffer;
- BlockNumber regpgno;
+ BlockNumber regpgno;
Page page;
XLogRedoAction action;
"fillfactor",
"Packs table pages only to this percentage",
RELOPT_KIND_HEAP,
- ShareUpdateExclusiveLock /* since it applies only to later inserts */
+ ShareUpdateExclusiveLock /* since it applies only to later
+ * inserts */
},
HEAP_DEFAULT_FILLFACTOR, HEAP_MIN_FILLFACTOR, 100
},
"fillfactor",
"Packs btree index pages only to this percentage",
RELOPT_KIND_BTREE,
- ShareUpdateExclusiveLock /* since it applies only to later inserts */
+ ShareUpdateExclusiveLock /* since it applies only to later
+ * inserts */
},
BTREE_DEFAULT_FILLFACTOR, BTREE_MIN_FILLFACTOR, 100
},
"fillfactor",
"Packs hash index pages only to this percentage",
RELOPT_KIND_HASH,
- ShareUpdateExclusiveLock /* since it applies only to later inserts */
+ ShareUpdateExclusiveLock /* since it applies only to later
+ * inserts */
},
HASH_DEFAULT_FILLFACTOR, HASH_MIN_FILLFACTOR, 100
},
"fillfactor",
"Packs gist index pages only to this percentage",
RELOPT_KIND_GIST,
- ShareUpdateExclusiveLock /* since it applies only to later inserts */
+ ShareUpdateExclusiveLock /* since it applies only to later
+ * inserts */
},
GIST_DEFAULT_FILLFACTOR, GIST_MIN_FILLFACTOR, 100
},
"fillfactor",
"Packs spgist index pages only to this percentage",
RELOPT_KIND_SPGIST,
- ShareUpdateExclusiveLock /* since it applies only to later inserts */
+ ShareUpdateExclusiveLock /* since it applies only to later
+ * inserts */
},
SPGIST_DEFAULT_FILLFACTOR, SPGIST_MIN_FILLFACTOR, 100
},
LOCKMODE
AlterTableGetRelOptionsLockLevel(List *defList)
{
- LOCKMODE lockmode = NoLock;
- ListCell *cell;
+ LOCKMODE lockmode = NoLock;
+ ListCell *cell;
if (defList == NIL)
return AccessExclusiveLock;
foreach(cell, defList)
{
- DefElem *def = (DefElem *) lfirst(cell);
- int i;
+ DefElem *def = (DefElem *) lfirst(cell);
+ int i;
for (i = 0; relOpts[i]; i++)
{
int64 nDeletedHeapTuples = 0;
ginxlogDeleteListPages data;
Buffer buffers[GIN_NDELETE_AT_ONCE];
- BlockNumber freespace[GIN_NDELETE_AT_ONCE];
+ BlockNumber freespace[GIN_NDELETE_AT_ONCE];
data.ndeleted = 0;
while (data.ndeleted < GIN_NDELETE_AT_ONCE && blknoToDelete != newHead)
bool inVacuum = (stats == NULL);
/*
- * We would like to prevent concurrent cleanup process. For
- * that we will lock metapage in exclusive mode using LockPage()
- * call. Nobody other will use that lock for metapage, so
- * we keep possibility of concurrent insertion into pending list
+ * We would like to prevent concurrent cleanup processes. For that we will
+ * lock the metapage in exclusive mode using a LockPage() call. Nobody else
+ * will use that lock for the metapage, so we keep the possibility of
+ * concurrent insertion into the pending list.
*/
if (inVacuum)
{
/*
- * We are called from [auto]vacuum/analyze or
- * gin_clean_pending_list() and we would like to wait
- * concurrent cleanup to finish.
+ * We are called from [auto]vacuum/analyze or gin_clean_pending_list()
+ * and we would like to wait for concurrent cleanup to finish.
*/
LockPage(index, GIN_METAPAGE_BLKNO, ExclusiveLock);
workMemory =
(IsAutoVacuumWorkerProcess() && autovacuum_work_mem != -1) ?
- autovacuum_work_mem : maintenance_work_mem;
+ autovacuum_work_mem : maintenance_work_mem;
}
else
{
/*
- * We are called from regular insert and if we see
- * concurrent cleanup just exit in hope that concurrent
- * process will clean up pending list.
+ * We are called from a regular insert and if we see concurrent cleanup
+ * just exit in the hope that the concurrent process will clean up the
+ * pending list.
*/
if (!ConditionalLockPage(index, GIN_METAPAGE_BLKNO, ExclusiveLock))
return;
Assert(!GinPageIsDeleted(page));
/*
- * Are we walk through the page which as we remember was a tail when we
- * start our cleanup? But if caller asks us to clean up whole pending
- * list then ignore old tail, we will work until list becomes empty.
+ * Are we walking through the page which, as we remember, was the tail
+ * when we started our cleanup? But if the caller asks us to clean up
+ * the whole pending list, then ignore the old tail; we will work until
+ * the list becomes empty.
*/
if (blkno == blknoFinish && full_clean == false)
cleanupFinish = true;
* locking */
/*
- * remove read pages from pending list, at this point all
- * content of read pages is in regular structure
+ * remove read pages from pending list, at this point all content
+ * of read pages is in regular structure
*/
shiftList(index, metabuffer, blkno, fill_fsm, stats);
ReleaseBuffer(metabuffer);
/*
- * As pending list pages can have a high churn rate, it is
- * desirable to recycle them immediately to the FreeSpace Map when
- * ordinary backends clean the list.
+ * As pending list pages can have a high churn rate, it is desirable to
+ * recycle them immediately to the FreeSpace Map when ordinary backends
+ * clean the list.
*/
if (fsm_vac && fill_fsm)
IndexFreeSpaceMapVacuum(index);
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("recovery is in progress"),
- errhint("GIN pending list cannot be cleaned up during recovery.")));
+ errhint("GIN pending list cannot be cleaned up during recovery.")));
/* Must be a GIN index */
if (indexRel->rd_rel->relkind != RELKIND_INDEX ||
&htup->t_self);
/* If we've maxed out our available memory, dump everything to the index */
- if (buildstate->accum.allocatedMemory >= (Size)maintenance_work_mem * 1024L)
+ if (buildstate->accum.allocatedMemory >= (Size) maintenance_work_mem * 1024L)
{
ItemPointerData *list;
Datum key;
{
/* Yes, so initialize stats to zeroes */
stats = (IndexBulkDeleteResult *) palloc0(sizeof(IndexBulkDeleteResult));
+
/*
- * and cleanup any pending inserts */
+ * and clean up any pending inserts
+ */
ginInsertCleanup(&gvs.ginstate, !IsAutoVacuumWorkerProcess(),
false, stats);
}
gistvacuumpage(Relation rel, Page page, Buffer buffer)
{
OffsetNumber deletable[MaxIndexTuplesPerPage];
- int ndeletable = 0;
- OffsetNumber offnum, maxoff;
+ int ndeletable = 0;
+ OffsetNumber offnum,
+ maxoff;
Assert(GistPageIsLeaf(page));
static void
gistkillitems(IndexScanDesc scan)
{
- GISTScanOpaque so = (GISTScanOpaque) scan->opaque;
- Buffer buffer;
- Page page;
- OffsetNumber offnum;
- ItemId iid;
- int i;
- bool killedsomething = false;
+ GISTScanOpaque so = (GISTScanOpaque) scan->opaque;
+ Buffer buffer;
+ Page page;
+ OffsetNumber offnum;
+ ItemId iid;
+ int i;
+ bool killedsomething = false;
Assert(so->curBlkno != InvalidBlockNumber);
Assert(!XLogRecPtrIsInvalid(so->curPageLSN));
page = BufferGetPage(buffer);
/*
- * If page LSN differs it means that the page was modified since the last read.
- * killedItems could be not valid so LP_DEAD hints applying is not safe.
+ * If the page LSN differs, it means that the page was modified since the
+ * last read. killedItems might not be valid, so applying LP_DEAD hints
+ * is not safe.
*/
- if(PageGetLSN(page) != so->curPageLSN)
+ if (PageGetLSN(page) != so->curPageLSN)
{
UnlockReleaseBuffer(buffer);
- so->numKilled = 0; /* reset counter */
+ so->numKilled = 0; /* reset counter */
return;
}
Assert(GistPageIsLeaf(page));
/*
- * Mark all killedItems as dead. We need no additional recheck,
- * because, if page was modified, pageLSN must have changed.
+ * Mark all killedItems as dead. We need no additional recheck, because,
+ * if page was modified, pageLSN must have changed.
*/
for (i = 0; i < so->numKilled; i++)
{
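		/*
		 * A sketch of the loop body (an assumption mirroring btree's
		 * _bt_killitems): flag each remembered offset LP_DEAD. No recheck
		 * is needed, because the unchanged page LSN verified above proves
		 * the page was not modified since the offsets were collected.
		 */
		offnum = so->killedItems[i];
		iid = PageGetItemId(page, offnum);
		ItemIdMarkDead(iid);
		killedsomething = true;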
maxoff = PageGetMaxOffsetNumber(page);
for (i = FirstOffsetNumber; i <= maxoff; i = OffsetNumberNext(i))
{
- ItemId iid = PageGetItemId(page, i);
+ ItemId iid = PageGetItemId(page, i);
IndexTuple it;
bool match;
bool recheck;
* If the scan specifies not to return killed tuples, then we treat a
* killed tuple as not passing the qual.
*/
- if(scan->ignore_killed_tuples && ItemIdIsDead(iid))
+ if (scan->ignore_killed_tuples && ItemIdIsDead(iid))
continue;
it = (IndexTuple) PageGetItem(page, iid);
+
/*
* Must call gistindex_keytest in tempCxt, and clean up any leftover
* junk afterward.
if (so->killedItems == NULL)
{
MemoryContext oldCxt =
- MemoryContextSwitchTo(so->giststate->scanCxt);
+ MemoryContextSwitchTo(so->giststate->scanCxt);
so->killedItems =
(OffsetNumber *) palloc(MaxIndexTuplesPerPage
- * sizeof(OffsetNumber));
+ * sizeof(OffsetNumber));
MemoryContextSwitchTo(oldCxt);
}
if (so->killedItems == NULL)
{
MemoryContext oldCxt =
- MemoryContextSwitchTo(so->giststate->scanCxt);
+ MemoryContextSwitchTo(so->giststate->scanCxt);
so->killedItems =
(OffsetNumber *) palloc(MaxIndexTuplesPerPage
- * sizeof(OffsetNumber));
+ * sizeof(OffsetNumber));
MemoryContextSwitchTo(oldCxt);
}
ScanKey skey = scan->keyData + i;
/*
- * Copy consistent support function to ScanKey structure
- * instead of function implementing filtering operator.
+ * Copy consistent support function to ScanKey structure instead
+ * of function implementing filtering operator.
*/
fmgr_info_copy(&(skey->sk_func),
&(so->giststate->consistentFn[skey->sk_attno - 1]),
so->orderByTypes[i] = get_func_rettype(skey->sk_func.fn_oid);
/*
- * Copy distance support function to ScanKey structure
- * instead of function implementing ordering operator.
+ * Copy distance support function to ScanKey structure instead of
+ * function implementing ordering operator.
*/
fmgr_info_copy(&(skey->sk_func), finfo, so->giststate->scanCxt);
{
BlockNumber page = InvalidBlockNumber;
BlockNumber sync_startpage = InvalidBlockNumber;
- BlockNumber report_page = InvalidBlockNumber;
+ BlockNumber report_page = InvalidBlockNumber;
ParallelHeapScanDesc parallel_scan;
Assert(scan->rs_parallel);
RelationAddExtraBlocks(Relation relation, BulkInsertState bistate)
{
Page page;
- BlockNumber blockNum = InvalidBlockNumber,
+ BlockNumber blockNum = InvalidBlockNumber,
firstBlock = InvalidBlockNumber;
int extraBlocks = 0;
int lockWaiters = 0;
return;
/*
- * It might seem like multiplying the number of lock waiters by as much
- * as 20 is too aggressive, but benchmarking revealed that smaller numbers
- * were insufficient. 512 is just an arbitrary cap to prevent pathological
- * results.
+ * It might seem like multiplying the number of lock waiters by as much as
+ * 20 is too aggressive, but benchmarking revealed that smaller numbers
+ * were insufficient. 512 is just an arbitrary cap to prevent
+ * pathological results.
*/
extraBlocks = Min(512, lockWaiters * 20);
}
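To make the scaling concrete, a worked example (arithmetic only, not part of the patch):

	/* 5 waiters -> 100 extra blocks; 25 -> 500; 26 or more -> capped at 512 */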
/*
- * Updating the upper levels of the free space map is too expensive
- * to do for every block, but it's worth doing once at the end to make
- * sure that subsequent insertion activity sees all of those nifty free
- * pages we just inserted.
+ * Updating the upper levels of the free space map is too expensive to do
+ * for every block, but it's worth doing once at the end to make sure that
+ * subsequent insertion activity sees all of those nifty free pages we
+ * just inserted.
*
* Note that we're using the freespace value that was reported for the
* last block we added as if it were the freespace value for every block
}
/*
- * In addition to whatever extension we performed above, we always add
- * at least one block to satisfy our own request.
+ * In addition to whatever extension we performed above, we always add at
+ * least one block to satisfy our own request.
*
* XXX This does an lseek - rather expensive - but at the moment it is the
* only way to accurately determine how many blocks are in a relation. Is
OldestXmin = RecentGlobalXmin;
else
OldestXmin =
- TransactionIdLimitedForOldSnapshots(RecentGlobalDataXmin,
- relation);
+ TransactionIdLimitedForOldSnapshots(RecentGlobalDataXmin,
+ relation);
Assert(TransactionIdIsValid(OldestXmin));
uint32 mapByte = HEAPBLK_TO_MAPBYTE(heapBlk);
uint8 mapOffset = HEAPBLK_TO_OFFSET(heapBlk);
Page page;
- uint8 *map;
+ uint8 *map;
#ifdef TRACE_VISIBILITYMAP
elog(DEBUG1, "vm_set %s %d", RelationGetRelationName(rel), heapBlk);
elog(ERROR, "wrong VM buffer passed to visibilitymap_set");
page = BufferGetPage(vmBuf);
- map = (uint8 *)PageGetContents(page);
+ map = (uint8 *) PageGetContents(page);
LockBuffer(vmBuf, BUFFER_LOCK_EXCLUSIVE);
if (flags != (map[mapByte] >> mapOffset & VISIBILITYMAP_VALID_BITS))
* Check for a conflict-in as we would if we were going to
* write to this page. We aren't actually going to write,
* but we want a chance to report SSI conflicts that would
- * otherwise be masked by this unique constraint violation.
+ * otherwise be masked by this unique constraint
+ * violation.
*/
CheckForSerializableConflictIn(rel, NULL, buf);
/*
* Check to see if we need to issue one final WAL record for this index,
- * which may be needed for correctness on a hot standby node when
- * non-MVCC index scans could take place.
+ * which may be needed for correctness on a hot standby node when non-MVCC
+ * index scans could take place.
*
* If the WAL is replayed in hot standby, the replay process needs to get
* cleanup locks on all index leaf pages, just as we've been doing here.
if (ndeletable > 0)
{
/*
- * Notice that the issued XLOG_BTREE_VACUUM WAL record includes all
- * information to the replay code to allow it to get a cleanup lock
- * on all pages between the previous lastBlockVacuumed and this page.
- * This ensures that WAL replay locks all leaf pages at some point,
- * which is important should non-MVCC scans be requested.
- * This is currently unused on standby, but we record it anyway, so
- * that the WAL contains the required information.
+ * Notice that the issued XLOG_BTREE_VACUUM WAL record includes
+ * all information to the replay code to allow it to get a cleanup
+ * lock on all pages between the previous lastBlockVacuumed and
+ * this page. This ensures that WAL replay locks all leaf pages at
+ * some point, which is important should non-MVCC scans be
+ * requested. This is currently unused on standby, but we record
+ * it anyway, so that the WAL contains the required information.
*
* Since we can visit leaf pages out-of-order when recursing,
* replay might end up locking such pages an extra time, but it
xl_btree_vacuum *xlrec = (xl_btree_vacuum *) XLogRecGetData(record);
/*
- * This section of code is thought to be no longer needed, after
- * analysis of the calling paths. It is retained to allow the code
- * to be reinstated if a flaw is revealed in that thinking.
+ * This section of code is thought to be no longer needed, after analysis
+ * of the calling paths. It is retained to allow the code to be reinstated
+ * if a flaw is revealed in that thinking.
*
* If we are running non-MVCC scans using this index we need to do some
* additional work to ensure correctness, which is known as a "pin scan"
* described in more detail in next paragraphs. We used to do the extra
- * work in all cases, whereas we now avoid that work in most cases.
- * If lastBlockVacuumed is set to InvalidBlockNumber then we skip the
+ * work in all cases, whereas we now avoid that work in most cases. If
+ * lastBlockVacuumed is set to InvalidBlockNumber then we skip the
* additional work required for the pin scan.
*
* Avoiding this extra work is important since it requires us to touch
while (ptr < end)
{
- OffsetNumber offset,
- length;
+ OffsetNumber offset,
+ length;
memcpy(&offset, ptr, sizeof(offset));
ptr += sizeof(offset);
xl_logical_message *xlrec = (xl_logical_message *) rec;
appendStringInfo(buf, "%s message size %zu bytes",
- xlrec->transactional ? "transactional" : "nontransactional",
+ xlrec->transactional ? "transactional" : "nontransactional",
xlrec->message_size);
}
}
Oid dbId, Oid tsId,
bool relcacheInitFileInval)
{
- int i;
+ int i;
if (relcacheInitFileInval)
appendStringInfo(buf, "; relcache init file inval dbid %u tsid %u",
if (parsed.nmsgs > 0)
{
standby_desc_invalidations(
- buf, parsed.nmsgs, parsed.msgs, parsed.dbId, parsed.tsId,
- XactCompletionRelcacheInitFileInval(parsed.xinfo));
+ buf, parsed.nmsgs, parsed.msgs, parsed.dbId, parsed.tsId,
+ XactCompletionRelcacheInitFileInval(parsed.xinfo));
}
if (XactCompletionForceSyncCommit(parsed.xinfo))
const struct config_enum_entry wal_level_options[] = {
{"minimal", WAL_LEVEL_MINIMAL, false},
{"replica", WAL_LEVEL_REPLICA, false},
- {"archive", WAL_LEVEL_REPLICA, true}, /* deprecated */
- {"hot_standby", WAL_LEVEL_REPLICA, true}, /* deprecated */
+ {"archive", WAL_LEVEL_REPLICA, true}, /* deprecated */
+ {"hot_standby", WAL_LEVEL_REPLICA, true}, /* deprecated */
{"logical", WAL_LEVEL_LOGICAL, false},
{NULL, 0, false}
};
{
TransactionId xidLastCommit;
CommitTimestampEntry dataLastCommit;
- bool commitTsActive;
+ bool commitTsActive;
} CommitTimestampShared;
CommitTimestampShared *commitTsShared;
* No-op if the module is not active.
*
* An unlocked read here is fine, because in a standby (the only place
- * where the flag can change in flight) this routine is only called by
- * the recovery process, which is also the only process which can change
- * the flag.
+ * where the flag can change in flight) this routine is only called by the
+ * recovery process, which is also the only process which can change the
+ * flag.
*/
if (!commitTsShared->commitTsActive)
return;
int pageno;
/*
- * Nothing to do if module not enabled. Note we do an unlocked read of the
- * flag here, which is okay because this routine is only called from
+ * Nothing to do if module not enabled. Note we do an unlocked read of
+ * the flag here, which is okay because this routine is only called from
* GetNewTransactionId, which is never called in a standby.
*/
Assert(!InRecovery);
{
LWLockAcquire(CommitTsLock, LW_EXCLUSIVE);
if (ShmemVariableCache->oldestCommitTsXid != InvalidTransactionId &&
- TransactionIdPrecedes(ShmemVariableCache->oldestCommitTsXid, oldestXact))
+ TransactionIdPrecedes(ShmemVariableCache->oldestCommitTsXid, oldestXact))
ShmemVariableCache->oldestCommitTsXid = oldestXact;
LWLockRelease(CommitTsLock);
}
Buffer buffer; /* registered buffer */
int flags; /* flags for this buffer */
int deltaLen; /* space consumed in delta field */
- char *image; /* copy of page image for modification,
- * do not do it in-place to have aligned
- * memory chunk */
+ char *image; /* copy of page image for modification; not done
+ * in-place, to keep the memory chunk aligned */
char delta[MAX_DELTA_SIZE]; /* delta between page images */
} PageData;
char *oldest_datname = get_database_name(oldest_datoid);
/*
- * Immediately kick autovacuum into action as we're already
- * in ERROR territory.
+ * Immediately kick autovacuum into action as we're already in
+ * ERROR territory.
*/
SendPostmasterSignal(PMSIGNAL_START_AUTOVAC_LAUNCHER);
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg_plural("database with OID %u must be vacuumed before %d more multixact member is used",
"database with OID %u must be vacuumed before %d more multixact members are used",
- MultiXactState->offsetStopLimit - nextOffset + nmembers,
- MultiXactState->oldestMultiXactDB,
+ MultiXactState->offsetStopLimit - nextOffset + nmembers,
+ MultiXactState->oldestMultiXactDB,
MultiXactState->offsetStopLimit - nextOffset + nmembers),
errhint("Execute a database-wide VACUUM in that database with reduced vacuum_multixact_freeze_min_age and vacuum_multixact_freeze_table_age settings.")));
nworkers = 0;
/*
- * If we are running under serializable isolation, we can't use
- * parallel workers, at least not until somebody enhances that mechanism
- * to be parallel-aware.
+ * If we are running under serializable isolation, we can't use parallel
+ * workers, at least not until somebody enhances that mechanism to be
+ * parallel-aware.
*/
if (IsolationIsSerializable())
nworkers = 0;
}
/*
- * We can't finish transaction commit or abort until all of the
- * workers have exited. This means, in particular, that we can't respond
- * to interrupts at this stage.
+ * We can't finish transaction commit or abort until all of the workers
+ * have exited. This means, in particular, that we can't respond to
+ * interrupts at this stage.
*/
HOLD_INTERRUPTS();
WaitForParallelWorkersToExit(pcxt);
if (toc == NULL)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("invalid magic number in dynamic shared memory segment")));
+ errmsg("invalid magic number in dynamic shared memory segment")));
/* Look up fixed parallel state. */
fps = shm_toc_lookup(toc, PARALLEL_KEY_FIXED);
*/
/*
- * Join locking group. We must do this before anything that could try
- * to acquire a heavyweight lock, because any heavyweight locks acquired
- * to this point could block either directly against the parallel group
+ * Join locking group. We must do this before anything that could try to
+ * acquire a heavyweight lock, because any heavyweight locks acquired to
+ * this point could block either directly against the parallel group
* leader or against some process which in turn waits for a lock that
* conflicts with the parallel group leader, causing an undetected
* deadlock. (If we can't join the lock group, the leader has gone away,
sz += MAXALIGN(nslots * sizeof(bool)); /* page_dirty[] */
sz += MAXALIGN(nslots * sizeof(int)); /* page_number[] */
sz += MAXALIGN(nslots * sizeof(int)); /* page_lru_count[] */
- sz += MAXALIGN(nslots * sizeof(LWLockPadded)); /* buffer_locks[] */
+ sz += MAXALIGN(nslots * sizeof(LWLockPadded)); /* buffer_locks[] */
if (nlsns > 0)
sz += MAXALIGN(nslots * nlsns * sizeof(XLogRecPtr)); /* group_lsn[] */
for (slotno = 0; slotno < nslots; slotno++)
{
LWLockInitialize(&shared->buffer_locks[slotno].lock,
- shared->lwlock_tranche_id);
+ shared->lwlock_tranche_id);
shared->page_buffer[slotno] = ptr;
shared->page_status[slotno] = SLRU_PAGE_EMPTY;
startPage++;
/* must account for wraparound */
if (startPage > TransactionIdToPage(MaxTransactionId))
- startPage=0;
+ startPage = 0;
}
(void) ZeroSUBTRANSPage(startPage);
TimestampTz prepared_at; /* time of preparation */
/*
- * Note that we need to keep track of two LSNs for each GXACT.
- * We keep track of the start LSN because this is the address we must
- * use to read state data back from WAL when committing a prepared GXACT.
- * We keep track of the end LSN because that is the LSN we need to wait
- * for prior to commit.
+ * Note that we need to keep track of two LSNs for each GXACT. We keep
+ * track of the start LSN because this is the address we must use to read
+ * state data back from WAL when committing a prepared GXACT. We keep
+ * track of the end LSN because that is the LSN we need to wait for prior
+ * to commit.
*/
- XLogRecPtr prepare_start_lsn; /* XLOG offset of prepare record start */
+ XLogRecPtr prepare_start_lsn; /* XLOG offset of prepare record start */
XLogRecPtr prepare_end_lsn; /* XLOG offset of prepare record end */
Oid owner; /* ID of user that executed the xact */
hdr.nabortrels = smgrGetPendingDeletes(false, &abortrels);
hdr.ninvalmsgs = xactGetCommittedInvalidationMessages(&invalmsgs,
&hdr.initfileinval);
- hdr.gidlen = strlen(gxact->gid) + 1; /* Include '\0' */
+ hdr.gidlen = strlen(gxact->gid) + 1; /* Include '\0' */
save_state_data(&hdr, sizeof(TwoPhaseFileHeader));
save_state_data(gxact->gid, hdr.gidlen);
ereport(ERROR,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of memory"),
- errdetail("Failed while allocating an XLog reading processor.")));
+ errdetail("Failed while allocating an XLog reading processor.")));
record = XLogReadRecord(xlogreader, lsn, &errormsg);
if (record == NULL)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("could not read two-phase state from xlog at %X/%X",
- (uint32) (lsn >> 32),
- (uint32) lsn)));
+ (uint32) (lsn >> 32),
+ (uint32) lsn)));
if (XLogRecGetRmid(xlogreader) != RM_XACT_ID ||
(XLogRecGetInfo(xlogreader) & XLOG_XACT_OPMASK) != XLOG_XACT_PREPARE)
ereport(ERROR,
(errcode_for_file_access(),
errmsg("expected two-phase state data is not present in xlog at %X/%X",
- (uint32) (lsn >> 32),
- (uint32) lsn)));
+ (uint32) (lsn >> 32),
+ (uint32) lsn)));
if (len != NULL)
*len = XLogRecGetDataLen(xlogreader);
- *buf = palloc(sizeof(char)*XLogRecGetDataLen(xlogreader));
+ *buf = palloc(sizeof(char) * XLogRecGetDataLen(xlogreader));
memcpy(*buf, XLogRecGetData(xlogreader), sizeof(char) * XLogRecGetDataLen(xlogreader));
XLogReaderFree(xlogreader);
xid = pgxact->xid;
/*
- * Read and validate 2PC state data.
- * State data will typically be stored in WAL files if the LSN is after the
- * last checkpoint record, or moved to disk if for some reason they have
- * lived for a long time.
+ * Read and validate 2PC state data. State data will typically be stored
+ * in WAL files if the LSN is after the last checkpoint record, or moved
+ * to disk if for some reason it has lived for a long time.
*/
if (gxact->ondisk)
buf = ReadTwoPhaseFile(xid, true);
TRACE_POSTGRESQL_TWOPHASE_CHECKPOINT_START();
/*
- * We are expecting there to be zero GXACTs that need to be
- * copied to disk, so we perform all I/O while holding
- * TwoPhaseStateLock for simplicity. This prevents any new xacts
- * from preparing while this occurs, which shouldn't be a problem
- * since the presence of long-lived prepared xacts indicates the
- * transaction manager isn't active.
+ * We are expecting there to be zero GXACTs that need to be copied to
+ * disk, so we perform all I/O while holding TwoPhaseStateLock for
+ * simplicity. This prevents any new xacts from preparing while this
+ * occurs, which shouldn't be a problem since the presence of long-lived
+ * prepared xacts indicates the transaction manager isn't active.
*
- * It's also possible to move I/O out of the lock, but on
- * every error we should check whether somebody committed our
- * transaction in different backend. Let's leave this optimisation
- * for future, if somebody will spot that this place cause
- * bottleneck.
+ * It's also possible to move I/O out of the lock, but on every error we
+ * should check whether somebody committed our transaction in a different
+ * backend. Let's leave this optimisation for the future, if somebody
+ * spots that this place causes a bottleneck.
*
- * Note that it isn't possible for there to be a GXACT with
- * a prepare_end_lsn set prior to the last checkpoint yet
- * is marked invalid, because of the efforts with delayChkpt.
+ * Note that it isn't possible for there to be a GXACT with a
+ * prepare_end_lsn set prior to the last checkpoint yet be marked
+ * invalid, because of the efforts with delayChkpt.
*/
LWLockAcquire(TwoPhaseStateLock, LW_SHARED);
for (i = 0; i < TwoPhaseState->numPrepXacts; i++)
gxact->prepare_end_lsn <= redo_horizon)
{
char *buf;
- int len;
+ int len;
XlogReadTwoPhaseData(gxact->prepare_start_lsn, &buf, &len);
RecreateTwoPhaseFile(pgxact->xid, buf, len);
TwoPhaseFileHeader *hdr;
TransactionId *subxids;
GlobalTransaction gxact;
- const char *gid;
+ const char *gid;
int i;
xid = (TransactionId) strtoul(clde->d_name, NULL, 16);
/*
* Transactions without an assigned xid can contain invalidation
* messages (e.g. explicit relcache invalidations or catcache
- * invalidations for inplace updates); standbys need to process
- * those. We can't emit a commit record without an xid, and we don't
- * want to force assigning an xid, because that'd be problematic for
- * e.g. vacuum. Hence we emit a bespoke record for the
- * invalidations. We don't want to use that in case a commit record is
- * emitted, so they happen synchronously with commits (besides not
- * wanting to emit more WAL recoreds).
+ * invalidations for inplace updates); standbys need to process those.
+ * We can't emit a commit record without an xid, and we don't want to
+ * force assigning an xid, because that'd be problematic for e.g.
+ * vacuum. Hence we emit a bespoke record for the invalidations. We
+ * don't want to use that in case a commit record is emitted, so they
+ * happen synchronously with commits (besides not wanting to emit more
+ * WAL records).
*/
if (nmsgs != 0)
{
LogStandbyInvalidations(nmsgs, invalMessages,
RelcacheInitFileInval);
- wrote_xlog = true; /* not strictly necessary */
+ wrote_xlog = true; /* not strictly necessary */
}
/*
* this case, but we don't currently try to do that. It would certainly
* cause problems at least in Hot Standby mode, where the
* KnownAssignedXids machinery requires tracking every XID assignment. It
- * might be OK to skip it only when wal_level < replica, but for now
- * we don't.)
+ * might be OK to skip it only when wal_level < replica, but for now we
+ * don't.)
*
* However, if we're doing cleanup of any non-temp rels or committing any
* command that wanted to force sync commit, then we must flush XLOG
/*
* If asked by the primary (because someone is waiting for a synchronous
- * commit = remote_apply), we will need to ask walreceiver to send a
- * reply immediately.
+ * commit = remote_apply), we will need to ask walreceiver to send a reply
+ * immediately.
*/
if (XactCompletionApplyFeedback(parsed->xinfo))
XLogRequestWalReceiverReply();
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid value for recovery parameter \"%s\": \"%s\"",
- "recovery_target_action",
- item->value),
+ errmsg("invalid value for recovery parameter \"%s\": \"%s\"",
+ "recovery_target_action",
+ item->value),
errhint("Valid values are \"pause\", \"promote\", and \"shutdown\".")));
ereport(DEBUG2,
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("invalid value for recovery parameter \"%s\": \"%s\"",
- "recovery_target",
- item->value),
+ errmsg("invalid value for recovery parameter \"%s\": \"%s\"",
+ "recovery_target",
+ item->value),
errhint("The only allowed value is \"immediate\".")));
ereport(DEBUG2,
(errmsg_internal("recovery_target = '%s'",
}
/*
- * For Hot Standby, the WAL must be generated with 'replica' mode, and
- * we must have at least as many backend slots as the primary.
+ * For Hot Standby, the WAL must be generated with 'replica' mode, and we
+ * must have at least as many backend slots as the primary.
*/
if (ArchiveRecoveryRequested && EnableHotStandby)
{
* is no use for such a file. There is no harm in retaining it, but it
* is better to get rid of the map file so that we don't have any
* redundant file in the data directory and it will avoid any sort of
- * confusion. It seems prudent though to just rename the file out
- * of the way rather than delete it completely, also we ignore any
- * error that occurs in rename operation as even if map file is
- * present without backup_label file, it is harmless.
+ * confusion. It seems prudent, though, to just rename the file out of
+ * the way rather than delete it completely; also, we ignore any error
+ * that occurs in the rename operation, since even if the map file is
+ * present without the backup_label file, it is harmless.
*/
if (stat(TABLESPACE_MAP, &st) == 0)
{
unlink(TABLESPACE_MAP_OLD);
if (durable_rename(TABLESPACE_MAP, TABLESPACE_MAP_OLD, DEBUG1) == 0)
ereport(LOG,
- (errmsg("ignoring file \"%s\" because no file \"%s\" exists",
- TABLESPACE_MAP, BACKUP_LABEL_FILE),
- errdetail("File \"%s\" was renamed to \"%s\".",
- TABLESPACE_MAP, TABLESPACE_MAP_OLD)));
+ (errmsg("ignoring file \"%s\" because no file \"%s\" exists",
+ TABLESPACE_MAP, BACKUP_LABEL_FILE),
+ errdetail("File \"%s\" was renamed to \"%s\".",
+ TABLESPACE_MAP, TABLESPACE_MAP_OLD)));
else
ereport(LOG,
- (errmsg("ignoring file \"%s\" because no file \"%s\" exists",
- TABLESPACE_MAP, BACKUP_LABEL_FILE),
- errdetail("Could not rename file \"%s\" to \"%s\": %m.",
- TABLESPACE_MAP, TABLESPACE_MAP_OLD)));
+ (errmsg("ignoring file \"%s\" because no file \"%s\" exists",
+ TABLESPACE_MAP, BACKUP_LABEL_FILE),
+ errdetail("Could not rename file \"%s\" to \"%s\": %m.",
+ TABLESPACE_MAP, TABLESPACE_MAP_OLD)));
}
/*
ereport(DEBUG1,
(errmsg_internal("redo record is at %X/%X; shutdown %s",
(uint32) (checkPoint.redo >> 32), (uint32) checkPoint.redo,
- wasShutdown ? "TRUE" : "FALSE")));
+ wasShutdown ? "TRUE" : "FALSE")));
ereport(DEBUG1,
(errmsg_internal("next transaction ID: %u:%u; next OID: %u",
- checkPoint.nextXidEpoch, checkPoint.nextXid,
- checkPoint.nextOid)));
+ checkPoint.nextXidEpoch, checkPoint.nextXid,
+ checkPoint.nextOid)));
ereport(DEBUG1,
(errmsg_internal("next MultiXactId: %u; next MultiXactOffset: %u",
- checkPoint.nextMulti, checkPoint.nextMultiOffset)));
+ checkPoint.nextMulti, checkPoint.nextMultiOffset)));
ereport(DEBUG1,
- (errmsg_internal("oldest unfrozen transaction ID: %u, in database %u",
- checkPoint.oldestXid, checkPoint.oldestXidDB)));
+ (errmsg_internal("oldest unfrozen transaction ID: %u, in database %u",
+ checkPoint.oldestXid, checkPoint.oldestXidDB)));
ereport(DEBUG1,
(errmsg_internal("oldest MultiXactId: %u, in database %u",
- checkPoint.oldestMulti, checkPoint.oldestMultiDB)));
+ checkPoint.oldestMulti, checkPoint.oldestMultiDB)));
ereport(DEBUG1,
(errmsg_internal("commit timestamp Xid oldest/newest: %u/%u",
- checkPoint.oldestCommitTsXid,
- checkPoint.newestCommitTsXid)));
+ checkPoint.oldestCommitTsXid,
+ checkPoint.newestCommitTsXid)));
if (!TransactionIdIsNormal(checkPoint.nextXid))
ereport(PANIC,
(errmsg("invalid next transaction ID")));
SpinLockRelease(&XLogCtl->info_lck);
/*
- * If rm_redo called XLogRequestWalReceiverReply, then we
- * wake up the receiver so that it notices the updated
+ * If rm_redo called XLogRequestWalReceiverReply, then we wake
+ * up the receiver so that it notices the updated
* lastReplayedEndRecPtr and sends a reply to the master.
*/
if (doRequestWalReceiverReply)
MemoryContext oldcontext;
/*
- * Label file and tablespace map file need to be long-lived, since they
- * are read in pg_stop_backup.
+ * Label file and tablespace map file need to be long-lived, since
+ * they are read in pg_stop_backup.
*/
oldcontext = MemoryContextSwitchTo(TopMemoryContext);
label_file = makeStringInfo();
MemoryContextSwitchTo(oldcontext);
startpoint = do_pg_start_backup(backupidstr, fast, NULL, label_file,
- dir, NULL, tblspc_map_file, false, true);
+ dir, NULL, tblspc_map_file, false, true);
nonexclusive_backup_running = true;
before_shmem_exit(nonexclusive_base_backup_cleanup, (Datum) 0);
* Note: different from CancelBackup which just cancels online backup mode.
*
* Note: this version is only called to stop an exclusive backup. The function
- * pg_stop_backup_v2 (overloaded as pg_stop_backup in SQL) is called to
- * stop non-exclusive backups.
+ * pg_stop_backup_v2 (overloaded as pg_stop_backup in SQL) is called to
+ * stop non-exclusive backups.
*
* Permission checking for this function is managed through the normal
* GRANT system.
errhint("Did you mean to use pg_stop_backup('f')?")));
/*
- * Exclusive backups were typically started in a different connection,
- * so don't try to verify that exclusive_backup_running is set in this one.
- * Actual verification that an exclusive backup is in fact running is handled
- * inside do_pg_stop_backup.
+ * An exclusive backup is typically started in a different connection, so
+ * don't try to verify that exclusive_backup_running is set in this one.
+ * Actual verification that an exclusive backup is in fact running is
+ * handled inside do_pg_stop_backup.
*/
stoppoint = do_pg_stop_backup(NULL, true, NULL);
Datum
pg_stop_backup_v2(PG_FUNCTION_ARGS)
{
- ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
- TupleDesc tupdesc;
+ ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo;
+ TupleDesc tupdesc;
Tuplestorestate *tupstore;
- MemoryContext per_query_ctx;
- MemoryContext oldcontext;
- Datum values[3];
- bool nulls[3];
+ MemoryContext per_query_ctx;
+ MemoryContext oldcontext;
+ Datum values[3];
+ bool nulls[3];
- bool exclusive = PG_GETARG_BOOL(0);
- XLogRecPtr stoppoint;
+ bool exclusive = PG_GETARG_BOOL(0);
+ XLogRecPtr stoppoint;
/* check to see if caller supports us returning a tuplestore */
if (rsinfo == NULL || !IsA(rsinfo, ReturnSetInfo))
errhint("Did you mean to use pg_stop_backup('t')?")));
/*
- * Stop the non-exclusive backup. Return a copy of the backup
- * label and tablespace map so they can be written to disk by
- * the caller.
+ * Stop the non-exclusive backup. Return a copy of the backup label
+ * and tablespace map so they can be written to disk by the caller.
*/
stoppoint = do_pg_stop_backup(label_file->data, true, NULL);
nonexclusive_backup_running = false;
}
/* Stoppoint is included on both exclusive and nonexclusive backups */
- values[0] = LSNGetDatum(stoppoint);
+ values[0] = LSNGetDatum(stoppoint);
tuplestore_putvalues(tupstore, tupdesc, values, nulls);
tuplestore_donestoring(tupstore);
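/*
 * Minimal sketch of the generic "materialize" set-returning protocol the
 * declarations above support; this is the standard pattern, assumed here
 * rather than copied from pg_stop_backup_v2 itself:
 */
per_query_ctx = rsinfo->econtext->ecxt_per_query_memory;
oldcontext = MemoryContextSwitchTo(per_query_ctx);
tupdesc = CreateTupleDescCopy(rsinfo->expectedDesc); /* result row shape */
tupstore = tuplestore_begin_heap(true, false, work_mem);
rsinfo->returnMode = SFRM_Materialize;
rsinfo->setResult = tupstore;
rsinfo->setDesc = tupdesc;
MemoryContextSwitchTo(oldcontext);
/* ... fill values[]/nulls[] for each row, then: */
tuplestore_putvalues(tupstore, tupdesc, values, nulls);
tuplestore_donestoring(tupstore);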
if (total_len < SizeOfXLogRecord)
{
report_invalid_record(state,
- "invalid record length at %X/%X: wanted %u, got %u",
+ "invalid record length at %X/%X: wanted %u, got %u",
(uint32) (RecPtr >> 32), (uint32) RecPtr,
(uint32) SizeOfXLogRecord, total_len);
goto err;
if (record->xl_tot_len < SizeOfXLogRecord)
{
report_invalid_record(state,
- "invalid record length at %X/%X: wanted %u, got %u",
+ "invalid record length at %X/%X: wanted %u, got %u",
(uint32) (RecPtr >> 32), (uint32) RecPtr,
(uint32) SizeOfXLogRecord, record->xl_tot_len);
return false;
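/*
 * The "%X/%X" idiom in these messages prints a 64-bit LSN as its high
 * and low 32-bit halves. A hypothetical helper (not part of the patch)
 * making the convention explicit:
 */
static void
format_lsn(XLogRecPtr lsn, char *buf, size_t buflen)
{
    snprintf(buf, buflen, "%X/%X", (uint32) (lsn >> 32), (uint32) lsn);
}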
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("unrecognized default ACL object type %c", objtype),
- errhint("Valid object types are \"r\", \"S\", \"f\", and \"T\".")));
+ errhint("Valid object types are \"r\", \"S\", \"f\", and \"T\".")));
}
/*
Form_pg_proc proc;
Oid transfn;
Oid finalfn = InvalidOid; /* can be omitted */
- Oid combinefn = InvalidOid; /* can be omitted */
+ Oid combinefn = InvalidOid; /* can be omitted */
Oid serialfn = InvalidOid; /* can be omitted */
- Oid deserialfn = InvalidOid; /* can be omitted */
+ Oid deserialfn = InvalidOid; /* can be omitted */
Oid mtransfn = InvalidOid; /* can be omitted */
Oid minvtransfn = InvalidOid; /* can be omitted */
Oid mfinalfn = InvalidOid; /* can be omitted */
/* handle the combinefn, if supplied */
if (aggcombinefnName)
{
- Oid combineType;
+ Oid combineType;
/*
- * Combine function must have 2 argument, each of which is the
- * trans type
+ * Combine function must have two arguments, each of which is the
+ * trans type.
*/
fnArgs[0] = aggTransType;
fnArgs[1] = aggTransType;
if (combineType != aggTransType)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("return type of combine function %s is not %s",
- NameListToString(aggcombinefnName),
- format_type_be(aggTransType))));
+ errmsg("return type of combine function %s is not %s",
+ NameListToString(aggcombinefnName),
+ format_type_be(aggTransType))));
/*
* A combine function to combine INTERNAL states must accept nulls and
}
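/*
 * A hypothetical combine function satisfying the rule above, for an
 * aggregate whose trans type is int8: two arguments of the trans type,
 * returning the trans type. The name is illustrative only.
 */
PG_FUNCTION_INFO_V1(my_sum_combine);
Datum
my_sum_combine(PG_FUNCTION_ARGS)
{
    int64 state1 = PG_GETARG_INT64(0);  /* partial aggregate state 1 */
    int64 state2 = PG_GETARG_INT64(1);  /* partial aggregate state 2 */
    PG_RETURN_INT64(state1 + state2);
}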
/*
- * Validate the serialization function, if present. We must ensure that the
- * return type of this function is the same as the specified serialType.
+ * Validate the serialization function, if present. We must ensure that
+ * the return type of this function is the same as the specified
+ * serialType.
*/
if (aggserialfnName)
{
if (rettype != aggSerialType)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("return type of serialization function %s is not %s",
- NameListToString(aggserialfnName),
- format_type_be(aggSerialType))));
+ errmsg("return type of serialization function %s is not %s",
+ NameListToString(aggserialfnName),
+ format_type_be(aggSerialType))));
}
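/*
 * The signatures these checks imply (hedged; argument lists are not
 * shown in this hunk): the serialization function must return the
 * declared serialType, typically bytea, while the deserialization
 * function, checked just below, must return the trans type:
 *
 *     serialfn(internal)  -> serialType (e.g. bytea)
 *     deserialfn(...)     -> transType  (internal)
 */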
/*
if (rettype != aggTransType)
ereport(ERROR,
(errcode(ERRCODE_DATATYPE_MISMATCH),
- errmsg("return type of deserialization function %s is not %s",
- NameListToString(aggdeserialfnName),
- format_type_be(aggTransType))));
+ errmsg("return type of deserialization function %s is not %s",
+ NameListToString(aggdeserialfnName),
+ format_type_be(aggTransType))));
}
/*
/*
* There's little point in having a serialization/deserialization
* function on aggregates that don't have an internal state, so let's
- * just disallow this as it may help clear up any confusion or needless
- * authoring of these functions.
+ * just disallow this, as doing so may help prevent confusion and the
+ * needless authoring of these functions.
*/
if (transTypeId != INTERNALOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("a serialization type must only be specified when the aggregate transition data type is %s",
- format_type_be(INTERNALOID))));
+ format_type_be(INTERNALOID))));
serialTypeId = typenameTypeId(NULL, serialType);
/*
* We disallow INTERNAL serialType as the whole point of the
- * serialized types is to allow the aggregate state to be output,
- * and we cannot output INTERNAL. This check, combined with the one
- * above ensures that the trans type and serialization type are not the
+ * serialized types is to allow the aggregate state to be output, and
+ * we cannot output INTERNAL. This check, combined with the one above,
+ * ensures that the trans type and serialization type are not the
* same.
*/
if (serialTypeId == INTERNALOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("aggregate serialization data type cannot be %s",
+ errmsg("aggregate serialization data type cannot be %s",
format_type_be(serialTypeId))));
/*
*/
if (serialfuncName != NIL)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("must specify serialization type when specifying serialization function")));
+ (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
+ errmsg("must specify serialization type when specifying serialization function")));
/* likewise for the deserialization function */
if (deserialfuncName != NIL)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
- errmsg("must specify serialization type when specifying deserialization function")));
+ (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
+ errmsg("must specify serialization type when specifying deserialization function")));
}
/*
mfinalfuncExtraArgs,
sortoperatorName, /* sort operator name */
transTypeId, /* transition data type */
- serialTypeId, /* serialization data type */
+ serialTypeId, /* serialization data type */
transSpace, /* transition space */
mtransTypeId, /* transition data type */
mtransSpace, /* transition space */
ObjectAddress
ExecAlterObjectDependsStmt(AlterObjectDependsStmt *stmt, ObjectAddress *refAddress)
{
- ObjectAddress address;
- ObjectAddress refAddr;
- Relation rel;
+ ObjectAddress address;
+ ObjectAddress refAddr;
+ Relation rel;
address =
get_object_address_rv(stmt->objectType, stmt->relation, stmt->objname,
- stmt->objargs, &rel, AccessExclusiveLock, false);
+ stmt->objargs, &rel, AccessExclusiveLock, false);
/*
- * If a relation was involved, it would have been opened and locked.
- * We don't need the relation here, but we'll retain the lock until
- * commit.
+ * If a relation was involved, it would have been opened and locked. We
+ * don't need the relation here, but we'll retain the lock until commit.
*/
if (rel)
heap_close(rel, NoLock);
oldNspOid = DatumGetObjectId(namespace);
/*
- * If the object is already in the correct namespace, we don't need
- * to do anything except fire the object access hook.
+ * If the object is already in the correct namespace, we don't need to do
+ * anything except fire the object access hook.
*/
if (oldNspOid == nspOid)
{
/*
* get_am_type_oid
- * Worker for various get_am_*_oid variants
+ * Worker for various get_am_*_oid variants
*
* If missing_ok is false, throw an error if access method not found. If
* true, just return InvalidOid.
/*
* get_am_oid - given an access method name, look up its OID.
- * The type is not checked.
+ * The type is not checked.
*/
Oid
get_am_oid(const char *amname, bool missing_ok)
*/
if (!inh)
{
- BlockNumber relallvisible;
+ BlockNumber relallvisible;
visibilitymap_count(onerel, &relallvisible, NULL);
if (get_func_rettype(funcoid) != VOIDOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("encoding conversion function %s must return type %s",
- NameListToString(func_name), "void")));
+ errmsg("encoding conversion function %s must return type %s",
+ NameListToString(func_name), "void")));
/* Check we have EXECUTE rights for the function */
aclresult = pg_proc_aclcheck(funcoid, GetUserId(), ACL_EXECUTE);
if (is_from)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("COPY FROM not supported with row-level security"),
+ errmsg("COPY FROM not supported with row-level security"),
errhint("Use INSERT statements instead.")));
/* Build target list */
{
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("DO INSTEAD NOTHING rules are not supported for COPY")));
+ errmsg("DO INSTEAD NOTHING rules are not supported for COPY")));
}
else if (list_length(rewritten) > 1)
{
- ListCell *lc;
+ ListCell *lc;
/* examine queries to determine which error message to issue */
foreach(lc, rewritten)
{
- Query *q = (Query *) lfirst(lc);
+ Query *q = (Query *) lfirst(lc);
if (q->querySource == QSRC_QUAL_INSTEAD_RULE)
ereport(ERROR,
if (q->querySource == QSRC_NON_INSTEAD_RULE)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("DO ALSO rules are not supported for the COPY")));
+ errmsg("DO ALSO rules are not supported for the COPY")));
}
ereport(ERROR,
query->commandType == CMD_DELETE);
ereport(ERROR,
- (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
- errmsg("COPY query must have a RETURNING clause")));
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("COPY query must have a RETURNING clause")));
}
/* plan the query */
CreateExtensionStmt *ces;
ListCell *lc;
ObjectAddress addr;
- List *cascade_parents;
+ List *cascade_parents;
/* Check extension name validity before trying to cascade */
check_valid_extension_name(curreq);
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("function %s must return type %s",
- NameListToString((List *) handler->arg), "fdw_handler")));
+ NameListToString((List *) handler->arg), "fdw_handler")));
return handlerOid;
}
RelationGetRelationName(matviewRel));
/*
- * Check that there is a unique index with no WHERE clause on
- * one or more columns of the materialized view if CONCURRENTLY
- * is specified.
+ * Check that there is a unique index with no WHERE clause on one or more
+ * columns of the materialized view if CONCURRENTLY is specified.
*/
if (concurrent)
{
- List *indexoidlist = RelationGetIndexList(matviewRel);
- ListCell *indexoidscan;
+ List *indexoidlist = RelationGetIndexList(matviewRel);
+ ListCell *indexoidscan;
bool hasUniqueIndex = false;
foreach(indexoidscan, indexoidlist)
{
Oid indexoid = lfirst_oid(indexoidscan);
Relation indexRel;
- Form_pg_index indexStruct;
+ Form_pg_index indexStruct;
indexRel = index_open(indexoid, AccessShareLock);
indexStruct = indexRel->rd_index;
if (!hasUniqueIndex)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
- errmsg("cannot refresh materialized view \"%s\" concurrently",
- quote_qualified_identifier(get_namespace_name(RelationGetNamespace(matviewRel)),
- RelationGetRelationName(matviewRel))),
+ errmsg("cannot refresh materialized view \"%s\" concurrently",
+ quote_qualified_identifier(get_namespace_name(RelationGetNamespace(matviewRel)),
+ RelationGetRelationName(matviewRel))),
errhint("Create a unique index with no WHERE clause on one or more columns of the materialized view.")));
}
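/*
 * Hedged sketch of the per-index qualification performed in the loop
 * above; the real test may apply further conditions, but the essence is
 * "unique and no WHERE clause", read from the index's pg_index form:
 */
if (indexStruct->indisunique &&
    heap_attisnull(indexRel->rd_indextuple, Anum_pg_index_indpred))
    hasUniqueIndex = true;      /* unique index with no predicate */
index_close(indexRel, AccessShareLock);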
/*
* There must be at least one unique index on the matview.
*
- * ExecRefreshMatView() checks that after taking the exclusive lock on
- * the matview. So at least one unique index is guaranteed to exist here
+ * ExecRefreshMatView() checks that after taking the exclusive lock on the
+ * matview. So at least one unique index is guaranteed to exist here
* because the lock is still being held.
*/
Assert(foundUniqueIndex);
if (get_func_rettype(restrictionOid) != FLOAT8OID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("restriction estimator function %s must return type %s",
- NameListToString(restrictionName), "float8")));
+ errmsg("restriction estimator function %s must return type %s",
+ NameListToString(restrictionName), "float8")));
/* Require EXECUTE rights for the estimator */
aclresult = pg_proc_aclcheck(restrictionOid, GetUserId(), ACL_EXECUTE);
if (get_func_rettype(joinOid) != FLOAT8OID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("join estimator function %s must return type %s",
- NameListToString(joinName), "float8")));
+ errmsg("join estimator function %s must return type %s",
+ NameListToString(joinName), "float8")));
/* Require EXECUTE rights for the estimator */
aclresult = pg_proc_aclcheck(joinOid, GetUserId(), ACL_EXECUTE);
/* Must own relation. */
if (pg_class_ownercheck(relid, GetUserId()))
- noperm = false; /* user is allowed to modify this policy */
+ noperm = false; /* user is allowed to modify this policy */
else
ereport(WARNING,
(errcode(ERRCODE_WARNING_PRIVILEGE_NOT_REVOKED),
*/
if (!noperm && num_roles > 0)
{
- int i, j;
+ int i,
+ j;
Oid *roles = (Oid *) ARR_DATA_PTR(policy_roles);
Datum *role_oids;
char *qual_value;
Node *qual_expr;
- List *qual_parse_rtable = NIL;
+ List *qual_parse_rtable = NIL;
char *with_check_value;
Node *with_check_qual;
- List *with_check_parse_rtable = NIL;
+ List *with_check_parse_rtable = NIL;
Datum values[Natts_pg_policy];
bool isnull[Natts_pg_policy];
bool replaces[Natts_pg_policy];
/*
* All of the dependencies will be removed from the policy and then
- * re-added. In order to get them correct, we need to extract out
- * the expressions in the policy and construct a parsestate just
- * enough to build the range table(s) to then pass to
- * recordDependencyOnExpr().
+ * re-added. In order to get them correct, we need to extract the
+ * expressions in the policy and construct just enough of a parse state
+ * to build the range table(s) to then pass to recordDependencyOnExpr().
*/
/* Get policy qual, to update dependencies */
value_datum = heap_getattr(tuple, Anum_pg_policy_polqual,
- RelationGetDescr(pg_policy_rel), &attr_isnull);
+ RelationGetDescr(pg_policy_rel), &attr_isnull);
if (!attr_isnull)
{
ParseState *qual_pstate;
/* Get WITH CHECK qual, to update dependencies */
value_datum = heap_getattr(tuple, Anum_pg_policy_polwithcheck,
- RelationGetDescr(pg_policy_rel), &attr_isnull);
+ RelationGetDescr(pg_policy_rel), &attr_isnull);
if (!attr_isnull)
{
ParseState *with_check_pstate;
heap_close(pg_policy_rel, RowExclusiveLock);
- return(noperm || num_roles > 0);
+ return (noperm || num_roles > 0);
}
/*
/* Get policy command */
polcmd_datum = heap_getattr(policy_tuple, Anum_pg_policy_polcmd,
- RelationGetDescr(pg_policy_rel),
- &polcmd_isnull);
+ RelationGetDescr(pg_policy_rel),
+ &polcmd_isnull);
Assert(!polcmd_isnull);
polcmd = DatumGetChar(polcmd_datum);
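/*
 * For reference, polcmd encodes the command a policy applies to as one
 * character: '*' (ALL), 'r' (SELECT), 'a' (INSERT), 'w' (UPDATE), or
 * 'd' (DELETE).
 */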
}
else
{
- Oid *roles;
+ Oid *roles;
Datum roles_datum;
bool attr_isnull;
ArrayType *policy_roles;
/*
- * We need to pull the set of roles this policy applies to from
- * what's in the catalog, so that we can recreate the dependencies
- * correctly for the policy.
+ * We need to pull the set of roles this policy applies to from what's
+ * in the catalog, so that we can recreate the dependencies correctly
+ * for the policy.
*/
roles_datum = heap_getattr(policy_tuple, Anum_pg_policy_polroles,
}
else
{
- Datum value_datum;
- bool attr_isnull;
+ Datum value_datum;
+ bool attr_isnull;
/*
* We need to pull the USING expression and build the range table for
- * the policy from what's in the catalog, so that we can recreate
- * the dependencies correctly for the policy.
+ * the policy from what's in the catalog, so that we can recreate the
+ * dependencies correctly for the policy.
*/
/* Check if the policy has a USING expr */
}
else
{
- Datum value_datum;
- bool attr_isnull;
+ Datum value_datum;
+ bool attr_isnull;
/*
* We need to pull the WITH CHECK expression and build the range table
if (funcrettype != LANGUAGE_HANDLEROID)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("function %s must return type %s",
- NameListToString(funcname), "language_handler")));
+ errmsg("function %s must return type %s",
+ NameListToString(funcname), "language_handler")));
}
else
{
else
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
- errmsg("function %s must return type %s",
- NameListToString(stmt->plhandler), "language_handler")));
+ errmsg("function %s must return type %s",
+ NameListToString(stmt->plhandler), "language_handler")));
}
/* validate the inline function */
* can skip this for internally generated triggers, since the name
* modification above should be sufficient.
*
- * NOTE that this is cool only because we have ShareRowExclusiveLock on the
- * relation, so the trigger set won't be changing underneath us.
+ * NOTE that this is cool only because we have ShareRowExclusiveLock on
+ * the relation, so the trigger set won't be changing underneath us.
*/
if (!isInternal)
{
{
/* backwards-compatibility hack */
ereport(WARNING,
- (errmsg("changing return type of function %s from %s to %s",
- NameListToString(inputName), "opaque", typeName)));
+ (errmsg("changing return type of function %s from %s to %s",
+ NameListToString(inputName), "opaque", typeName)));
SetFunctionReturnType(inputOid, typoid);
}
else
{
/* backwards-compatibility hack */
ereport(WARNING,
- (errmsg("changing return type of function %s from %s to %s",
- NameListToString(outputName), "opaque", "cstring")));
+ (errmsg("changing return type of function %s from %s to %s",
+ NameListToString(outputName), "opaque", "cstring")));
SetFunctionReturnType(outputOid, CSTRINGOID);
}
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("type output function %s must return type %s",
- NameListToString(outputName), "cstring")));
+ errmsg("type output function %s must return type %s",
+ NameListToString(outputName), "cstring")));
}
if (receiveOid)
{
if (resulttype != BYTEAOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("type send function %s must return type %s",
- NameListToString(sendName), "bytea")));
+ errmsg("type send function %s must return type %s",
+ NameListToString(sendName), "bytea")));
}
/*
if (get_func_rettype(procOid) != BOOLOID)
ereport(ERROR,
(errcode(ERRCODE_INVALID_OBJECT_DEFINITION),
- errmsg("type analyze function %s must return type %s",
- NameListToString(procname), "boolean")));
+ errmsg("type analyze function %s must return type %s",
+ NameListToString(procname), "boolean")));
return procOid;
}
typTup = (Form_pg_type) GETSTRUCT(tup);
/*
- * If it's a composite type, invoke ATExecChangeOwner so that we fix up the
- * pg_class entry properly. That will call back to AlterTypeOwnerInternal
- * to take care of the pg_type entry(s).
+ * If it's a composite type, invoke ATExecChangeOwner so that we fix up
+ * the pg_class entry properly. That will call back to
+ * AlterTypeOwnerInternal to take care of the pg_type entry(s).
*/
if (typTup->typtype == TYPTYPE_COMPOSITE)
ATExecChangeOwner(typTup->typrelid, newOwnerId, true, AccessExclusiveLock);
if (!superuser())
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
- errmsg("must be superuser to change bypassrls attribute")));
+ errmsg("must be superuser to change bypassrls attribute")));
}
else
{
ereport(ERROR,
(errcode(ERRCODE_RESERVED_NAME),
errmsg("role name \"%s\" is reserved",
- stmt->role),
- errdetail("Role names starting with \"pg_\" are reserved.")));
+ stmt->role),
+ errdetail("Role names s