OffsetNumber upperbound);
static Page palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum);
static inline BTScanInsert bt_mkscankey_pivotsearch(Relation rel,
- IndexTuple itup);
+ IndexTuple itup);
static ItemId PageGetItemIdCareful(BtreeCheckState *state, BlockNumber block,
Page page, OffsetNumber offset);
static inline ItemPointer BTreeTupleGetHeapTIDCareful(BtreeCheckState *state,
/* Fingerprint leaf page tuples (those that point to the heap) */
if (state->heapallindexed && P_ISLEAF(topaque) && !ItemIdIsDead(itemid))
{
- IndexTuple norm;
+ IndexTuple norm;
norm = bt_normalize_tuple(state, itup);
bloom_add_element(state->filter, (unsigned char *) norm,
*/
else if (offset == max)
{
- BTScanInsert rightkey;
+ BTScanInsert rightkey;
/* Get item in next/right page */
rightkey = bt_right_page_check_scankey(state);
bool *isnull, bool tupleIsAlive, void *checkstate)
{
BtreeCheckState *state = (BtreeCheckState *) checkstate;
- IndexTuple itup, norm;
+ IndexTuple itup,
+ norm;
Assert(state->heapallindexed);
for (i = 0; i < tupleDescriptor->natts; i++)
{
- Form_pg_attribute att;
+ Form_pg_attribute att;
att = TupleDescAttr(tupleDescriptor, i);
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("can't extend cube"),
errdetail("A cube cannot have more than %d dimensions.",
- CUBE_MAX_DIM)));
+ CUBE_MAX_DIM)));
if (ARRNELEMS(ll) != dim)
ereport(ERROR,
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("array is too long"),
errdetail("A cube cannot have more than %d dimensions.",
- CUBE_MAX_DIM)));
+ CUBE_MAX_DIM)));
dur = ARRPTR(ur);
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("array is too long"),
errdetail("A cube cannot have more than %d dimensions.",
- CUBE_MAX_DIM)));
+ CUBE_MAX_DIM)));
size = IS_POINT(c) ? POINT_SIZE(dim) : CUBE_SIZE(dim);
result = (NDBOX *) palloc0(size);
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("can't extend cube"),
errdetail("A cube cannot have more than %d dimensions.",
- CUBE_MAX_DIM)));
+ CUBE_MAX_DIM)));
if (IS_POINT(cube))
{
(errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
errmsg("can't extend cube"),
errdetail("A cube cannot have more than %d dimensions.",
- CUBE_MAX_DIM)));
+ CUBE_MAX_DIM)));
if (IS_POINT(cube) && (x1 == x2))
{
/* Skip dropped attributes (probably shouldn't see any here). */
if (attr->attisdropped)
continue;
- /* Skip generated columns (COPY won't accept them in the column
- * list) */
+
+ /*
+ * Skip generated columns (COPY won't accept them in the column
+ * list)
+ */
if (attr->attgenerated)
continue;
*columns = lappend(*columns, makeString(pstrdup(attname)));
*/
for (j = i = len - 1; i > 0 && lenr > 0; i--, j--)
{
- int r_end = dr[i];
- int r_start = r_end;
- while (i > 0 && lenr > 0 && dr[i-1] == r_start - 1)
+ int r_end = dr[i];
+ int r_start = r_end;
+
+ while (i > 0 && lenr > 0 && dr[i - 1] == r_start - 1)
--r_start, --i, --lenr;
- dr[2*j] = r_start;
- dr[2*j+1] = r_end;
+ dr[2 * j] = r_start;
+ dr[2 * j + 1] = r_end;
}
/* just copy the rest, if any, as trivial ranges */
for (; i >= 0; i--, j--)
- dr[2*j] = dr[2*j + 1] = dr[i];
+ dr[2 * j] = dr[2 * j + 1] = dr[i];
if (++j)
{
/*
* shunt everything down to start at the right place
*/
- memmove((void *) &dr[0], (void *) &dr[2*j], 2*(len - j) * sizeof(int32));
+ memmove((void *) &dr[0], (void *) &dr[2 * j], 2 * (len - j) * sizeof(int32));
}
+
/*
* make "len" be number of array elements, not ranges
*/
- len = 2*(len - j);
+ len = 2 * (len - j);
cand = 1;
while (len > MAXNUMRANGE * 2)
{
min = PG_INT64_MAX;
for (i = 2; i < len; i += 2)
- if (min > ((int64)dr[i] - (int64)dr[i - 1]))
+ if (min > ((int64) dr[i] - (int64) dr[i - 1]))
{
- min = ((int64)dr[i] - (int64)dr[i - 1]);
+ min = ((int64) dr[i] - (int64) dr[i - 1]);
cand = i;
}
memmove((void *) &dr[cand - 1], (void *) &dr[cand + 1], (len - cand - 1) * sizeof(int32));
len -= 2;
}
+
/*
* check sparseness of result
*/
for (i = 0; i < len; i += 2)
{
if (!i || a[i] != a[i - 1]) /* do not count repeated range */
- size += (int64)(a[i + 1]) - (int64)(a[i]) + 1;
+ size += (int64) (a[i + 1]) - (int64) (a[i]) + 1;
}
- if (size > (int64)INT_MAX || size < (int64)INT_MIN)
+ if (size > (int64) INT_MAX || size < (int64) INT_MIN)
return -1; /* overflow */
return (int) size;
}
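The two loops above (range compression followed by the sparseness check) are easiest to follow as a standalone transformation. Below is a minimal sketch of the core idea, assuming the input array is sorted and duplicate-free; the helper name is hypothetical and not part of the patch.

/*
 * Illustrative sketch only: collapse a sorted, duplicate-free int array
 * into [start, end] pairs by merging runs of consecutive values, the same
 * idea the in-place loop above applies from the array's tail before it
 * drops the smallest gaps until at most MAXNUMRANGE ranges remain.
 */
static int
collapse_ranges(const int *src, int len, int *ranges)
{
	int			nranges = 0;

	for (int i = 0; i < len; i++)
	{
		int			start = src[i];
		int			end = start;

		/* absorb consecutive successors into the current range */
		while (i + 1 < len && src[i + 1] == end + 1)
			end = src[++i];

		ranges[2 * nranges] = start;
		ranges[2 * nranges + 1] = end;
		nranges++;
	}
	return nranges;				/* caller supplies at least 2 * len ints */
}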
queryId = pgss_hash_string(query, query_len);
/*
- * If we are unlucky enough to get a hash of zero(invalid), use queryID
- * as 2 instead, queryID 1 is already in use for normal statements.
+	 * If we are unlucky enough to get a hash of zero (invalid), use
+	 * queryID 2 instead; queryID 1 is already in use for normal
+	 * statements.
*/
if (queryId == UINT64CONST(0))
queryId = UINT64CONST(2);
}
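A minimal sketch of the reservation scheme the comment above describes; the helper name is hypothetical, and UINT64CONST is PostgreSQL's 64-bit integer-constant macro.

/*
 * Illustrative only: 0 is reserved as "invalid" and 1 is already used for
 * normal statements, so a computed hash of 0 is remapped to 2.
 */
static uint64
normalize_utility_query_id(uint64 hash)
{
	return (hash == UINT64CONST(0)) ? UINT64CONST(2) : hash;
}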
mp_result
-mp_int_div_value(mp_int a, mp_small value, mp_int q, mp_small * r)
+mp_int_div_value(mp_int a, mp_small value, mp_int q, mp_small *r)
{
mpz_t vtmp;
mp_digit vbuf[MP_VALUE_DIGITS(value)];
}
mp_result
-mp_int_to_int(mp_int z, mp_small * out)
+mp_int_to_int(mp_int z, mp_small *out)
{
assert(z != NULL);
}
mp_result
-mp_int_to_uint(mp_int z, mp_usmall * out)
+mp_int_to_uint(mp_int z, mp_usmall *out)
{
assert(z != NULL);
/** Sets `q` and `*r` to the quotient and remainder of `a / value`. Division by
powers of 2 is detected and handled efficiently. The remainder is pinned to
`0 <= *r < b`. Either of `q` or `r` may be NULL. */
-mp_result mp_int_div_value(mp_int a, mp_small value, mp_int q, mp_small * r);
+mp_result mp_int_div_value(mp_int a, mp_small value, mp_int q, mp_small *r);
/** Sets `q` and `r` to the quotient and remainder of `a / 2^p2`. This is a
special case for division by powers of two that is more efficient than
The remainder is pinned to `0 <= r < value`. */
static inline
mp_result
-mp_int_mod_value(mp_int a, mp_small value, mp_small * r)
+mp_int_mod_value(mp_int a, mp_small value, mp_small *r)
{
return mp_int_div_value(a, value, 0, r);
}
/** Returns `MP_OK` if `z` is representable as `mp_small`, else `MP_RANGE`.
If `out` is not NULL, `*out` is set to the value of `z` when `MP_OK`. */
-mp_result mp_int_to_int(mp_int z, mp_small * out);
+mp_result mp_int_to_int(mp_int z, mp_small *out);
/** Returns `MP_OK` if `z` is representable as `mp_usmall`, or `MP_RANGE`.
If `out` is not NULL, `*out` is set to the value of `z` when `MP_OK`. */
-mp_result mp_int_to_uint(mp_int z, mp_usmall * out);
+mp_result mp_int_to_uint(mp_int z, mp_usmall *out);
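Since the prototypes above are easiest to read next to a concrete call sequence, here is a short usage sketch. It assumes the rest of the standard imath API (mp_int_init, mp_int_set_value, mp_int_clear) and is not part of the patch.

/* Illustrative usage sketch only. */
mpz_t		num;
mp_int		z = &num;
mp_small	q_check;
mp_small	rem;

mp_int_init(z);
mp_int_set_value(z, 100);
mp_int_div_value(z, 7, z, &rem);	/* z becomes 14, rem becomes 2 */
if (mp_int_to_int(z, &q_check) == MP_OK)
	Assert(q_check == 14 && rem == 2);
mp_int_clear(z);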
/** Converts `z` to a zero-terminated string of characters in the specified
`radix`, writing at most `limit` characters to `str` including the
InitDirtySnapshot(SnapshotDirty);
- nblocks = hscan->rs_nblocks; /* # blocks to be scanned */
+ nblocks = hscan->rs_nblocks; /* # blocks to be scanned */
/* scan the relation */
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
if (fmstate->aux_fmstate)
resultRelInfo->ri_FdwState = fmstate->aux_fmstate;
rslot = execute_foreign_modify(estate, resultRelInfo, CMD_INSERT,
- slot, planSlot);
+ slot, planSlot);
/* Revert that change */
if (fmstate->aux_fmstate)
resultRelInfo->ri_FdwState = fmstate;
bool doNothing = false;
/*
- * If the foreign table we are about to insert routed rows into is also
- * an UPDATE subplan result rel that will be updated later, proceeding
- * with the INSERT will result in the later UPDATE incorrectly modifying
- * those routed rows, so prevent the INSERT --- it would be nice if we
- * could handle this case; but for now, throw an error for safety.
+ * If the foreign table we are about to insert routed rows into is also an
+ * UPDATE subplan result rel that will be updated later, proceeding with
+ * the INSERT will result in the later UPDATE incorrectly modifying those
+ * routed rows, so prevent the INSERT --- it would be nice if we could
+ * handle this case; but for now, throw an error for safety.
*/
if (plan && plan->operation == CMD_UPDATE &&
(resultRelInfo->ri_usesFdwDirectModify ||
if (!grouping_is_sortable(root->parse->groupClause) ||
!pathkeys_contained_in(pathkeys, root->group_pathkeys))
{
- Path sort_path; /* dummy for result of cost_sort */
+ Path sort_path; /* dummy for result of cost_sort */
cost_sort(&sort_path,
root,
* add 1/4th of that default.
*/
double sort_multiplier = 1.0 + (DEFAULT_FDW_SORT_MULTIPLIER
- - 1.0) * 0.25;
+ - 1.0) * 0.25;
*p_startup_cost *= sort_multiplier;
*p_run_cost *= sort_multiplier;
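For concreteness, a worked instance of the surcharge computed above, assuming the usual DEFAULT_FDW_SORT_MULTIPLIER of 1.2:

/*
 * Worked example (assuming DEFAULT_FDW_SORT_MULTIPLIER == 1.2):
 *   sort_multiplier = 1.0 + (1.2 - 1.0) * 0.25 = 1.05
 * i.e. startup and run cost both get a 5% bump, one quarter of the full
 * 20% sort surcharge.
 */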
fmstate->retrieved_attrs,
NULL,
fmstate->temp_cxt);
+
/*
* The returning slot will not necessarily be suitable to store
* heaptuples directly, so allow for conversion.
/*
* Grouping and aggregation are not supported with FOR UPDATE/SHARE,
* so the input_rel should be a base, join, or ordered relation; and
- * if it's an ordered relation, its input relation should be a base
- * or join relation.
+ * if it's an ordered relation, its input relation should be a base or
+ * join relation.
*/
Assert(input_rel->reloptkind == RELOPT_BASEREL ||
input_rel->reloptkind == RELOPT_JOINREL ||
}
/*
- * Now walk the missing attributes. If there is a missing value
- * make space for it. Otherwise, it's going to be NULL.
+ * Now walk the missing attributes. If there is a missing value make
+ * space for it. Otherwise, it's going to be NULL.
*/
for (attnum = firstmissingnum;
attnum < natts;
* There is at least one empty page. So we have to rescan the tree
* deleting empty pages.
*/
- Buffer buffer;
+ Buffer buffer;
DataPageDeleteStack root,
- *ptr,
- *tmp;
+ *ptr,
+ *tmp;
buffer = ReadBufferExtended(gvs->index, MAIN_FORKNUM, rootBlkno,
RBM_NORMAL, gvs->strategy);
/*
- * Lock posting tree root for cleanup to ensure there are no concurrent
- * inserts.
+ * Lock posting tree root for cleanup to ensure there are no
+ * concurrent inserts.
*/
LockBufferForCleanup(buffer);
while (segno < a_segno)
{
/*
- * Once modification is started and page tail is copied, we've
- * to copy unmodified segments.
+				 * Once modification is started and page tail is copied, we
+				 * have to copy unmodified segments.
*/
segsize = SizeOfGinPostingList(oldseg);
if (tailCopy)
}
/*
- * We're about to start modification of the page. So, copy tail of the
- * page if it's not done already.
+ * We're about to start modification of the page. So, copy tail of
+ * the page if it's not done already.
*/
if (!tailCopy && segptr != segmentend)
{
- int tailSize = segmentend - segptr;
+ int tailSize = segmentend - segptr;
tailCopy = (Pointer) palloc(tailSize);
memcpy(tailCopy, segptr, tailSize);
segptr = (Pointer) oldseg;
if (segptr != segmentend && tailCopy)
{
- int restSize = segmentend - segptr;
+ int restSize = segmentend - segptr;
Assert(writePtr + restSize <= PageGetSpecialPointer(page));
memcpy(writePtr, segptr, restSize);
gistcheckpage(r, buffer);
/*
- * Otherwise, recycle it if deleted, and too old to have any processes
- * interested in it.
+ * Otherwise, recycle it if deleted, and too old to have any
+ * processes interested in it.
*/
if (gistPageRecyclable(page))
{
/*
- * If we are generating WAL for Hot Standby then create a
- * WAL record that will allow us to conflict with queries
- * running on standby, in case they have snapshots older
- * than the page's deleteXid.
+ * If we are generating WAL for Hot Standby then create a WAL
+ * record that will allow us to conflict with queries running
+ * on standby, in case they have snapshots older than the
+ * page's deleteXid.
*/
if (XLogStandbyInfoActive() && RelationNeedsWAL(r))
gistXLogPageReuse(r, blkno, GistPageGetDeleteXid(page));
{
text *key = PG_GETARG_TEXT_PP(0);
Oid collid = PG_GET_COLLATION();
- pg_locale_t mylocale = 0;
+ pg_locale_t mylocale = 0;
Datum result;
if (!collid)
int32_t ulen = -1;
UChar *uchar = NULL;
Size bsize;
- uint8_t *buf;
+ uint8_t *buf;
ulen = icu_to_uchar(&uchar, VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key));
{
text *key = PG_GETARG_TEXT_PP(0);
Oid collid = PG_GET_COLLATION();
- pg_locale_t mylocale = 0;
+ pg_locale_t mylocale = 0;
Datum result;
if (!collid)
int32_t ulen = -1;
UChar *uchar = NULL;
Size bsize;
- uint8_t *buf;
+ uint8_t *buf;
ulen = icu_to_uchar(&uchar, VARDATA_ANY(key), VARSIZE_ANY_EXHDR(key));
heap_get_latest_tid(TableScanDesc sscan,
ItemPointer tid)
{
- Relation relation = sscan->rs_rd;
- Snapshot snapshot = sscan->rs_snapshot;
+ Relation relation = sscan->rs_rd;
+ Snapshot snapshot = sscan->rs_snapshot;
ItemPointerData ctid;
TransactionId priorXmax;
HeapTupleHeaderGetCmin(tuple->t_data) >= cid)
{
tmfd->xmax = priorXmax;
+
/*
* Cmin is the problematic value, so store that. See
* above.
Snapshot snapshot;
bool need_unregister_snapshot = false;
TransactionId OldestXmin;
- BlockNumber previous_blkno = InvalidBlockNumber;
+ BlockNumber previous_blkno = InvalidBlockNumber;
BlockNumber root_blkno = InvalidBlockNumber;
OffsetNumber root_offsets[MaxHeapTuplesPerPage];
/* Publish number of blocks to scan */
if (progress)
{
- BlockNumber nblocks;
+ BlockNumber nblocks;
if (hscan->rs_base.rs_parallel != NULL)
{
/* Report scan progress, if asked to. */
if (progress)
{
- BlockNumber blocks_done = heapam_scan_get_blocks_done(hscan);
+ BlockNumber blocks_done = heapam_scan_get_blocks_done(hscan);
if (blocks_done != previous_blkno)
{
/* Report scan progress one last time. */
if (progress)
{
- BlockNumber blks_done;
+ BlockNumber blks_done;
if (hscan->rs_base.rs_parallel != NULL)
{
BlockNumber root_blkno = InvalidBlockNumber;
OffsetNumber root_offsets[MaxHeapTuplesPerPage];
bool in_index[MaxHeapTuplesPerPage];
- BlockNumber previous_blkno = InvalidBlockNumber;
+ BlockNumber previous_blkno = InvalidBlockNumber;
/* state variables for the merge */
ItemPointer indexcursor = NULL;
heapam_scan_get_blocks_done(HeapScanDesc hscan)
{
ParallelBlockTableScanDesc bpscan = NULL;
- BlockNumber startblock;
- BlockNumber blocks_done;
+ BlockNumber startblock;
+ BlockNumber blocks_done;
if (hscan->rs_base.rs_parallel != NULL)
{
blocks_done = hscan->rs_cblock - startblock;
else
{
- BlockNumber nblocks;
+ BlockNumber nblocks;
nblocks = bpscan != NULL ? bpscan->phs_nblocks : hscan->rs_nblocks;
blocks_done = nblocks - startblock +
}
else if (HeapTupleHasExternal(tup) || tup->t_len > TOAST_TUPLE_THRESHOLD)
{
- int options = HEAP_INSERT_SKIP_FSM;
+ int options = HEAP_INSERT_SKIP_FSM;
if (!state->rs_use_wal)
options |= HEAP_INSERT_SKIP_WAL;
toast_decompress_datum_slice(struct varlena *attr, int32 slicelength)
{
struct varlena *result;
- int32 rawsize;
+ int32 rawsize;
Assert(VARATT_IS_COMPRESSED(attr));
result = (struct varlena *) palloc(slicelength + VARHDRSZ);
rawsize = pglz_decompress(TOAST_COMPRESS_RAWDATA(attr),
- VARSIZE(attr) - TOAST_COMPRESS_HDRSZ,
- VARDATA(result),
- slicelength, false);
+ VARSIZE(attr) - TOAST_COMPRESS_HDRSZ,
+ VARDATA(result),
+ slicelength, false);
if (rawsize < 0)
elog(ERROR, "compressed data is corrupted");
static int lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
int tupindex, LVRelStats *vacrelstats, Buffer *vmbuffer);
static bool should_attempt_truncation(VacuumParams *params,
- LVRelStats *vacrelstats);
+ LVRelStats *vacrelstats);
static void lazy_truncate_heap(Relation onerel, LVRelStats *vacrelstats);
static BlockNumber count_nondeletable_pages(Relation onerel,
LVRelStats *vacrelstats);
* cheaper to get rid of it in the next pruning pass than
* to treat it like an indexed tuple. Finally, if index
* cleanup is disabled, the second heap pass will not
- * execute, and the tuple will not get removed, so we
- * must treat it like any other dead tuple that we choose
- * to keep.
+ * execute, and the tuple will not get removed, so we must
+ * treat it like any other dead tuple that we choose to
+ * keep.
*
* If this were to happen for a tuple that actually needed
* to be deleted, we'd be in trouble, because it'd
all_visible = false;
break;
case HEAPTUPLE_LIVE:
+
/*
* Count it as live. Not only is this natural, but it's
* also what acquire_sample_rows() does.
else
{
/*
- * Here, we have indexes but index cleanup is disabled. Instead of
- * vacuuming the dead tuples on the heap, we just forget them.
+ * Here, we have indexes but index cleanup is disabled.
+ * Instead of vacuuming the dead tuples on the heap, we just
+ * forget them.
*
* Note that vacrelstats->dead_tuples could have tuples which
* became dead after HOT-pruning but are not marked dead yet.
- * We do not process them because it's a very rare condition, and
- * the next vacuum will process them anyway.
+ * We do not process them because it's a very rare condition,
+ * and the next vacuum will process them anyway.
*/
Assert(params->index_cleanup == VACOPT_TERNARY_DISABLED);
}
/*
* Re-find and write lock the parent of buf.
*
- * It's possible that the location of buf's downlink has changed
- * since our initial _bt_search() descent. _bt_getstackbuf() will
- * detect and recover from this, updating the stack, which ensures
- * that the new downlink will be inserted at the correct offset.
- * Even buf's parent may have changed.
+ * It's possible that the location of buf's downlink has changed since
+ * our initial _bt_search() descent. _bt_getstackbuf() will detect
+ * and recover from this, updating the stack, which ensures that the
+ * new downlink will be inserted at the correct offset. Even buf's
+ * parent may have changed.
*/
stack->bts_btentry = bknum;
pbuf = _bt_getstackbuf(rel, stack);
new_stack->bts_parent = stack_in;
/*
- * Page level 1 is lowest non-leaf page level prior to leaves. So,
- * if we're on the level 1 and asked to lock leaf page in write mode,
+		 * Page level 1 is the lowest non-leaf page level prior to leaves. So,
+		 * if we're on level 1 and asked to lock a leaf page in write mode,
+		 * then lock the next page in write mode, because it must be a leaf.
*/
if (opaque->btpo.level == 1 && access == BT_WRITE)
/* Initialize remaining insertion scan key fields */
inskey.heapkeyspace = _bt_heapkeyspace(rel);
- inskey.anynullkeys = false; /* unusued */
+ inskey.anynullkeys = false; /* unused */
inskey.nextkey = nextkey;
inskey.pivotsearch = false;
inskey.scantid = NULL;
* much smaller.
*
* Since the truncated tuple is often smaller than the original
- * tuple, it cannot just be copied in place (besides, we want
- * to actually save space on the leaf page). We delete the
- * original high key, and add our own truncated high key at the
- * same offset.
+ * tuple, it cannot just be copied in place (besides, we want to
+ * actually save space on the leaf page). We delete the original
+ * high key, and add our own truncated high key at the same
+ * offset.
*
* Note that the page layout won't be changed very much. oitup is
* already located at the physical beginning of tuple space, so we
key = palloc(offsetof(BTScanInsertData, scankeys) +
sizeof(ScanKeyData) * indnkeyatts);
key->heapkeyspace = itup == NULL || _bt_heapkeyspace(rel);
- key->anynullkeys = false; /* initial assumption */
+ key->anynullkeys = false; /* initial assumption */
key->nextkey = false;
key->pivotsearch = false;
key->keysz = Min(indnkeyatts, tupnatts);
pairingheap_SpGistSearchItem_cmp(const pairingheap_node *a,
const pairingheap_node *b, void *arg)
{
- const SpGistSearchItem *sa = (const SpGistSearchItem *) a;
- const SpGistSearchItem *sb = (const SpGistSearchItem *) b;
+ const SpGistSearchItem *sa = (const SpGistSearchItem *) a;
+ const SpGistSearchItem *sb = (const SpGistSearchItem *) b;
SpGistScanOpaque so = (SpGistScanOpaque) arg;
int i;
}
static void
-spgFreeSearchItem(SpGistScanOpaque so, SpGistSearchItem * item)
+spgFreeSearchItem(SpGistScanOpaque so, SpGistSearchItem *item)
{
if (!so->state.attLeafType.attbyval &&
DatumGetPointer(item->value) != NULL)
* Called in queue context
*/
static void
-spgAddSearchItemToQueue(SpGistScanOpaque so, SpGistSearchItem * item)
+spgAddSearchItemToQueue(SpGistScanOpaque so, SpGistSearchItem *item)
{
pairingheap_add(so->scanQueue, &item->phNode);
}
* the scan is not ordered AND the item satisfies the scankeys
*/
static bool
-spgLeafTest(SpGistScanOpaque so, SpGistSearchItem * item,
+spgLeafTest(SpGistScanOpaque so, SpGistSearchItem *item,
SpGistLeafTuple leafTuple, bool isnull,
bool *reportedSome, storeRes_func storeRes)
{
static void
spgInitInnerConsistentIn(spgInnerConsistentIn *in,
SpGistScanOpaque so,
- SpGistSearchItem * item,
+ SpGistSearchItem *item,
SpGistInnerTuple innerTuple)
{
in->scankeys = so->keyData;
static SpGistSearchItem *
spgMakeInnerItem(SpGistScanOpaque so,
- SpGistSearchItem * parentItem,
+ SpGistSearchItem *parentItem,
SpGistNodeTuple tuple,
spgInnerConsistentOut *out, int i, bool isnull,
double *distances)
}
static void
-spgInnerTest(SpGistScanOpaque so, SpGistSearchItem * item,
+spgInnerTest(SpGistScanOpaque so, SpGistSearchItem *item,
SpGistInnerTuple innerTuple, bool isnull)
{
MemoryContext oldCxt = MemoryContextSwitchTo(so->tempCxt);
static OffsetNumber
spgTestLeafTuple(SpGistScanOpaque so,
- SpGistSearchItem * item,
+ SpGistSearchItem *item,
Page page, OffsetNumber offset,
bool isnull, bool isroot,
bool *reportedSome,
res = (level >= queryLen) ||
DatumGetBool(DirectFunctionCall2Coll(text_starts_with,
PG_GET_COLLATION(),
- out->leafValue,
- PointerGetDatum(query)));
+ out->leafValue,
+ PointerGetDatum(query)));
if (!res) /* no need to consider remaining conditions */
break;
* happened since VACUUM started.
*
* Note: we could make a tighter test by seeing if the xid is
- * "running" according to the active snapshot; but snapmgr.c doesn't
- * currently export a suitable API, and it's not entirely clear
- * that a tighter test is worth the cycles anyway.
+ * "running" according to the active snapshot; but snapmgr.c
+ * doesn't currently export a suitable API, and it's not entirely
+ * clear that a tighter test is worth the cycles anyway.
*/
if (TransactionIdFollowsOrEquals(dt->xid, bds->myXmin))
spgAddPendingTID(bds, &dt->pointer);
table_beginscan_catalog(Relation relation, int nkeys, struct ScanKeyData *key)
{
uint32 flags = SO_TYPE_SEQSCAN |
- SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT;
+ SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE | SO_TEMP_SNAPSHOT;
Oid relid = RelationGetRelid(relation);
Snapshot snapshot = RegisterSnapshot(GetCatalogSnapshot(relid));
{
Snapshot snapshot;
uint32 flags = SO_TYPE_SEQSCAN |
- SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
+ SO_ALLOW_STRAT | SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
Assert(RelationGetRelid(relation) == parallel_scan->phs_relid);
void
table_get_latest_tid(TableScanDesc scan, ItemPointer tid)
{
- Relation rel = scan->rs_rd;
+ Relation rel = scan->rs_rd;
const TableAmRoutine *tableam = rel->rd_tableam;
/*
/*
* Ensure parent(s) have XIDs, so that a child always has an XID later
- * than its parent. Mustn't recurse here, or we might get a stack overflow
- * if we're at the bottom of a huge stack of subtransactions none of which
- * have XIDs yet.
+ * than its parent. Mustn't recurse here, or we might get a stack
+ * overflow if we're at the bottom of a huge stack of subtransactions none
+ * of which have XIDs yet.
*/
if (isSubXact && !FullTransactionIdIsValid(s->parent->fullTransactionId))
{
* just skipping the reset in StartTransaction() won't work.)
*/
static int save_XactIsoLevel;
-static bool save_XactReadOnly;
-static bool save_XactDeferrable;
+static bool save_XactReadOnly;
+static bool save_XactDeferrable;
void
SaveTransactionCharacteristics(void)
nxids = add_size(nxids, s->nChildXids);
}
Assert(SerializedTransactionStateHeaderSize + nxids * sizeof(TransactionId)
- <= maxsize);
+ <= maxsize);
/* Copy them to our scratch space. */
workspace = palloc(nxids * sizeof(TransactionId));
ereport(FATAL,
(errmsg("could not find redo location referenced by checkpoint record"),
errhint("If you are restoring from a backup, touch \"%s/recovery.signal\" and add required recovery options.\n"
- "If you are not restoring from a backup, try removing the file \"%s/backup_label\".\n"
- "Be careful: removing \"%s/backup_label\" will result in a corrupt cluster if restoring from a backup.",
- DataDir, DataDir, DataDir)));
+ "If you are not restoring from a backup, try removing the file \"%s/backup_label\".\n"
+ "Be careful: removing \"%s/backup_label\" will result in a corrupt cluster if restoring from a backup.",
+ DataDir, DataDir, DataDir)));
}
}
else
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
- Oid oid = ((Form_pg_proc) GETSTRUCT(tuple))->oid;
+ Oid oid = ((Form_pg_proc) GETSTRUCT(tuple))->oid;
objects = lappend_oid(objects, oid);
}
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
- Oid oid = ((Form_pg_class) GETSTRUCT(tuple))->oid;
+ Oid oid = ((Form_pg_class) GETSTRUCT(tuple))->oid;
relations = lappend_oid(relations, oid);
}
}
else
{
- Oid defAclOid;
+ Oid defAclOid;
/* Prepare to insert or update pg_default_acl entry */
MemSet(values, 0, sizeof(values));
if (isNew)
InvokeObjectPostCreateHook(DefaultAclRelationId, defAclOid, 0);
else
- InvokeObjectPostAlterHook(DefaultAclRelationId, defAclOid, 0);
+ InvokeObjectPostAlterHook(DefaultAclRelationId, defAclOid, 0);
}
if (HeapTupleIsValid(tuple))
Datum
pg_nextoid(PG_FUNCTION_ARGS)
{
- Oid reloid = PG_GETARG_OID(0);
- Name attname = PG_GETARG_NAME(1);
- Oid idxoid = PG_GETARG_OID(2);
- Relation rel;
- Relation idx;
- HeapTuple atttuple;
+ Oid reloid = PG_GETARG_OID(0);
+ Name attname = PG_GETARG_NAME(1);
+ Oid idxoid = PG_GETARG_OID(2);
+ Relation rel;
+ Relation idx;
+ HeapTuple atttuple;
Form_pg_attribute attform;
- AttrNumber attno;
- Oid newoid;
+ AttrNumber attno;
+ Oid newoid;
/*
* As this function is not intended to be used during normal running, and
/*
* If the expression is just a NULL constant, we do not bother to make
* an explicit pg_attrdef entry, since the default behavior is
- * equivalent. This applies to column defaults, but not for generation
- * expressions.
+ * equivalent. This applies to column defaults, but not for
+ * generation expressions.
*
* Note a nonobvious property of this test: if the column is of a
* domain type, what we'll get is not a bare null Const but a
Anum_pg_class_reloptions, &isnull);
/*
- * Extract the list of column names to be used for the index
- * creation.
+ * Extract the list of column names to be used for the index creation.
*/
for (int i = 0; i < indexInfo->ii_NumIndexAttrs; i++)
{
optionDatum,
INDEX_CREATE_SKIP_BUILD | INDEX_CREATE_CONCURRENT,
0,
- true, /* allow table to be a system catalog? */
- false, /* is_internal? */
+ true, /* allow table to be a system catalog? */
+ false, /* is_internal? */
NULL);
/* Close the relations used and clean up */
values, nulls, replaces);
CatalogTupleUpdate(description, &tuple->t_self, tuple);
- break; /* Assume there can be only one match */
+ break; /* Assume there can be only one match */
}
systable_endscan(sd);
*/
if (get_rel_relispartition(oldIndexId))
{
- List *ancestors = get_partition_ancestors(oldIndexId);
- Oid parentIndexRelid = linitial_oid(ancestors);
+ List *ancestors = get_partition_ancestors(oldIndexId);
+ Oid parentIndexRelid = linitial_oid(ancestors);
DeleteInheritsTuple(oldIndexId, parentIndexRelid);
StoreSingleInheritance(newIndexId, parentIndexRelid, 1);
newClassRel->pgstat_info->t_counts.t_tuples_fetched = tabentry->tuples_fetched;
newClassRel->pgstat_info->t_counts.t_blocks_fetched = tabentry->blocks_fetched;
newClassRel->pgstat_info->t_counts.t_blocks_hit = tabentry->blocks_hit;
- /* The data will be sent by the next pgstat_report_stat() call. */
+
+ /*
+ * The data will be sent by the next pgstat_report_stat()
+ * call.
+ */
}
}
}
Relation userIndexRelation;
/*
- * No more predicate locks will be acquired on this index, and we're
- * about to stop doing inserts into the index which could show
- * conflicts with existing predicate locks, so now is the time to move
- * them to the heap relation.
+ * No more predicate locks will be acquired on this index, and we're about
+ * to stop doing inserts into the index which could show conflicts with
+ * existing predicate locks, so now is the time to move them to the heap
+ * relation.
*/
userHeapRelation = table_open(heapId, ShareUpdateExclusiveLock);
userIndexRelation = index_open(indexId, ShareUpdateExclusiveLock);
TransferPredicateLocksToHeapRelation(userIndexRelation);
/*
- * Now we are sure that nobody uses the index for queries; they just
- * might have it open for updating it. So now we can unset indisready
- * and indislive, then wait till nobody could be using it at all
- * anymore.
+ * Now we are sure that nobody uses the index for queries; they just might
+ * have it open for updating it. So now we can unset indisready and
+ * indislive, then wait till nobody could be using it at all anymore.
*/
index_set_state_flags(indexId, INDEX_DROP_SET_DEAD);
/*
- * Invalidate the relcache for the table, so that after this commit
- * all sessions will refresh the table's index list. Forgetting just
- * the index's relcache entry is not enough.
+ * Invalidate the relcache for the table, so that after this commit all
+ * sessions will refresh the table's index list. Forgetting just the
+ * index's relcache entry is not enough.
*/
CacheInvalidateRelcache(userHeapRelation);
*/
if (OidIsValid(parentConstraintId))
{
- ObjectAddress referenced;
+ ObjectAddress referenced;
ObjectAddressSet(referenced, ConstraintRelationId, parentConstraintId);
recordDependencyOn(&myself, &referenced, DEPENDENCY_PARTITION_PRI);
PROGRESS_SCAN_BLOCKS_DONE,
PROGRESS_SCAN_BLOCKS_TOTAL
};
- const int64 val[] = {
+ const int64 val[] = {
PROGRESS_CREATEIDX_PHASE_BUILD,
PROGRESS_CREATEIDX_SUBPHASE_INITIALIZE,
0, 0, 0, 0
PROGRESS_SCAN_BLOCKS_DONE,
PROGRESS_SCAN_BLOCKS_TOTAL
};
- const int64 val[] = {
+ const int64 val[] = {
PROGRESS_CREATEIDX_PHASE_VALIDATE_IDXSCAN,
0, 0, 0, 0
};
+
pgstat_progress_update_multi_param(5, index, val);
}
PROGRESS_SCAN_BLOCKS_DONE,
PROGRESS_SCAN_BLOCKS_TOTAL
};
- const int64 val[] = {
+ const int64 val[] = {
PROGRESS_CREATEIDX_PHASE_VALIDATE_SORT,
0, 0
};
StringInfoData opfam;
amprocDesc = table_open(AccessMethodProcedureRelationId,
- AccessShareLock);
+ AccessShareLock);
ScanKeyInit(&skey[0],
Anum_pg_amproc_oid,
myself = ProcedureCreate(aggName,
aggNamespace,
- replace, /* maybe replacement */
+ replace, /* maybe replacement */
false, /* doesn't return a set */
finaltype, /* returnType */
GetUserId(), /* proowner */
/*
* If we're replacing an existing entry, we need to validate that
- * we're not changing anything that would break callers.
- * Specifically we must not change aggkind or aggnumdirectargs,
- * which affect how an aggregate call is treated in parse
- * analysis.
+ * we're not changing anything that would break callers. Specifically
+ * we must not change aggkind or aggnumdirectargs, which affect how an
+ * aggregate call is treated in parse analysis.
*/
if (aggKind != oldagg->aggkind)
ereport(ERROR,
prokind == PROKIND_PROCEDURE
? errmsg("cannot change whether a procedure has output parameters")
: errmsg("cannot change return type of existing function"),
- /* translator: first %s is DROP FUNCTION, DROP PROCEDURE or DROP AGGREGATE */
+
+ /*
+ * translator: first %s is DROP FUNCTION, DROP PROCEDURE or DROP
+ * AGGREGATE
+ */
errhint("Use %s %s first.",
dropcmd,
format_procedure(oldproc->oid))));
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("cannot change return type of existing function"),
errdetail("Row type defined by OUT parameters is different."),
- /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
+ /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
errhint("Use %s %s first.",
dropcmd,
format_procedure(oldproc->oid))));
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("cannot change name of input parameter \"%s\"",
old_arg_names[j]),
- /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
+ /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
errhint("Use %s %s first.",
dropcmd,
format_procedure(oldproc->oid))));
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("cannot remove parameter defaults from existing function"),
- /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
+ /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
errhint("Use %s %s first.",
dropcmd,
format_procedure(oldproc->oid))));
ereport(ERROR,
(errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
errmsg("cannot change data type of existing parameter default value"),
- /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
+ /* translator: first %s is DROP FUNCTION or DROP PROCEDURE */
errhint("Use %s %s first.",
dropcmd,
format_procedure(oldproc->oid))));
else
{
/* Creating a new procedure */
- Oid newOid;
+ Oid newOid;
/* First, get default permissions and set up proacl */
proacl = get_user_default_acl(OBJECT_FUNCTION, proowner,
result = NIL;
while (HeapTupleIsValid(tup = systable_getnext(scan)))
{
- Oid oid = ((Form_pg_publication) GETSTRUCT(tup))->oid;
+ Oid oid = ((Form_pg_publication) GETSTRUCT(tup))->oid;
result = lappend_oid(result, oid);
}
break;
default:
elog(ERROR, "invalid relpersistence: %c", relpersistence);
- return NULL; /* placate compiler */
+ return NULL; /* placate compiler */
}
srel = smgropen(rnode, backend);
errhint("Must be superuser to create an access method.")));
/* Check if name is used */
- amoid = GetSysCacheOid1(AMNAME, Anum_pg_am_oid,
+ amoid = GetSysCacheOid1(AMNAME, Anum_pg_am_oid,
CStringGetDatum(stmt->amname));
if (OidIsValid(amoid))
{
static void rebuild_relation(Relation OldHeap, Oid indexOid, bool verbose);
static void copy_table_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex,
- bool verbose, bool *pSwapToastByContent,
- TransactionId *pFreezeXid, MultiXactId *pCutoffMulti);
+ bool verbose, bool *pSwapToastByContent,
+ TransactionId *pFreezeXid, MultiXactId *pCutoffMulti);
static List *get_tables_to_cluster(MemoryContext cluster_context);
/* Copy the heap data into the new table in the desired order */
copy_table_data(OIDNewHeap, tableOid, indexOid, verbose,
- &swap_toast_by_content, &frozenXid, &cutoffMulti);
+ &swap_toast_by_content, &frozenXid, &cutoffMulti);
/*
* Swap the physical files of the target and transient tables, then
(errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED),
errmsg("function \"%s\" must be fired for INSERT or UPDATE",
funcname)));
- ItemPointerSetInvalid(&checktid); /* keep compiler quiet */
+ ItemPointerSetInvalid(&checktid); /* keep compiler quiet */
}
slot = table_slot_create(trigdata->tg_relation, NULL);
tmptid = checktid;
{
IndexFetchTableData *scan = table_index_fetch_begin(trigdata->tg_relation);
- bool call_again = false;
+ bool call_again = false;
if (!table_index_fetch_tuple(scan, &tmptid, SnapshotSelf, slot,
&call_again, NULL))
/* We assume that there can be at most one matching tuple */
if (HeapTupleIsValid(dbtuple))
- oid = ((Form_pg_database)GETSTRUCT(dbtuple))->oid;
+ oid = ((Form_pg_database) GETSTRUCT(dbtuple))->oid;
else
oid = InvalidOid;
static void
ExplainPrintSettings(ExplainState *es)
{
- int num;
+ int num;
struct config_generic **gucs;
/* bail out if information about settings not requested */
if (es->format != EXPLAIN_FORMAT_TEXT)
{
- int i;
+ int i;
ExplainOpenGroup("Settings", "Settings", true, es);
for (i = 0; i < num; i++)
{
- char *setting;
+ char *setting;
struct config_generic *conf = gucs[i];
setting = GetConfigOptionByName(conf->name, NULL, true);
}
else
{
- int i;
- StringInfoData str;
+ int i;
+ StringInfoData str;
initStringInfo(&str);
for (i = 0; i < num; i++)
{
- char *setting;
+ char *setting;
struct config_generic *conf = gucs[i];
if (i > 0)
ExplainNode(ps, NIL, NULL, NULL, es);
/*
- * If requested, include information about GUC parameters with values
- * that don't match the built-in defaults.
+ * If requested, include information about GUC parameters with values that
+ * don't match the built-in defaults.
*/
ExplainPrintSettings(es);
}
if (es->costs && es->verbose &&
outerPlanState(planstate)->worker_jit_instrument)
{
- PlanState *child = outerPlanState(planstate);
+ PlanState *child = outerPlanState(planstate);
int n;
SharedJitInstrumentation *w = child->worker_jit_instrument;
t_sql = DirectFunctionCall3Coll(replace_text,
C_COLLATION_OID,
- t_sql,
- CStringGetTextDatum("@extschema@"),
- CStringGetTextDatum(qSchemaName));
+ t_sql,
+ CStringGetTextDatum("@extschema@"),
+ CStringGetTextDatum(qSchemaName));
}
/*
{
t_sql = DirectFunctionCall3Coll(replace_text,
C_COLLATION_OID,
- t_sql,
- CStringGetTextDatum("MODULE_PATHNAME"),
- CStringGetTextDatum(control->module_pathname));
+ t_sql,
+ CStringGetTextDatum("MODULE_PATHNAME"),
+ CStringGetTextDatum(control->module_pathname));
}
/* And now back to C string */
*/
struct ReindexIndexCallbackState
{
- bool concurrent; /* flag from statement */
- Oid locked_table_oid; /* tracks previously locked table */
+ bool concurrent; /* flag from statement */
+ Oid locked_table_oid; /* tracks previously locked table */
};
/*
{
if (progress)
{
- PGPROC *holder = BackendIdGetProc(old_snapshots[i].backendId);
+ PGPROC *holder = BackendIdGetProc(old_snapshots[i].backendId);
pgstat_progress_update_param(PROGRESS_WAITFOR_CURRENT_PID,
holder->pid);
*/
if (partitioned && stmt->relation && !stmt->relation->inh)
{
- PartitionDesc pd = RelationGetPartitionDesc(rel);
+ PartitionDesc pd = RelationGetPartitionDesc(rel);
if (pd->nparts != 0)
flags |= INDEX_CREATE_INVALID;
/* Get a session-level lock on each table. */
foreach(lc, relationLocks)
{
- LockRelId *lockrelid = (LockRelId *) lfirst(lc);
+ LockRelId *lockrelid = (LockRelId *) lfirst(lc);
LockRelationIdForSession(lockrelid, ShareUpdateExclusiveLock);
}
/*
* The index is now valid in the sense that it contains all currently
- * interesting tuples. But since it might not contain tuples deleted just
- * before the reference snap was taken, we have to wait out any
+ * interesting tuples. But since it might not contain tuples deleted
+ * just before the reference snap was taken, we have to wait out any
* transactions that might have older snapshots.
*/
pgstat_progress_update_param(PROGRESS_CREATEIDX_PHASE,
*/
foreach(lc, relationLocks)
{
- LockRelId *lockrelid = (LockRelId *) lfirst(lc);
+ LockRelId *lockrelid = (LockRelId *) lfirst(lc);
UnlockRelationIdForSession(lockrelid, ShareUpdateExclusiveLock);
}
elog(ERROR, "cache lookup failed for statistics object %u", statsOid);
/*
- * When none of the defined statistics types contain datum values
- * from the table's columns then there's no need to reset the stats.
- * Functional dependencies and ndistinct stats should still hold true.
+ * When none of the defined statistics types contain datum values from the
+ * table's columns then there's no need to reset the stats. Functional
+ * dependencies and ndistinct stats should still hold true.
*/
if (!statext_is_kind_built(oldtup, STATS_EXT_MCV))
{
const char *colName, LOCKMODE lockmode);
static bool NotNullImpliedByRelConstraints(Relation rel, Form_pg_attribute attr);
static bool ConstraintImpliedByRelConstraint(Relation scanrel,
- List *partConstraint, List *existedConstraints);
+ List *partConstraint, List *existedConstraints);
static ObjectAddress ATExecColumnDefault(Relation rel, const char *colName,
Node *newDefault, LOCKMODE lockmode);
static ObjectAddress ATExecAddIdentity(Relation rel, const char *colName,
}
/*
- * Now add any newly specified CHECK constraints to the new relation.
- * Same as for defaults above, but these need to come after partitioning
- * is set up.
+ * Now add any newly specified CHECK constraints to the new relation. Same
+ * as for defaults above, but these need to come after partitioning is set
+ * up.
*/
if (stmt->constraints)
AddRelationNewConstraints(rel, NIL, stmt->constraints,
*/
if (IsSystemClass(relOid, classform) && relkind == RELKIND_INDEX)
{
- HeapTuple locTuple;
- Form_pg_index indexform;
- bool indisvalid;
+ HeapTuple locTuple;
+ Form_pg_index indexform;
+ bool indisvalid;
locTuple = SearchSysCache1(INDEXRELID, ObjectIdGetDatum(relOid));
if (!HeapTupleIsValid(locTuple))
{
Relation toastrel = relation_open(toast_relid,
AccessExclusiveLock);
+
RelationSetNewRelfilenode(toastrel,
toastrel->rd_rel->relpersistence);
table_close(toastrel, NoLock);
/* nothing to do here, oid columns don't exist anymore */
break;
case AT_SetTableSpace: /* SET TABLESPACE */
+
/*
* Only do this for partitioned tables and indexes, for which this
* is just a catalog change. Other relation types which have
{
/*
* If required, test the current data within the table against new
- * constraints generated by ALTER TABLE commands, but don't rebuild
- * data.
+ * constraints generated by ALTER TABLE commands, but don't
+ * rebuild data.
*/
if (tab->constraints != NIL || tab->verify_new_notnull ||
tab->partition_constraint != NULL)
{
/*
* If we are rebuilding the tuples OR if we added any new but not
- * verified NOT NULL constraints, check all not-null constraints.
- * This is a bit of overkill but it minimizes risk of bugs, and
+ * verified NOT NULL constraints, check all not-null constraints. This
+ * is a bit of overkill but it minimizes risk of bugs, and
* heap_attisnull is a pretty cheap test anyway.
*/
for (i = 0; i < newTupDesc->natts; i++)
{
/*
* If there's no rewrite, old and new table are guaranteed to
- * have the same AM, so we can just use the old slot to
- * verify new constraints etc.
+ * have the same AM, so we can just use the old slot to verify
+ * new constraints etc.
*/
insertslot = oldslot;
}
/*
* Ordinarily phase 3 must ensure that no NULLs exist in columns that
* are set NOT NULL; however, if we can find a constraint which proves
- * this then we can skip that. We needn't bother looking if
- * we've already found that we must verify some other NOT NULL
- * constraint.
+ * this then we can skip that. We needn't bother looking if we've
+ * already found that we must verify some other NOT NULL constraint.
*/
if (!tab->verify_new_notnull &&
!NotNullImpliedByRelConstraints(rel, (Form_pg_attribute) GETSTRUCT(tuple)))
*/
if (tab->rewrite)
{
- Relation newrel;
+ Relation newrel;
newrel = table_open(RelationGetRelid(rel), NoLock);
RelationClearMissing(newrel);
{
/*
* Changing the type of a column that is used by a
- * generated column is not allowed by SQL standard.
- * It might be doable with some thinking and effort.
+ * generated column is not allowed by SQL standard. It
+ * might be doable with some thinking and effort.
*/
ereport(ERROR,
(errcode(ERRCODE_SYNTAX_ERROR),
/*
* Here we go --- change the recorded column type and collation. (Note
- * heapTup is a copy of the syscache entry, so okay to scribble on.)
- * First fix up the missing value if any.
+ * heapTup is a copy of the syscache entry, so okay to scribble on.) First
+ * fix up the missing value if any.
*/
if (attTup->atthasmissing)
{
- Datum missingval;
- bool missingNull;
+ Datum missingval;
+ bool missingNull;
/* if rewrite is true the missing value should already be cleared */
Assert(tab->rewrite == 0);
/* if it's a null array there is nothing to do */
- if (! missingNull)
+ if (!missingNull)
{
/*
* Get the datum out of the array and repack it in a new array
* changed, only the array metadata.
*/
- int one = 1;
- bool isNull;
- Datum valuesAtt[Natts_pg_attribute];
- bool nullsAtt[Natts_pg_attribute];
- bool replacesAtt[Natts_pg_attribute];
- HeapTuple newTup;
+ int one = 1;
+ bool isNull;
+ Datum valuesAtt[Natts_pg_attribute];
+ bool nullsAtt[Natts_pg_attribute];
+ bool replacesAtt[Natts_pg_attribute];
+ HeapTuple newTup;
MemSet(valuesAtt, 0, sizeof(valuesAtt));
MemSet(nullsAtt, false, sizeof(nullsAtt));
attTup->attalign,
&isNull);
missingval = PointerGetDatum(
- construct_array(&missingval,
- 1,
- targettype,
- tform->typlen,
- tform->typbyval,
- tform->typalign));
+ construct_array(&missingval,
+ 1,
+ targettype,
+ tform->typlen,
+ tform->typbyval,
+ tform->typalign));
valuesAtt[Anum_pg_attribute_attmissingval - 1] = missingval;
replacesAtt[Anum_pg_attribute_attmissingval - 1] = true;
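The call above rebuilds attmissingval as a one-element array of the column's new type. As a reminder of construct_array()'s argument order, a minimal sketch with an assumed int4 element (example value only, not from the patch):

/* Illustrative sketch only: build a one-element int4 array. */
Datum		elem = Int32GetDatum(42);	/* assumed example value */
ArrayType  *arr = construct_array(&elem, 1, INT4OID,
								  sizeof(int32), true, 'i');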
Oid reloid = RelationGetRelid(rel);
/*
- * Shouldn't be called on relations having storage; these are processed
- * in phase 3.
+ * Shouldn't be called on relations having storage; these are processed in
+ * phase 3.
*/
Assert(!RELKIND_HAS_STORAGE(rel->rd_rel->relkind));
/* Can't allow a non-shared relation in pg_global */
if (newTableSpace == GLOBALTABLESPACE_OID)
ereport(ERROR,
- (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
- errmsg("only shared relations can be placed in pg_global tablespace")));
+ (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+ errmsg("only shared relations can be placed in pg_global tablespace")));
/*
* No work if no change in tablespace.
i = -1;
while ((i = bms_next_member(expr_attrs, i)) >= 0)
{
- AttrNumber attno = i + FirstLowInvalidHeapAttributeNumber;
+ AttrNumber attno = i + FirstLowInvalidHeapAttributeNumber;
if (TupleDescAttr(RelationGetDescr(rel), attno - 1)->attgenerated)
ereport(ERROR,
bool
ConstraintImpliedByRelConstraint(Relation scanrel, List *testConstraint, List *provenConstraint)
{
- List *existConstraint = list_copy(provenConstraint);
+ List *existConstraint = list_copy(provenConstraint);
TupleConstr *constr = RelationGetDescr(scanrel)->constr;
int num_check,
i;
* not-false and try to prove the same for testConstraint.
*
* Note that predicate_implied_by assumes its first argument is known
- * immutable. That should always be true for both NOT NULL and
- * partition constraints, so we don't test it here.
+ * immutable. That should always be true for both NOT NULL and partition
+ * constraints, so we don't test it here.
*/
return predicate_implied_by(testConstraint, existConstraint, true);
}
/*
* Allow explicit specification of database's default tablespace in
- * default_tablespace without triggering permissions checks. Don't
- * allow specifying that when creating a partitioned table, however,
- * since the result is confusing.
+ * default_tablespace without triggering permissions checks. Don't allow
+ * specifying that when creating a partitioned table, however, since the
+ * result is confusing.
*/
if (result == MyDatabaseTableSpace)
{
case AFTER_TRIGGER_FDW_REUSE:
/*
- * Store tuple in the slot so that tg_trigtuple does not
- * reference tuplestore memory. (It is formally possible for the
- * trigger function to queue trigger events that add to the same
+ * Store tuple in the slot so that tg_trigtuple does not reference
+ * tuplestore memory. (It is formally possible for the trigger
+ * function to queue trigger events that add to the same
* tuplestore, which can push other tuples out of memory.) The
* distinction is academic, because we start with a minimal tuple
* that is stored as a heap tuple, constructed in different memory
ExecVacuum(ParseState *pstate, VacuumStmt *vacstmt, bool isTopLevel)
{
VacuumParams params;
- bool verbose = false;
- bool skip_locked = false;
- bool analyze = false;
- bool freeze = false;
- bool full = false;
- bool disable_page_skipping = false;
- ListCell *lc;
+ bool verbose = false;
+ bool skip_locked = false;
+ bool analyze = false;
+ bool freeze = false;
+ bool full = false;
+ bool disable_page_skipping = false;
+ ListCell *lc;
/* Set default value */
params.index_cleanup = VACOPT_TERNARY_DEFAULT;
/* Parse options list */
foreach(lc, vacstmt->options)
{
- DefElem *opt = (DefElem *) lfirst(lc);
+ DefElem *opt = (DefElem *) lfirst(lc);
/* Parse common options for VACUUM and ANALYZE */
if (strcmp(opt->defname, "verbose") == 0)
/*
* Determine the log level.
*
- * For manual VACUUM or ANALYZE, we emit a WARNING to match the log statements
- * in the permission checks; otherwise, only log if the caller so requested.
+ * For manual VACUUM or ANALYZE, we emit a WARNING to match the log
+ * statements in the permission checks; otherwise, only log if the caller
+ * so requested.
*/
if (!IsAutoVacuumWorkerProcess())
elevel = WARNING;
}
/*
- * Some table AMs might not need per-relation xid / multixid
- * horizons. It therefore seems reasonable to allow relfrozenxid and
- * relminmxid to not be set (i.e. set to their respective Invalid*Id)
+ * Some table AMs might not need per-relation xid / multixid horizons.
+ * It therefore seems reasonable to allow relfrozenxid and relminmxid
+ * to not be set (i.e. set to their respective Invalid*Id)
* independently. Thus validate and compute horizon for each only if
* set.
*
static void
ExecComputeSlotInfo(ExprState *state, ExprEvalStep *op)
{
- PlanState *parent = state->parent;
+ PlanState *parent = state->parent;
TupleDesc desc = NULL;
const TupleTableSlotOps *tts_ops = NULL;
- bool isfixed = false;
+ bool isfixed = false;
if (op->d.fetch.known_desc != NULL)
{
*/
ExprState *
ExecBuildGroupingEqual(TupleDesc ldesc, TupleDesc rdesc,
- const TupleTableSlotOps * lops, const TupleTableSlotOps * rops,
+ const TupleTableSlotOps *lops, const TupleTableSlotOps *rops,
int numCols,
const AttrNumber *keyColIdx,
const Oid *eqfunctions,
ExecEvalSysVar(ExprState *state, ExprEvalStep *op, ExprContext *econtext,
TupleTableSlot *slot)
{
- Datum d;
+ Datum d;
/* slot_getsysattr has sufficient defenses against bad attnums */
d = slot_getsysattr(slot,
if (relation)
*slot = table_slot_create(relation,
- &epqstate->estate->es_tupleTable);
+ &epqstate->estate->es_tupleTable);
else
*slot = ExecAllocTableSlot(&epqstate->estate->es_tupleTable,
epqstate->origslot->tts_tupleDescriptor,
* instrumentation in per-query context.
*/
ibytes = offsetof(SharedJitInstrumentation, jit_instr)
- + mul_size(shared_jit->num_workers, sizeof(JitInstrumentation));
+ + mul_size(shared_jit->num_workers, sizeof(JitInstrumentation));
planstate->worker_jit_instrument =
MemoryContextAlloc(planstate->state->es_query_cxt, ibytes);
/* Accumulate JIT instrumentation, if any. */
if (pei->jit_instrumentation)
ExecParallelRetrieveJitInstrumentation(pei->planstate,
- pei->jit_instrumentation);
+ pei->jit_instrumentation);
/* Free any serialized parameters. */
if (DsaPointerIsValid(pei->param_exec))
TupleTableSlot *tupslot;
AttrNumber *tupmap;
int indexes[FLEXIBLE_ARRAY_MEMBER];
-} PartitionDispatchData;
+} PartitionDispatchData;
/* struct to hold result relations coming from UPDATE subplans */
typedef struct SubplanResultRelHashElem
{
- Oid relid; /* hash key -- must be first */
+ Oid relid; /* hash key -- must be first */
ResultRelInfo *rri;
} SubplanResultRelHashElem;
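Because the struct's comment requires the key to come first, here is a minimal creation sketch under the assumption that the table is built with dynahash's HASH_ELEM | HASH_BLOBS (raw-byte key comparison); the table name and size are illustrative only.

/*
 * Illustrative sketch only: with HASH_BLOBS the key is hashed and compared
 * as raw bytes starting at the entry's first byte, which is why relid must
 * be the first field of SubplanResultRelHashElem.
 */
HASHCTL		ctl;
HTAB	   *htab;

memset(&ctl, 0, sizeof(ctl));
ctl.keysize = sizeof(Oid);
ctl.entrysize = sizeof(SubplanResultRelHashElem);
htab = hash_create("subplan result rels", 64, &ctl,
				   HASH_ELEM | HASH_BLOBS);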
if (proute->subplan_resultrel_htab)
{
Oid partoid = partdesc->oids[partidx];
- SubplanResultRelHashElem *elem;
+ SubplanResultRelHashElem *elem;
elem = hash_search(proute->subplan_resultrel_htab,
&partoid, HASH_FIND, NULL);
ResultRelInfo *rri = &mtstate->resultRelInfo[i];
bool found;
Oid partoid = RelationGetRelid(rri->ri_RelationDesc);
- SubplanResultRelHashElem *elem;
+ SubplanResultRelHashElem *elem;
elem = (SubplanResultRelHashElem *)
hash_search(htab, &partoid, HASH_ENTER, &found);
* It's safe to reuse these from the partition root, as we
* only process one tuple at a time (therefore we won't
* overwrite needed data in slots), and the results of
- * projections are independent of the underlying
- * storage. Projections and where clauses themselves don't
- * store state / are independent of the underlying storage.
+ * projections are independent of the underlying storage.
+ * Projections and where clauses themselves don't store state
+ * / are independent of the underlying storage.
*/
leaf_part_rri->ri_onConflict->oc_ProjSlot =
rootResultRelInfo->ri_onConflict->oc_ProjSlot;
{
MemoryContext oldcxt;
PartitionRoutingInfo *partrouteinfo;
- int rri_index;
+ int rri_index;
oldcxt = MemoryContextSwitchTo(proute->memcxt);
}
else
{
- int pd_idx = 0;
- int pp_idx;
+ int pd_idx = 0;
+ int pp_idx;
/*
* Some new partitions have appeared since plan time, and
* those are reflected in our PartitionDesc but were not
* present in the one used to construct subplan_map and
* subpart_map. So we must construct new and longer arrays
- * where the partitions that were originally present map to the
- * same place, and any added indexes map to -1, as if the
+ * where the partitions that were originally present map to
+ * the same place, and any added indexes map to -1, as if the
* new partitions had been pruned.
*/
pprune->subpart_map = palloc(sizeof(int) * partdesc->nparts);
static bool
tuples_equal(TupleTableSlot *slot1, TupleTableSlot *slot2)
{
- int attrnum;
+ int attrnum;
Assert(slot1->tts_tupleDescriptor->natts ==
slot2->tts_tupleDescriptor->natts);
if (!DatumGetBool(FunctionCall2Coll(&typentry->eq_opr_finfo,
att->attcollation,
- slot1->tts_values[attrnum],
- slot2->tts_values[attrnum])))
+ slot1->tts_values[attrnum],
+ slot2->tts_values[attrnum])))
return false;
}
resultRelInfo->ri_TrigDesc->trig_insert_before_row)
{
if (!ExecBRInsertTriggers(estate, resultRelInfo, slot))
- skip_tuple = true; /* "do nothing" */
+ skip_tuple = true; /* "do nothing" */
}
if (!skip_tuple)
{
if (!ExecBRUpdateTriggers(estate, epqstate, resultRelInfo,
tid, NULL, slot))
- skip_tuple = true; /* "do nothing" */
+ skip_tuple = true; /* "do nothing" */
}
if (!skip_tuple)
if (resultRelInfo->ri_PartitionCheck)
ExecPartitionCheck(resultRelInfo, slot, estate, true);
- simple_table_update(rel, tid, slot,estate->es_snapshot,
+ simple_table_update(rel, tid, slot, estate->es_snapshot,
&update_indexes);
if (resultRelInfo->ri_NumIndices > 0 && update_indexes)
const char *relname)
{
/*
- * We currently only support writing to regular tables. However, give
- * a more specific error for partitioned and foreign tables.
+ * We currently only support writing to regular tables. However, give a
+ * more specific error for partitioned and foreign tables.
*/
if (relkind == RELKIND_PARTITIONED_TABLE)
ereport(ERROR,
errmsg("cannot use relation \"%s.%s\" as logical replication target",
nspname, relname),
errdetail("\"%s.%s\" is a partitioned table.",
- nspname, relname)));
+ nspname, relname)));
else if (relkind == RELKIND_FOREIGN_TABLE)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("cannot use relation \"%s.%s\" as logical replication target",
nspname, relname),
errdetail("\"%s.%s\" is a foreign table.",
- nspname, relname)));
+ nspname, relname)));
if (relkind != RELKIND_RELATION)
ereport(ERROR,
errmsg("cannot use relation \"%s.%s\" as logical replication target",
nspname, relname),
errdetail("\"%s.%s\" is not a table.",
- nspname, relname)));
+ nspname, relname)));
}
/* Check if it meets the access-method conditions */
if (!(*recheckMtd) (node, slot))
- return ExecClearTuple(slot); /* would not be returned by scan */
+ return ExecClearTuple(slot); /* would not be returned by
+ * scan */
return slot;
}
static TupleDesc ExecTypeFromTLInternal(List *targetList,
bool skipjunk);
-static pg_attribute_always_inline void
-slot_deform_heap_tuple(TupleTableSlot *slot, HeapTuple tuple, uint32 *offp,
+static pg_attribute_always_inline void slot_deform_heap_tuple(TupleTableSlot *slot, HeapTuple tuple, uint32 *offp,
int natts);
static inline void tts_buffer_heap_store_tuple(TupleTableSlot *slot,
- HeapTuple tuple,
- Buffer buffer,
- bool transfer_pin);
+ HeapTuple tuple,
+ Buffer buffer,
+ bool transfer_pin);
static void tts_heap_store_tuple(TupleTableSlot *slot, HeapTuple tuple, bool shouldFree);
{
elog(ERROR, "virtual tuple table slot does not have system attributes");
- return 0; /* silence compiler warnings */
+ return 0; /* silence compiler warnings */
}
/*
for (int natt = 0; natt < desc->natts; natt++)
{
Form_pg_attribute att = TupleDescAttr(desc, natt);
- Datum val;
+ Datum val;
if (att->attbyval || slot->tts_isnull[natt])
continue;
for (int natt = 0; natt < desc->natts; natt++)
{
Form_pg_attribute att = TupleDescAttr(desc, natt);
- Datum val;
+ Datum val;
if (att->attbyval || slot->tts_isnull[natt])
continue;
if (att->attlen == -1 &&
VARATT_IS_EXTERNAL_EXPANDED(DatumGetPointer(val)))
{
- Size data_length;
+ Size data_length;
/*
* We want to flatten the expanded value so that the materialized
}
else
{
- Size data_length = 0;
+ Size data_length = 0;
data = (char *) att_align_nominal(data, att->attalign);
data_length = att_addlength_datum(data_length, att->attlen, val);
static void
tts_heap_copyslot(TupleTableSlot *dstslot, TupleTableSlot *srcslot)
{
- HeapTuple tuple;
+ HeapTuple tuple;
MemoryContext oldcontext;
oldcontext = MemoryContextSwitchTo(dstslot->tts_mcxt);
{
elog(ERROR, "minimal tuple table slot does not have system attributes");
- return 0; /* silence compiler warnings */
+ return 0; /* silence compiler warnings */
}
static void
MakeTupleTableSlot(TupleDesc tupleDesc,
const TupleTableSlotOps *tts_ops)
{
- Size basesz, allocsz;
+ Size basesz,
+ allocsz;
TupleTableSlot *slot;
+
basesz = tts_ops->base_slot_size;
/*
slot_getsomeattrs_int(TupleTableSlot *slot, int attnum)
{
/* Check for caller errors */
- Assert(slot->tts_nvalid < attnum); /* slot_getsomeattr checked */
+ Assert(slot->tts_nvalid < attnum); /* slot_getsomeattr checked */
Assert(attnum > 0);
if (unlikely(attnum > slot->tts_tupleDescriptor->natts))
slot->tts_ops->getsomeattrs(slot, attnum);
/*
- * If the underlying tuple doesn't have enough attributes, tuple descriptor
- * must have the missing attributes.
+ * If the underlying tuple doesn't have enough attributes, tuple
+ * descriptor must have the missing attributes.
*/
if (unlikely(slot->tts_nvalid < attnum))
{