{
if (es->format == EXPLAIN_FORMAT_TEXT)
{
- appendStringInfo(es->str, " (cost=%.2f..%.2f rows=%.0f width=%d)",
+ appendStringInfo(es->str,
+ " (cost=%.2f..%.2f/%.2f rows=%.0f width=%d)",
plan->startup_cost, plan->total_cost,
- plan->plan_rows, plan->plan_width);
+ plan->serial_cost, plan->plan_rows,
+ plan->plan_width);
}
else
{
ExplainPropertyFloat("Startup Cost", plan->startup_cost, 2, es);
ExplainPropertyFloat("Total Cost", plan->total_cost, 2, es);
+ ExplainPropertyFloat("Serial Cost", plan->serial_cost, 2, es);
ExplainPropertyFloat("Plan Rows", plan->plan_rows, 0, es);
ExplainPropertyInteger("Plan Width", plan->plan_width, es);
}
{
COPY_SCALAR_FIELD(startup_cost);
COPY_SCALAR_FIELD(total_cost);
+ COPY_SCALAR_FIELD(serial_cost);
COPY_SCALAR_FIELD(plan_rows);
COPY_SCALAR_FIELD(plan_width);
COPY_SCALAR_FIELD(parallel_aware);
{
WRITE_FLOAT_FIELD(startup_cost, "%.2f");
WRITE_FLOAT_FIELD(total_cost, "%.2f");
+ WRITE_FLOAT_FIELD(serial_cost, "%.2f");
WRITE_FLOAT_FIELD(plan_rows, "%.0f");
WRITE_INT_FIELD(plan_width);
WRITE_BOOL_FIELD(parallel_aware);
WRITE_FLOAT_FIELD(rows, "%.0f");
WRITE_FLOAT_FIELD(startup_cost, "%.2f");
WRITE_FLOAT_FIELD(total_cost, "%.2f");
+ WRITE_FLOAT_FIELD(serial_cost, "%.2f");
WRITE_NODE_FIELD(pathkeys);
}
WRITE_NODE_FIELD(indexorderbycols);
WRITE_ENUM_FIELD(indexscandir, ScanDirection);
WRITE_FLOAT_FIELD(indextotalcost, "%.2f");
+ WRITE_FLOAT_FIELD(indexserialcost, "%.2f");
WRITE_FLOAT_FIELD(indexselectivity, "%.4f");
}
READ_FLOAT_FIELD(startup_cost);
READ_FLOAT_FIELD(total_cost);
+ READ_FLOAT_FIELD(serial_cost);
READ_FLOAT_FIELD(plan_rows);
READ_INT_FIELD(plan_width);
READ_BOOL_FIELD(parallel_aware);
path->startup_cost = startup_cost;
path->total_cost = startup_cost + cpu_run_cost + disk_run_cost;
+ path->serial_cost = disk_run_cost;
}
/*
{
Cost startup_cost = 0;
Cost run_cost = 0;
+ Cost disk_run_cost = 0;
RangeTblEntry *rte;
TableSampleClause *tsc;
TsmRoutine *tsm;
* disk costs (recall that baserel->pages has already been set to the
* number of pages the sampling method will visit)
*/
- run_cost += spc_page_cost * baserel->pages;
+ disk_run_cost = spc_page_cost * baserel->pages;
+ run_cost += disk_run_cost;
/*
* CPU costs (recall that baserel->tuples has already been set to the
path->startup_cost = startup_cost;
path->total_cost = startup_cost + run_cost;
+ path->serial_cost = disk_run_cost;
}
/*
{
Cost startup_cost = 0;
Cost run_cost = 0;
+ Cost serial_tuple_cost = 0;
/* Mark the path with the correct row estimate */
if (rows)
/* Parallel setup and communication cost. */
startup_cost += parallel_setup_cost;
- run_cost += parallel_tuple_cost * path->path.rows;
+ serial_tuple_cost = parallel_tuple_cost * path->path.rows;
+ run_cost += serial_tuple_cost;
path->path.startup_cost = startup_cost;
path->path.total_cost = (startup_cost + run_cost);
+
+ /*
+ * The cost of setting up parallel workers undoubtedly cannot be
+ * parallelized; it doesn't take less time to set up more workers than
+ * fewer. It's more arguable whether the full parallel_tuple_cost should
+ * be charged as serial_cost, since that represents both the cost the
+ * leader pays and the cost the worker pays. But for now we err on the
+ * side of caution and charge the entire amount.
+ */
+ path->path.serial_cost = path->subpath->serial_cost + parallel_setup_cost
+ + serial_tuple_cost;
}
/*
* estimates of caching behavior
*
* In addition to rows, startup_cost and total_cost, cost_index() sets the
- * path's indextotalcost and indexselectivity fields. These values will be
- * needed if the IndexPath is used in a BitmapIndexScan.
+ * path's indextotalcost, indexserialcost, and indexselectivity fields. These
+ * values will be needed if the IndexPath is used in a BitmapIndexScan.
*
* NOTE: path->indexquals must contain only clauses usable as index
* restrictions. Any additional quals evaluated as qpquals may reduce the
Cost run_cost = 0;
Cost indexStartupCost;
Cost indexTotalCost;
+ Cost indexSerialCost;
Selectivity indexSelectivity;
double indexCorrelation,
csquared;
double spc_seq_page_cost,
spc_random_page_cost;
Cost min_IO_cost,
- max_IO_cost;
+ max_IO_cost,
+ disk_run_cost;
QualCost qpqual_cost;
Cost cpu_per_tuple;
double tuples_fetched;
*/
amcostestimate = (amcostestimate_function) index->amcostestimate;
amcostestimate(root, path, loop_count,
- &indexStartupCost, &indexTotalCost,
+ &indexStartupCost, &indexTotalCost, &indexSerialCost,
&indexSelectivity, &indexCorrelation);
/*
* bitmap scan doesn't care about either.
*/
path->indextotalcost = indexTotalCost;
+ path->indexserialcost = indexSerialCost;
path->indexselectivity = indexSelectivity;
/* all costs for touching index itself included here */
*/
csquared = indexCorrelation * indexCorrelation;
- run_cost += max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
+ disk_run_cost = max_IO_cost + csquared * (min_IO_cost - max_IO_cost);
+ run_cost += disk_run_cost;
/*
* Estimate CPU costs per tuple.
path->path.startup_cost = startup_cost;
path->path.total_cost = startup_cost + run_cost;
+ path->path.serial_cost = indexSerialCost + disk_run_cost;
}
/*
Cost startup_cost = 0;
Cost run_cost = 0;
Cost indexTotalCost;
+ Cost indexSerialCost;
Selectivity indexSelectivity;
QualCost qpqual_cost;
Cost cpu_per_tuple;
Cost cost_per_page;
double tuples_fetched;
double pages_fetched;
+ double disk_run_cost;
double spc_seq_page_cost,
spc_random_page_cost;
double T;
* Fetch total cost of obtaining the bitmap, as well as its total
* selectivity.
*/
- cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSelectivity);
+ cost_bitmap_tree_node(bitmapqual, &indexTotalCost, &indexSerialCost,
+ &indexSelectivity);
startup_cost += indexTotalCost;
else
cost_per_page = spc_random_page_cost;
- run_cost += pages_fetched * cost_per_page;
+ disk_run_cost = pages_fetched * cost_per_page;
+ run_cost += disk_run_cost;
/*
* Estimate CPU costs per tuple.
path->startup_cost = startup_cost;
path->total_cost = startup_cost + run_cost;
+ path->serial_cost = indexSerialCost + disk_run_cost;
}
/*
* Extract cost and selectivity from a bitmap tree node (index/and/or)
*/
void
-cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec)
+cost_bitmap_tree_node(Path *path, Cost *total_cost, Cost *serial_cost,
+ Selectivity *selec)
{
if (IsA(path, IndexPath))
{
- *cost = ((IndexPath *) path)->indextotalcost;
+ *total_cost = ((IndexPath *) path)->indextotalcost;
+ *serial_cost = ((IndexPath *) path)->indexserialcost;
*selec = ((IndexPath *) path)->indexselectivity;
/*
* scan doesn't look to be the same cost as an indexscan to retrieve a
* single tuple.
*/
- *cost += 0.1 * cpu_operator_cost * path->rows;
+ *total_cost += 0.1 * cpu_operator_cost * path->rows;
}
else if (IsA(path, BitmapAndPath))
{
- *cost = path->total_cost;
+ *total_cost = path->total_cost;
+ *serial_cost = path->serial_cost;
*selec = ((BitmapAndPath *) path)->bitmapselectivity;
}
else if (IsA(path, BitmapOrPath))
{
- *cost = path->total_cost;
+ *total_cost = path->total_cost;
+ *serial_cost = path->serial_cost;
*selec = ((BitmapOrPath *) path)->bitmapselectivity;
}
else
{
elog(ERROR, "unrecognized node type: %d", nodeTag(path));
- *cost = *selec = 0; /* keep compiler quiet */
+ *total_cost = *serial_cost = *selec = 0; /* keep compiler quiet */
}
}
cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root)
{
Cost totalCost;
+ Cost serialCost;
Selectivity selec;
ListCell *l;
* definitely too simplistic?
*/
totalCost = 0.0;
+ serialCost = 0.0;
selec = 1.0;
foreach(l, path->bitmapquals)
{
Path *subpath = (Path *) lfirst(l);
- Cost subCost;
+ Cost subTotalCost;
+ Cost subSerialCost;
Selectivity subselec;
- cost_bitmap_tree_node(subpath, &subCost, &subselec);
+ cost_bitmap_tree_node(subpath, &subTotalCost, &subSerialCost,
+ &subselec);
selec *= subselec;
- totalCost += subCost;
+ totalCost += subTotalCost;
+ serialCost += subSerialCost;
if (l != list_head(path->bitmapquals))
totalCost += 100.0 * cpu_operator_cost;
}
path->path.rows = 0; /* per above, not used */
path->path.startup_cost = totalCost;
path->path.total_cost = totalCost;
+ path->path.serial_cost = serialCost;
}
/*
cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root)
{
Cost totalCost;
+ Cost serialCost;
Selectivity selec;
ListCell *l;
* optimized out when the inputs are BitmapIndexScans.
*/
totalCost = 0.0;
+ serialCost = 0.0;
selec = 0.0;
foreach(l, path->bitmapquals)
{
Path *subpath = (Path *) lfirst(l);
- Cost subCost;
+ Cost subTotalCost;
+ Cost subSerialCost;
Selectivity subselec;
- cost_bitmap_tree_node(subpath, &subCost, &subselec);
+ cost_bitmap_tree_node(subpath, &subTotalCost, &subSerialCost,
+ &subselec);
selec += subselec;
- totalCost += subCost;
+ totalCost += subTotalCost;
+ serialCost += subSerialCost;
if (l != list_head(path->bitmapquals) &&
!IsA(subpath, IndexPath))
totalCost += 100.0 * cpu_operator_cost;
path->path.rows = 0; /* per above, not used */
path->path.startup_cost = totalCost;
path->path.total_cost = totalCost;
+ path->path.serial_cost = serialCost;
}
/*
{
Cost startup_cost = 0;
Cost run_cost = 0;
+ Cost disk_run_cost;
bool isCurrentOf = false;
QualCost qpqual_cost;
Cost cpu_per_tuple;
NULL);
/* disk costs --- assume each tuple on a different page */
- run_cost += spc_random_page_cost * ntuples;
+ disk_run_cost = spc_random_page_cost * ntuples;
+ run_cost += disk_run_cost;
/* Add scanning CPU costs */
get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost);
path->startup_cost = startup_cost;
path->total_cost = startup_cost + run_cost;
+ path->serial_cost = disk_run_cost;
}
/*
path->path.startup_cost += startup_cost;
path->path.total_cost += startup_cost + run_cost;
+ path->path.serial_cost = path->subpath->serial_cost;
}
/*
path->startup_cost = startup_cost;
path->total_cost = startup_cost + run_cost;
+ path->serial_cost = 0; /* all CPU cost */
}
/*
path->startup_cost = startup_cost;
path->total_cost = startup_cost + run_cost;
+ path->serial_cost = 0; /* all CPU cost */
}
/*
path->startup_cost = startup_cost;
path->total_cost = startup_cost + run_cost;
+ path->serial_cost = 0; /* all CPU cost? can't this spill to disk? */
}
/*
runion->startup_cost = startup_cost;
runion->total_cost = total_cost;
+ runion->serial_cost = nrterm->serial_cost;
runion->rows = total_rows;
runion->pathtarget->width = Max(nrterm->pathtarget->width,
rterm->pathtarget->width);
{
Cost startup_cost = input_cost;
Cost run_cost = 0;
+ Cost disk_run_cost = 0;
double input_bytes = relation_byte_size(tuples, width);
double output_bytes;
double output_tuples;
log_runs = 1.0;
npageaccesses = 2.0 * npages * log_runs;
/* Assume 3/4ths of accesses are sequential, 1/4th are not */
- startup_cost += npageaccesses *
+ disk_run_cost = npageaccesses *
(seq_page_cost * 0.75 + random_page_cost * 0.25);
+ startup_cost += disk_run_cost;
}
else if (tuples > 2 * output_tuples || input_bytes > sort_mem_bytes)
{
path->startup_cost = startup_cost;
path->total_cost = startup_cost + run_cost;
+ path->serial_cost = disk_run_cost;
}
/*
* 'n_streams' is the number of input streams
* 'input_startup_cost' is the sum of the input streams' startup costs
* 'input_total_cost' is the sum of the input streams' total costs
+ * 'input_serial_cost' is the sum of the input streams' serial costs
* 'tuples' is the number of tuples in all the streams
*/
void
cost_merge_append(Path *path, PlannerInfo *root,
List *pathkeys, int n_streams,
Cost input_startup_cost, Cost input_total_cost,
- double tuples)
+ Cost input_serial_cost, double tuples)
{
Cost startup_cost = 0;
Cost run_cost = 0;
path->startup_cost = startup_cost + input_startup_cost;
path->total_cost = startup_cost + run_cost + input_total_cost;
+ path->serial_cost = input_serial_cost;
}
/*
void
cost_material(Path *path,
Cost input_startup_cost, Cost input_total_cost,
- double tuples, int width)
+ Cost input_serial_cost, double tuples, int width)
{
Cost startup_cost = input_startup_cost;
Cost run_cost = input_total_cost - input_startup_cost;
+ Cost serial_cost = input_serial_cost;
double nbytes = relation_byte_size(tuples, width);
long work_mem_bytes = work_mem * 1024L;
double npages = ceil(nbytes / BLCKSZ);
run_cost += seq_page_cost * npages;
+ serial_cost += seq_page_cost * npages;
}
path->startup_cost = startup_cost;
path->total_cost = startup_cost + run_cost;
+ path->serial_cost = serial_cost;
}
/*
AggStrategy aggstrategy, const AggClauseCosts *aggcosts,
int numGroupCols, double numGroups,
Cost input_startup_cost, Cost input_total_cost,
- double input_tuples)
+ Cost input_serial_cost, double input_tuples)
{
double output_tuples;
Cost startup_cost;
path->rows = output_tuples;
path->startup_cost = startup_cost;
path->total_cost = total_cost;
+ path->serial_cost = input_serial_cost;
}
/*
cost_windowagg(Path *path, PlannerInfo *root,
List *windowFuncs, int numPartCols, int numOrderCols,
Cost input_startup_cost, Cost input_total_cost,
- double input_tuples)
+ Cost input_serial_cost, double input_tuples)
{
Cost startup_cost;
Cost total_cost;
path->rows = input_tuples;
path->startup_cost = startup_cost;
path->total_cost = total_cost;
+ path->serial_cost = input_serial_cost;
}
/*
cost_group(Path *path, PlannerInfo *root,
int numGroupCols, double numGroups,
Cost input_startup_cost, Cost input_total_cost,
- double input_tuples)
+ Cost input_serial_cost, double input_tuples)
{
Cost startup_cost;
Cost total_cost;
path->rows = numGroups;
path->startup_cost = startup_cost;
path->total_cost = total_cost;
+ path->serial_cost = input_serial_cost;
}
/*
double inner_path_rows = inner_path->rows;
Cost startup_cost = workspace->startup_cost;
Cost run_cost = workspace->run_cost;
+ Cost serial_cost = outer_path->serial_cost;
+ Cost inner_rescan_serial_cost;
Cost cpu_per_tuple;
QualCost restrict_qual_cost;
double ntuples;
if (!enable_nestloop)
startup_cost += disable_cost;
+ /*
+ * We don't have a mechanism for accurately estimating the serial
+ * cost of rescanning the inner path. So assume it's the same as the
+ * serial cost of the first scan -- unless that'd be more than the
+ * total cost of a rescan, which would be an obviously unreasonable
+ * result.
+ */
+ inner_rescan_serial_cost = Min(inner_path->serial_cost,
+ workspace->inner_rescan_run_cost);
+
/* cost of inner-relation source data (we already dealt with outer rel) */
if (path->jointype == JOIN_SEMI || path->jointype == JOIN_ANTI)
* inner_rescan_run_cost for additional ones.
*/
run_cost += inner_run_cost * inner_scan_frac;
+ serial_cost += inner_path->serial_cost * inner_scan_frac;
if (outer_matched_rows > 1)
+ {
run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;
+ serial_cost += (outer_matched_rows - 1) * inner_rescan_serial_cost * inner_scan_frac;
+ }
/*
* Add the cost of inner-scan executions for unmatched outer rows.
*/
run_cost += (outer_path_rows - outer_matched_rows) *
inner_rescan_run_cost / inner_path_rows;
+ serial_cost += (outer_path_rows - outer_matched_rows) *
+ inner_rescan_serial_cost / inner_path_rows;
/*
* We won't be evaluating any quals at all for unmatched rows, so
* conservative and always charge the whole first-scan cost once.
*/
run_cost += inner_run_cost;
+ serial_cost += inner_path->serial_cost;
/* Add inner run cost for additional outer tuples having matches */
if (outer_matched_rows > 1)
+ {
run_cost += (outer_matched_rows - 1) * inner_rescan_run_cost * inner_scan_frac;
+ serial_cost += (outer_matched_rows - 1) * inner_rescan_serial_cost * inner_scan_frac;
+ }
/* Add inner run cost for unmatched outer tuples */
run_cost += (outer_path_rows - outer_matched_rows) *
inner_rescan_run_cost;
+ serial_cost += (outer_path_rows - outer_matched_rows) *
+ inner_rescan_serial_cost;
/* And count the unmatched join tuples as being processed */
ntuples += (outer_path_rows - outer_matched_rows) *
}
else
{
- /* Normal-case source costs were included in preliminary estimate */
+ /*
+ * Normal-case source costs were included in preliminary estimate, but
+ * we still need to work out the serial cost.
+ */
+ serial_cost += inner_path->serial_cost;
+ if (outer_path_rows > 1)
+ serial_cost += (outer_path_rows - 1) * inner_rescan_serial_cost;
/* Compute number of tuples processed (not number emitted!) */
ntuples = outer_path_rows * inner_path_rows;
path->path.startup_cost = startup_cost;
path->path.total_cost = startup_cost + run_cost;
+ path->path.serial_cost = serial_cost;
}
/*
{
/* duplicate clauseids, keep the cheaper one */
Cost ncost;
+ Cost nscost;
Cost ocost;
+ Cost oscost;
Selectivity nselec;
Selectivity oselec;
- cost_bitmap_tree_node(pathinfo->path, &ncost, &nselec);
- cost_bitmap_tree_node(pathinfoarray[i]->path, &ocost, &oselec);
+ cost_bitmap_tree_node(pathinfo->path, &ncost, &nscost, &nselec);
+ cost_bitmap_tree_node(pathinfoarray[i]->path, &ocost, &oscost,
+ &oselec);
if (ncost < ocost)
pathinfoarray[i] = pathinfo;
}
PathClauseUsage *pb = *(PathClauseUsage *const *) b;
Cost acost;
Cost bcost;
+ Cost ascost;
+ Cost bscost;
Selectivity aselec;
Selectivity bselec;
- cost_bitmap_tree_node(pa->path, &acost, &aselec);
- cost_bitmap_tree_node(pb->path, &bcost, &bselec);
+ cost_bitmap_tree_node(pa->path, &acost, &ascost, &aselec);
+ cost_bitmap_tree_node(pb->path, &bcost, &bscost, &bselec);
/*
* If costs are the same, sort by selectivity.
/* and set its cost/width fields appropriately */
plan->startup_cost = 0.0;
plan->total_cost = ipath->indextotalcost;
+ plan->serial_cost = ipath->indexserialcost;
plan->plan_rows =
clamp_row_est(ipath->indexselectivity * ipath->path.parent->tuples);
plan->plan_width = 0; /* meaningless */
{
dest->startup_cost = src->startup_cost;
dest->total_cost = src->total_cost;
+ dest->serial_cost = src->serial_cost;
dest->plan_rows = src->rows;
dest->plan_width = src->pathtarget->width;
dest->parallel_aware = src->parallel_aware;
cost_material(&matpath,
subplan->startup_cost,
subplan->total_cost,
+ subplan->serial_cost,
subplan->plan_rows,
subplan->plan_width);
matplan->startup_cost = matpath.startup_cost;
cost_agg(&hashed_p, root, AGG_HASHED, NULL,
numGroupCols, dNumGroups,
input_path->startup_cost, input_path->total_cost,
- input_path->rows);
+ input_path->serial_cost, input_path->rows);
/*
* Now for the sorted case. Note that the input is *always* unsorted,
0.0, work_mem, -1.0);
cost_group(&sorted_p, root, numGroupCols, dNumGroups,
sorted_p.startup_cost, sorted_p.total_cost,
- input_path->rows);
+ sorted_p.serial_cost, input_path->rows);
/*
* Now make the decision using the top-level tuple fraction. First we
MergeAppendPath *pathnode = makeNode(MergeAppendPath);
Cost input_startup_cost;
Cost input_total_cost;
+ Cost input_serial_cost;
ListCell *l;
pathnode->path.pathtype = T_MergeAppend;
pathnode->path.rows = 0;
input_startup_cost = 0;
input_total_cost = 0;
+ input_serial_cost = 0;
foreach(l, subpaths)
{
Path *subpath = (Path *) lfirst(l);
pathnode->limit_tuples);
input_startup_cost += sort_path.startup_cost;
input_total_cost += sort_path.total_cost;
+ input_serial_cost += sort_path.serial_cost;
}
/* All child paths must have same parameterization */
cost_merge_append(&pathnode->path, root,
pathkeys, list_length(subpaths),
input_startup_cost, input_total_cost,
- rel->tuples);
+ input_serial_cost, rel->tuples);
return pathnode;
}
cost_material(&pathnode->path,
subpath->startup_cost,
subpath->total_cost,
+ subpath->serial_cost,
subpath->rows,
subpath->pathtarget->width);
numCols, pathnode->path.rows,
subpath->startup_cost,
subpath->total_cost,
+ subpath->serial_cost,
rel->rows);
}
list_length(groupClause),
numGroups,
subpath->startup_cost, subpath->total_cost,
- subpath->rows);
+ subpath->serial_cost, subpath->rows);
/* add tlist eval cost for each output row */
pathnode->path.startup_cost += target->cost.startup;
aggstrategy, aggcosts,
list_length(groupClause), numGroups,
subpath->startup_cost, subpath->total_cost,
- subpath->rows);
+ subpath->serial_cost, subpath->rows);
/* add tlist eval cost for each output row */
pathnode->path.startup_cost += target->cost.startup;
numGroups,
subpath->startup_cost,
subpath->total_cost,
+ subpath->serial_cost,
subpath->rows);
/*
numGroups, /* XXX surely not right for all steps? */
sort_path.startup_cost,
sort_path.total_cost,
+ sort_path.serial_cost,
sort_path.rows);
pathnode->path.total_cost += agg_path.total_cost;
list_length(winclause->orderClause),
subpath->startup_cost,
subpath->total_cost,
+ subpath->serial_cost,
subpath->rows);
/* add tlist eval cost for each output row */
pathnode->path.startup_cost = subpath->startup_cost;
pathnode->path.total_cost = subpath->total_cost +
cpu_operator_cost * subpath->rows * list_length(distinctList);
+ pathnode->path.serial_cost = subpath->serial_cost;
pathnode->path.rows = outputRows;
return pathnode;
/* These are the values the cost estimator must return to the planner */
Cost indexStartupCost; /* index-related startup cost */
Cost indexTotalCost; /* total index-related scan cost */
+ Cost indexSerialCost; /* non-parallelizable portion */
Selectivity indexSelectivity; /* selectivity of index */
double indexCorrelation; /* order correlation of index */
List *indexOrderBys = path->indexorderbys;
Cost indexStartupCost;
Cost indexTotalCost;
+ Cost indexSerialCost;
Selectivity indexSelectivity;
double indexCorrelation;
double numIndexPages;
indexTotalCost = numIndexPages * spc_random_page_cost;
}
+ /*
+ * Everything after this point is just CPU overhead, which permits
+ * effective parallelism.
+ */
+ indexSerialCost = indexTotalCost;
+
/*
* CPU cost: any complex expressions in the indexquals will need to be
* evaluated once at the start of the scan to reduce them to runtime keys
*/
costs->indexStartupCost = indexStartupCost;
costs->indexTotalCost = indexTotalCost;
+ costs->indexSerialCost = indexSerialCost;
costs->indexSelectivity = indexSelectivity;
costs->indexCorrelation = indexCorrelation;
costs->numIndexPages = numIndexPages;
void
btcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
Cost *indexStartupCost, Cost *indexTotalCost,
+ Cost *indexSerialCost,
Selectivity *indexSelectivity, double *indexCorrelation)
{
IndexOptInfo *index = path->indexinfo;
*indexStartupCost = costs.indexStartupCost;
*indexTotalCost = costs.indexTotalCost;
+ *indexSerialCost = costs.indexSerialCost;
*indexSelectivity = costs.indexSelectivity;
*indexCorrelation = costs.indexCorrelation;
}
void
hashcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
Cost *indexStartupCost, Cost *indexTotalCost,
+ Cost *indexSerialCost,
Selectivity *indexSelectivity, double *indexCorrelation)
{
List *qinfos;
*indexStartupCost = costs.indexStartupCost;
*indexTotalCost = costs.indexTotalCost;
+ *indexSerialCost = costs.indexSerialCost;
*indexSelectivity = costs.indexSelectivity;
*indexCorrelation = costs.indexCorrelation;
}
void
gistcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
Cost *indexStartupCost, Cost *indexTotalCost,
+ Cost *indexSerialCost,
Selectivity *indexSelectivity, double *indexCorrelation)
{
IndexOptInfo *index = path->indexinfo;
*indexStartupCost = costs.indexStartupCost;
*indexTotalCost = costs.indexTotalCost;
+ *indexSerialCost = costs.indexSerialCost;
*indexSelectivity = costs.indexSelectivity;
*indexCorrelation = costs.indexCorrelation;
}
void
spgcostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
Cost *indexStartupCost, Cost *indexTotalCost,
+ Cost *indexSerialCost,
Selectivity *indexSelectivity, double *indexCorrelation)
{
IndexOptInfo *index = path->indexinfo;
*indexStartupCost = costs.indexStartupCost;
*indexTotalCost = costs.indexTotalCost;
+ *indexSerialCost = costs.indexSerialCost;
*indexSelectivity = costs.indexSelectivity;
*indexCorrelation = costs.indexCorrelation;
}
void
gincostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
Cost *indexStartupCost, Cost *indexTotalCost,
+ Cost *indexSerialCost,
Selectivity *indexSelectivity, double *indexCorrelation)
{
IndexOptInfo *index = path->indexinfo;
*indexTotalCost = *indexStartupCost +
dataPagesFetched * spc_random_page_cost;
+ /*
+ * Everything after this point is just CPU overhead, which permits
+ * effective parallelism.
+ */
+ *indexSerialCost = *indexTotalCost;
+
/*
* Add on index qual eval costs, much as in genericcostestimate
*/
void
brincostestimate(PlannerInfo *root, IndexPath *path, double loop_count,
Cost *indexStartupCost, Cost *indexTotalCost,
+ Cost *indexSerialCost,
Selectivity *indexSelectivity, double *indexCorrelation)
{
IndexOptInfo *index = path->indexinfo;
*/
*indexTotalCost = spc_random_page_cost * numPages * loop_count;
+ /*
+ * Everything after this point is just CPU overhead, which permits
+ * effective parallelism.
+ */
+ *indexSerialCost = *indexTotalCost;
+
*indexSelectivity =
clauselist_selectivity(root, indexQuals,
path->indexinfo->rel->relid,
double loop_count,
Cost *indexStartupCost,
Cost *indexTotalCost,
+ Cost *indexSerialCost,
Selectivity *indexSelectivity,
double *indexCorrelation);
*/
Cost startup_cost; /* cost expended before fetching any tuples */
Cost total_cost; /* total cost (assuming all tuples fetched) */
+ Cost serial_cost; /* non-parallelizable portion of total cost */
/*
* planner's estimate of result size of this plan step
double rows; /* estimated number of result tuples */
Cost startup_cost; /* cost expended before fetching any tuples */
Cost total_cost; /* total cost (assuming all tuples fetched) */
+ Cost serial_cost; /* non-parallelizable portion of total cost */
List *pathkeys; /* sort ordering of path's output */
/* pathkeys is a List of PathKey nodes; see above */
* NoMovementScanDirection for an indexscan, but the planner wants to
* distinguish ordered from unordered indexes for building pathkeys.)
*
- * 'indextotalcost' and 'indexselectivity' are saved in the IndexPath so that
- * we need not recompute them when considering using the same index in a
- * bitmap index/heap scan (see BitmapHeapPath). The costs of the IndexPath
- * itself represent the costs of an IndexScan or IndexOnlyScan plan type.
+ * 'indextotalcost', 'indexserialcost', and 'indexselectivity' are saved in
+ * the IndexPath so that we need not recompute them when considering using the
+ * same index in a bitmap index/heap scan (see BitmapHeapPath).  The costs
+ * of the IndexPath itself represent the costs of an IndexScan or IndexOnlyScan
+ * plan type.
*----------
*/
typedef struct IndexPath
List *indexorderbycols;
ScanDirection indexscandir;
Cost indextotalcost;
+ Cost indexserialcost;
Selectivity indexselectivity;
} IndexPath;
Path *bitmapqual, double loop_count);
extern void cost_bitmap_and_node(BitmapAndPath *path, PlannerInfo *root);
extern void cost_bitmap_or_node(BitmapOrPath *path, PlannerInfo *root);
-extern void cost_bitmap_tree_node(Path *path, Cost *cost, Selectivity *selec);
+extern void cost_bitmap_tree_node(Path *path, Cost *total_cost,
+ Cost *serial_cost, Selectivity *selec);
extern void cost_tidscan(Path *path, PlannerInfo *root,
RelOptInfo *baserel, List *tidquals, ParamPathInfo *param_info);
extern void cost_subqueryscan(SubqueryScanPath *path, PlannerInfo *root,
extern void cost_merge_append(Path *path, PlannerInfo *root,
List *pathkeys, int n_streams,
Cost input_startup_cost, Cost input_total_cost,
- double tuples);
+ Cost input_serial_cost, double tuples);
extern void cost_material(Path *path,
Cost input_startup_cost, Cost input_total_cost,
- double tuples, int width);
+ Cost input_serial_cost, double tuples, int width);
extern void cost_agg(Path *path, PlannerInfo *root,
AggStrategy aggstrategy, const AggClauseCosts *aggcosts,
int numGroupCols, double numGroups,
Cost input_startup_cost, Cost input_total_cost,
- double input_tuples);
+ Cost input_serial_cost, double input_tuples);
extern void cost_windowagg(Path *path, PlannerInfo *root,
List *windowFuncs, int numPartCols, int numOrderCols,
Cost input_startup_cost, Cost input_total_cost,
- double input_tuples);
+ Cost input_serial_cost, double input_tuples);
extern void cost_group(Path *path, PlannerInfo *root,
int numGroupCols, double numGroups,
Cost input_startup_cost, Cost input_total_cost,
- double input_tuples);
+ Cost input_serial_cost, double input_tuples);
extern void initial_cost_nestloop(PlannerInfo *root,
JoinCostWorkspace *workspace,
JoinType jointype,
double loop_count,
Cost *indexStartupCost,
Cost *indexTotalCost,
+ Cost *indexSerialCost,
Selectivity *indexSelectivity,
double *indexCorrelation);
extern void btcostestimate(struct PlannerInfo *root,
double loop_count,
Cost *indexStartupCost,
Cost *indexTotalCost,
+ Cost *indexSerialCost,
Selectivity *indexSelectivity,
double *indexCorrelation);
extern void hashcostestimate(struct PlannerInfo *root,
double loop_count,
Cost *indexStartupCost,
Cost *indexTotalCost,
+ Cost *indexSerialCost,
Selectivity *indexSelectivity,
double *indexCorrelation);
extern void gistcostestimate(struct PlannerInfo *root,
double loop_count,
Cost *indexStartupCost,
Cost *indexTotalCost,
+ Cost *indexSerialCost,
Selectivity *indexSelectivity,
double *indexCorrelation);
extern void spgcostestimate(struct PlannerInfo *root,
double loop_count,
Cost *indexStartupCost,
Cost *indexTotalCost,
+ Cost *indexSerialCost,
Selectivity *indexSelectivity,
double *indexCorrelation);
extern void gincostestimate(struct PlannerInfo *root,
double loop_count,
Cost *indexStartupCost,
Cost *indexTotalCost,
+ Cost *indexSerialCost,
Selectivity *indexSelectivity,
double *indexCorrelation);