Commit e4158319 authored by Tom Lane

Mop-up for parallel degree-ectomy.

Fix a couple of overlooked uses of "degree" terminology.  Make the parallel
worker count selection logic in create_plain_partial_paths more robust (in
particular, it failed with max_parallel_workers_per_gather set to zero).
parent c9ce4a1c
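
For readers skimming the diff below, here is a minimal, self-contained sketch of how the worker-count selection reads once this patch is applied. It is an illustration rather than the actual planner code: choose_parallel_workers and its plain parameters are invented stand-ins for create_plain_partial_paths and its RelOptInfo/GUC inputs, INT_MAX substitutes for PG_INT32_MAX, and the small-table early exit that precedes this logic in the real function is elided.

#include <limits.h>
#include <stdio.h>

/*
 * Sketch of the post-patch selection logic: take the parallel_workers
 * reloption if it was set, otherwise scale the worker count with the log
 * (base 3) of the relation size, then clamp to the GUC and suppress the
 * partial path entirely if that leaves no workers.
 */
static int
choose_parallel_workers(long rel_pages, int rel_parallel_workers,
                        int max_parallel_workers_per_gather)
{
    int     parallel_workers;

    if (rel_parallel_workers != -1)
        parallel_workers = rel_parallel_workers;
    else
    {
        long    parallel_threshold = 1000;

        parallel_workers = 1;
        while (rel_pages > parallel_threshold * 3)
        {
            parallel_workers++;
            parallel_threshold *= 3;
            if (parallel_threshold >= INT_MAX / 3)
                break;          /* avoid overflow */
        }
    }

    /* In no case use more than max_parallel_workers_per_gather workers. */
    if (parallel_workers > max_parallel_workers_per_gather)
        parallel_workers = max_parallel_workers_per_gather;

    /* Zero or less means no parallel scan at all. */
    return parallel_workers > 0 ? parallel_workers : 0;
}

int
main(void)
{
    /* With the GUC at zero, no workers are chosen regardless of table size. */
    printf("%d\n", choose_parallel_workers(100000, -1, 0));    /* prints 0 */
    /* A 10000-page table with the GUC at 2 gets 2 workers (3 before clamping). */
    printf("%d\n", choose_parallel_workers(10000, -1, 2));     /* prints 2 */
    return 0;
}

The point of the restructuring is visible at the end of the sketch: the clamp to max_parallel_workers_per_gather and the zero check now apply to both branches, so a zero GUC or reloption simply suppresses the partial path.
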
@@ -669,27 +669,14 @@ set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
 static void
 create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel)
 {
-    int         parallel_workers = 1;
+    int         parallel_workers;
 
     /*
-     * If the user has set the parallel_workers reloption, we decide what to do
-     * based on the value of that option. Otherwise, we estimate a value.
+     * If the user has set the parallel_workers reloption, use that; otherwise
+     * select a default number of workers.
      */
     if (rel->rel_parallel_workers != -1)
-    {
-        /*
-         * If parallel_workers = 0 is set for this relation, bail out. The
-         * user does not want a parallel path for this relation.
-         */
-        if (rel->rel_parallel_workers == 0)
-            return;
-
-        /*
-         * Use the table parallel_workers, but don't go further than
-         * max_parallel_workers_per_gather.
-         */
-        parallel_workers = Min(rel->rel_parallel_workers, max_parallel_workers_per_gather);
-    }
+        parallel_workers = rel->rel_parallel_workers;
     else
     {
         int         parallel_threshold = 1000;
@@ -706,20 +693,29 @@ create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel)
             return;
 
         /*
-         * Limit the degree of parallelism logarithmically based on the size
-         * of the relation. This probably needs to be a good deal more
+         * Select the number of workers based on the log of the size of the
+         * relation. This probably needs to be a good deal more
          * sophisticated, but we need something here for now.
          */
-        while (rel->pages > parallel_threshold * 3 &&
-               parallel_workers < max_parallel_workers_per_gather)
+        parallel_workers = 1;
+        while (rel->pages > parallel_threshold * 3)
         {
            parallel_workers++;
            parallel_threshold *= 3;
            if (parallel_threshold >= PG_INT32_MAX / 3)
-               break;
+               break;           /* avoid overflow */
         }
     }
 
+    /*
+     * In no case use more than max_parallel_workers_per_gather workers.
+     */
+    parallel_workers = Min(parallel_workers, max_parallel_workers_per_gather);
+
+    /* If any limit was set to zero, the user doesn't want a parallel scan. */
+    if (parallel_workers <= 0)
+        return;
+
     /* Add an unordered partial path based on a parallel sequential scan. */
     add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers));
 }
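
In concrete terms, with parallel_threshold left at 1000 the new loop chooses 1 worker for a table of up to 3,000 pages (among tables big enough to reach this code), 2 up to 9,000, 3 up to 27,000, and so on, tripling the threshold at each step; the result is then clamped to max_parallel_workers_per_gather, and if that clamp, or a parallel_workers reloption of zero, leaves no workers, the function now returns without adding a partial path at all, which covers the max_parallel_workers_per_gather = 0 case called out in the commit message.
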
@@ -128,8 +128,8 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent,
     estimate_rel_size(relation, rel->attr_widths - rel->min_attr,
                       &rel->pages, &rel->tuples, &rel->allvisfrac);
 
-    /* Retrive the parallel_workers reloption, if set. */
-    rel->rel_parallel_workers = RelationGetParallelDegree(relation, -1);
+    /* Retrieve the parallel_workers reloption, or -1 if not set. */
+    rel->rel_parallel_workers = RelationGetParallelWorkers(relation, -1);
 
     /*
      * Make list of indexes. Ignore indexes on system catalogs if told to.
@@ -235,19 +235,20 @@ typedef struct StdRdOptions
 /*
  * RelationIsUsedAsCatalogTable
  *      Returns whether the relation should be treated as a catalog table
- *      from the pov of logical decoding. Note multiple eval or argument!
+ *      from the pov of logical decoding. Note multiple eval of argument!
  */
 #define RelationIsUsedAsCatalogTable(relation) \
     ((relation)->rd_options ? \
      ((StdRdOptions *) (relation)->rd_options)->user_catalog_table : false)
 
 /*
- * RelationGetParallelDegree
- *      Returns the relation's parallel_workers. Note multiple eval of argument!
+ * RelationGetParallelWorkers
+ *      Returns the relation's parallel_workers reloption setting.
+ *      Note multiple eval of argument!
  */
-#define RelationGetParallelDegree(relation, defaultpd) \
+#define RelationGetParallelWorkers(relation, defaultpw) \
     ((relation)->rd_options ? \
-     ((StdRdOptions *) (relation)->rd_options)->parallel_workers : (defaultpd))
+     ((StdRdOptions *) (relation)->rd_options)->parallel_workers : (defaultpw))
 
 /*
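
As with RelationIsUsedAsCatalogTable above, the renamed macro expands its relation argument twice, once in the test on rd_options and once in the field access, which is what the "multiple eval of argument" note warns about; callers therefore pass a simple variable, as get_relation_info does above with RelationGetParallelWorkers(relation, -1).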