Commit c9ce4a1c authored by Robert Haas

Eliminate "parallel degree" terminology.

This terminology provoked widespread complaints.  So, instead, rename
the GUC max_parallel_degree to max_parallel_workers_per_gather
(leaving room for a possible future GUC max_parallel_workers that acts
as a system-wide limit), and rename the parallel_degree reloption to
parallel_workers.  Rename structure members to match.

These changes create a dump/restore hazard for users of PostgreSQL
9.6beta1 who have set the reloption (or applied the GUC using ALTER
USER or ALTER DATABASE).
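
After restoring such a dump, the settings have to be re-applied under the new names. A minimal sketch, assuming placeholder object names (some_table, some_db, some_user) and an arbitrary value of 4:

    ALTER TABLE some_table SET (parallel_workers = 4);               -- was parallel_degree
    ALTER DATABASE some_db SET max_parallel_workers_per_gather = 4;  -- was max_parallel_degree
    ALTER USER some_user SET max_parallel_workers_per_gather = 4;    -- was max_parallel_degree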
parent 6581e930
...@@ -1998,16 +1998,16 @@ include_dir 'conf.d' ...@@ -1998,16 +1998,16 @@ include_dir 'conf.d'
</listitem> </listitem>
</varlistentry> </varlistentry>
<varlistentry id="guc-max-parallel-degree" xreflabel="max_parallel_degree"> <varlistentry id="guc-max-parallel-workers-per-gather" xreflabel="max_parallel_workers_per_gather">
<term><varname>max_parallel_degree</varname> (<type>integer</type>) <term><varname>max_parallel_workers_per_gather</varname> (<type>integer</type>)
<indexterm> <indexterm>
<primary><varname>max_parallel_degree</> configuration parameter</primary> <primary><varname>max_parallel_workers_per_gather</> configuration parameter</primary>
</indexterm> </indexterm>
</term> </term>
<listitem> <listitem>
<para> <para>
Sets the maximum number of workers that can be started for an Sets the maximum number of workers that can be started by a single
individual parallel operation. Parallel workers are taken from the <literal>Gather</literal> node. Parallel workers are taken from the
pool of processes established by pool of processes established by
<xref linkend="guc-max-worker-processes">. Note that the requested <xref linkend="guc-max-worker-processes">. Note that the requested
number of workers may not actually be available at runtime. If this number of workers may not actually be available at runtime. If this
......
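
For illustration only (not part of this patch), the renamed GUC can be adjusted per session like any other setting; the value 4 is an arbitrary example, and the workers actually launched still come out of the max_worker_processes pool:

    SET max_parallel_workers_per_gather = 4;   -- per-session override
    SHOW max_parallel_workers_per_gather;      -- defaults to 2, settable from 0 to 1024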
...@@ -909,14 +909,14 @@ CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXI ...@@ -909,14 +909,14 @@ CREATE [ [ GLOBAL | LOCAL ] { TEMPORARY | TEMP } | UNLOGGED ] TABLE [ IF NOT EXI
</varlistentry> </varlistentry>
<varlistentry> <varlistentry>
<term><literal>parallel_degree</> (<type>integer</>)</term> <term><literal>parallel_workers</> (<type>integer</>)</term>
<listitem> <listitem>
<para> <para>
The parallel degree for a table is the number of workers that should This sets the number of workers that should be used to assist a parallel
be used to assist a parallel scan of that table. If not set, the scan of this table. If not set, the system will determine a value based
system will determine a value based on the relation size. The actual on the relation size. The actual number of workers chosen by the planner
number of workers chosen by the planner may be less, for example due to may be less, for example due to
the setting of <xref linkend="guc-max-parallel-degree">. the setting of <xref linkend="guc-max-worker-processes">.
</para> </para>
</listitem> </listitem>
</varlistentry> </varlistentry>
......
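
A usage sketch for the renamed storage parameter (table and column names are placeholders; not part of the patch):

    CREATE TABLE measurements (ts timestamptz, reading float8)
        WITH (parallel_workers = 4);                      -- was parallel_degree in 9.6beta1
    ALTER TABLE measurements RESET (parallel_workers);    -- back to the size-based estimate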
...@@ -153,7 +153,7 @@ ...@@ -153,7 +153,7 @@
<para> <para>
Use of parallel query execution can be controlled through the new Use of parallel query execution can be controlled through the new
configuration parameters configuration parameters
<xref linkend="guc-max-parallel-degree">, <xref linkend="guc-max-parallel-workers-per-gather">,
<xref linkend="guc-force-parallel-mode">, <xref linkend="guc-force-parallel-mode">,
<xref linkend="guc-parallel-setup-cost">, and <xref linkend="guc-parallel-setup-cost">, and
<xref linkend="guc-parallel-tuple-cost">. <xref linkend="guc-parallel-tuple-cost">.
......
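
Purely as an illustration of the parameters this release note refers to (not part of the patch), a session experimenting with parallel query might use:

    SET max_parallel_workers_per_gather = 2;   -- cap workers per Gather node
    SET force_parallel_mode = on;              -- force a Gather over safe plans, for testing
    SET parallel_setup_cost = 0;               -- make the planner more willing to go parallel
    SET parallel_tuple_cost = 0;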
...@@ -270,7 +270,7 @@ static relopt_int intRelOpts[] = ...@@ -270,7 +270,7 @@ static relopt_int intRelOpts[] =
}, },
{ {
{ {
"parallel_degree", "parallel_workers",
"Number of parallel processes that can be used per executor node for this relation.", "Number of parallel processes that can be used per executor node for this relation.",
RELOPT_KIND_HEAP, RELOPT_KIND_HEAP,
AccessExclusiveLock AccessExclusiveLock
...@@ -1301,8 +1301,8 @@ default_reloptions(Datum reloptions, bool validate, relopt_kind kind) ...@@ -1301,8 +1301,8 @@ default_reloptions(Datum reloptions, bool validate, relopt_kind kind)
offsetof(StdRdOptions, autovacuum) +offsetof(AutoVacOpts, analyze_scale_factor)}, offsetof(StdRdOptions, autovacuum) +offsetof(AutoVacOpts, analyze_scale_factor)},
{"user_catalog_table", RELOPT_TYPE_BOOL, {"user_catalog_table", RELOPT_TYPE_BOOL,
offsetof(StdRdOptions, user_catalog_table)}, offsetof(StdRdOptions, user_catalog_table)},
{"parallel_degree", RELOPT_TYPE_INT, {"parallel_workers", RELOPT_TYPE_INT,
offsetof(StdRdOptions, parallel_degree)} offsetof(StdRdOptions, parallel_workers)}
}; };
options = parseRelOptions(reloptions, validate, kind, &numoptions); options = parseRelOptions(reloptions, validate, kind, &numoptions);
......
...@@ -1609,7 +1609,7 @@ _outPathInfo(StringInfo str, const Path *node) ...@@ -1609,7 +1609,7 @@ _outPathInfo(StringInfo str, const Path *node)
_outBitmapset(str, NULL); _outBitmapset(str, NULL);
WRITE_BOOL_FIELD(parallel_aware); WRITE_BOOL_FIELD(parallel_aware);
WRITE_BOOL_FIELD(parallel_safe); WRITE_BOOL_FIELD(parallel_safe);
WRITE_INT_FIELD(parallel_degree); WRITE_INT_FIELD(parallel_workers);
WRITE_FLOAT_FIELD(rows, "%.0f"); WRITE_FLOAT_FIELD(rows, "%.0f");
WRITE_FLOAT_FIELD(startup_cost, "%.2f"); WRITE_FLOAT_FIELD(startup_cost, "%.2f");
WRITE_FLOAT_FIELD(total_cost, "%.2f"); WRITE_FLOAT_FIELD(total_cost, "%.2f");
......
...@@ -669,26 +669,26 @@ set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) ...@@ -669,26 +669,26 @@ set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte)
static void static void
create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel) create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel)
{ {
int parallel_degree = 1; int parallel_workers = 1;
/* /*
* If the user has set the parallel_degree reloption, we decide what to do * If the user has set the parallel_workers reloption, we decide what to do
* based on the value of that option. Otherwise, we estimate a value. * based on the value of that option. Otherwise, we estimate a value.
*/ */
if (rel->rel_parallel_degree != -1) if (rel->rel_parallel_workers != -1)
{ {
/* /*
* If parallel_degree = 0 is set for this relation, bail out. The * If parallel_workers = 0 is set for this relation, bail out. The
* user does not want a parallel path for this relation. * user does not want a parallel path for this relation.
*/ */
if (rel->rel_parallel_degree == 0) if (rel->rel_parallel_workers == 0)
return; return;
/* /*
* Use the table parallel_degree, but don't go further than * Use the table parallel_workers, but don't go further than
* max_parallel_degree. * max_parallel_workers_per_gather.
*/ */
parallel_degree = Min(rel->rel_parallel_degree, max_parallel_degree); parallel_workers = Min(rel->rel_parallel_workers, max_parallel_workers_per_gather);
} }
else else
{ {
...@@ -711,9 +711,9 @@ create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel) ...@@ -711,9 +711,9 @@ create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel)
* sophisticated, but we need something here for now. * sophisticated, but we need something here for now.
*/ */
while (rel->pages > parallel_threshold * 3 && while (rel->pages > parallel_threshold * 3 &&
parallel_degree < max_parallel_degree) parallel_workers < max_parallel_workers_per_gather)
{ {
parallel_degree++; parallel_workers++;
parallel_threshold *= 3; parallel_threshold *= 3;
if (parallel_threshold >= PG_INT32_MAX / 3) if (parallel_threshold >= PG_INT32_MAX / 3)
break; break;
...@@ -721,7 +721,7 @@ create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel) ...@@ -721,7 +721,7 @@ create_plain_partial_paths(PlannerInfo *root, RelOptInfo *rel)
} }
/* Add an unordered partial path based on a parallel sequential scan. */ /* Add an unordered partial path based on a parallel sequential scan. */
add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_degree)); add_partial_path(rel, create_seqscan_path(root, rel, NULL, parallel_workers));
} }
/* /*
...@@ -1242,11 +1242,11 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, ...@@ -1242,11 +1242,11 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
{ {
AppendPath *appendpath; AppendPath *appendpath;
ListCell *lc; ListCell *lc;
int parallel_degree = 0; int parallel_workers = 0;
/* /*
* Decide what parallel degree to request for this append path. For * Decide on the number of workers to request for this append path. For
* now, we just use the maximum parallel degree of any member. It * now, we just use the maximum value from among the members. It
* might be useful to use a higher number if the Append node were * might be useful to use a higher number if the Append node were
* smart enough to spread out the workers, but it currently isn't. * smart enough to spread out the workers, but it currently isn't.
*/ */
...@@ -1254,13 +1254,13 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, ...@@ -1254,13 +1254,13 @@ set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel,
{ {
Path *path = lfirst(lc); Path *path = lfirst(lc);
parallel_degree = Max(parallel_degree, path->parallel_degree); parallel_workers = Max(parallel_workers, path->parallel_workers);
} }
Assert(parallel_degree > 0); Assert(parallel_workers > 0);
/* Generate a partial append path. */ /* Generate a partial append path. */
appendpath = create_append_path(rel, partial_subpaths, NULL, appendpath = create_append_path(rel, partial_subpaths, NULL,
parallel_degree); parallel_workers);
add_partial_path(rel, (Path *) appendpath); add_partial_path(rel, (Path *) appendpath);
} }
......
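
To restate the create_plain_partial_paths() logic above from the user's side (an illustration, not part of the patch; the table name is a placeholder): an explicit parallel_workers reloption wins but is clamped to max_parallel_workers_per_gather, a value of 0 suppresses parallel paths for the table, and otherwise the estimate adds one worker each time the table is roughly three times larger than the previous page threshold:

    ALTER TABLE big_facts SET (parallel_workers = 8);   -- request 8; still clamped to
                                                         -- max_parallel_workers_per_gather
    ALTER TABLE big_facts SET (parallel_workers = 0);    -- opt this table out of parallel scans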
...@@ -113,7 +113,7 @@ int effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE; ...@@ -113,7 +113,7 @@ int effective_cache_size = DEFAULT_EFFECTIVE_CACHE_SIZE;
Cost disable_cost = 1.0e10; Cost disable_cost = 1.0e10;
int max_parallel_degree = 2; int max_parallel_workers_per_gather = 2;
bool enable_seqscan = true; bool enable_seqscan = true;
bool enable_indexscan = true; bool enable_indexscan = true;
...@@ -229,9 +229,9 @@ cost_seqscan(Path *path, PlannerInfo *root, ...@@ -229,9 +229,9 @@ cost_seqscan(Path *path, PlannerInfo *root,
cpu_run_cost += path->pathtarget->cost.per_tuple * path->rows; cpu_run_cost += path->pathtarget->cost.per_tuple * path->rows;
/* Adjust costing for parallelism, if used. */ /* Adjust costing for parallelism, if used. */
if (path->parallel_degree > 0) if (path->parallel_workers > 0)
{ {
double parallel_divisor = path->parallel_degree; double parallel_divisor = path->parallel_workers;
double leader_contribution; double leader_contribution;
/* /*
...@@ -245,7 +245,7 @@ cost_seqscan(Path *path, PlannerInfo *root, ...@@ -245,7 +245,7 @@ cost_seqscan(Path *path, PlannerInfo *root,
* estimate that the leader spends 30% of its time servicing each * estimate that the leader spends 30% of its time servicing each
* worker, and the remainder executing the parallel plan. * worker, and the remainder executing the parallel plan.
*/ */
leader_contribution = 1.0 - (0.3 * path->parallel_degree); leader_contribution = 1.0 - (0.3 * path->parallel_workers);
if (leader_contribution > 0) if (leader_contribution > 0)
parallel_divisor += leader_contribution; parallel_divisor += leader_contribution;
......
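
To make the cost adjustment concrete (illustrative numbers only): with path->parallel_workers = 2, leader_contribution = 1.0 - 0.3 * 2 = 0.4, so parallel_divisor = 2 + 0.4 = 2.4, i.e. the costing treats the plan as being executed by about 2.4 processes rather than 2, crediting the leader with roughly 40% of a worker. With 4 or more workers the contribution term drops to zero or below and is not added.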
...@@ -1394,7 +1394,7 @@ create_gather_plan(PlannerInfo *root, GatherPath *best_path) ...@@ -1394,7 +1394,7 @@ create_gather_plan(PlannerInfo *root, GatherPath *best_path)
gather_plan = make_gather(tlist, gather_plan = make_gather(tlist,
NIL, NIL,
best_path->path.parallel_degree, best_path->path.parallel_workers,
best_path->single_copy, best_path->single_copy,
subplan); subplan);
......
...@@ -245,7 +245,7 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams) ...@@ -245,7 +245,7 @@ standard_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
glob->parallelModeOK = (cursorOptions & CURSOR_OPT_PARALLEL_OK) != 0 && glob->parallelModeOK = (cursorOptions & CURSOR_OPT_PARALLEL_OK) != 0 &&
IsUnderPostmaster && dynamic_shared_memory_type != DSM_IMPL_NONE && IsUnderPostmaster && dynamic_shared_memory_type != DSM_IMPL_NONE &&
parse->commandType == CMD_SELECT && !parse->hasModifyingCTE && parse->commandType == CMD_SELECT && !parse->hasModifyingCTE &&
parse->utilityStmt == NULL && max_parallel_degree > 0 && parse->utilityStmt == NULL && max_parallel_workers_per_gather > 0 &&
!IsParallelWorker() && !IsolationIsSerializable() && !IsParallelWorker() && !IsolationIsSerializable() &&
!has_parallel_hazard((Node *) parse, true); !has_parallel_hazard((Node *) parse, true);
...@@ -3622,7 +3622,7 @@ create_grouping_paths(PlannerInfo *root, ...@@ -3622,7 +3622,7 @@ create_grouping_paths(PlannerInfo *root,
if (grouped_rel->partial_pathlist) if (grouped_rel->partial_pathlist)
{ {
Path *path = (Path *) linitial(grouped_rel->partial_pathlist); Path *path = (Path *) linitial(grouped_rel->partial_pathlist);
double total_groups = path->rows * path->parallel_degree; double total_groups = path->rows * path->parallel_workers;
path = (Path *) create_gather_path(root, path = (Path *) create_gather_path(root,
grouped_rel, grouped_rel,
...@@ -3717,7 +3717,7 @@ create_grouping_paths(PlannerInfo *root, ...@@ -3717,7 +3717,7 @@ create_grouping_paths(PlannerInfo *root,
if (hashaggtablesize < work_mem * 1024L) if (hashaggtablesize < work_mem * 1024L)
{ {
double total_groups = path->rows * path->parallel_degree; double total_groups = path->rows * path->parallel_workers;
path = (Path *) create_gather_path(root, path = (Path *) create_gather_path(root,
grouped_rel, grouped_rel,
......
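
As a worked example of the total_groups estimate above (not stated in the hunk itself): a partial path's row count is a per-worker figure, so with path->rows = 1000 and parallel_workers = 2 the Gather node would be expected to receive roughly 1000 * 2 = 2000 groups in total.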
...@@ -128,8 +128,8 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent, ...@@ -128,8 +128,8 @@ get_relation_info(PlannerInfo *root, Oid relationObjectId, bool inhparent,
estimate_rel_size(relation, rel->attr_widths - rel->min_attr, estimate_rel_size(relation, rel->attr_widths - rel->min_attr,
&rel->pages, &rel->tuples, &rel->allvisfrac); &rel->pages, &rel->tuples, &rel->allvisfrac);
/* Retrieve the parallel_degree reloption, if set. */ /* Retrieve the parallel_workers reloption, if set. */
rel->rel_parallel_degree = RelationGetParallelDegree(relation, -1); rel->rel_parallel_workers = RelationGetParallelDegree(relation, -1);
/* /*
* Make list of indexes. Ignore indexes on system catalogs if told to. * Make list of indexes. Ignore indexes on system catalogs if told to.
......
...@@ -107,7 +107,7 @@ build_simple_rel(PlannerInfo *root, int relid, RelOptKind reloptkind) ...@@ -107,7 +107,7 @@ build_simple_rel(PlannerInfo *root, int relid, RelOptKind reloptkind)
rel->consider_startup = (root->tuple_fraction > 0); rel->consider_startup = (root->tuple_fraction > 0);
rel->consider_param_startup = false; /* might get changed later */ rel->consider_param_startup = false; /* might get changed later */
rel->consider_parallel = false; /* might get changed later */ rel->consider_parallel = false; /* might get changed later */
rel->rel_parallel_degree = -1; /* set up in GetRelationInfo */ rel->rel_parallel_workers = -1; /* set up in GetRelationInfo */
rel->reltarget = create_empty_pathtarget(); rel->reltarget = create_empty_pathtarget();
rel->pathlist = NIL; rel->pathlist = NIL;
rel->ppilist = NIL; rel->ppilist = NIL;
......
...@@ -2648,11 +2648,11 @@ static struct config_int ConfigureNamesInt[] = ...@@ -2648,11 +2648,11 @@ static struct config_int ConfigureNamesInt[] =
}, },
{ {
{"max_parallel_degree", PGC_USERSET, RESOURCES_ASYNCHRONOUS, {"max_parallel_workers_per_gather", PGC_USERSET, RESOURCES_ASYNCHRONOUS,
gettext_noop("Sets the maximum number of parallel processes per executor node."), gettext_noop("Sets the maximum number of parallel processes per executor node."),
NULL NULL
}, },
&max_parallel_degree, &max_parallel_workers_per_gather,
2, 0, 1024, 2, 0, 1024,
NULL, NULL, NULL NULL, NULL, NULL
}, },
......
...@@ -167,7 +167,7 @@ ...@@ -167,7 +167,7 @@
#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching #effective_io_concurrency = 1 # 1-1000; 0 disables prefetching
#max_worker_processes = 8 # (change requires restart) #max_worker_processes = 8 # (change requires restart)
#max_parallel_degree = 2 # max number of worker processes per node #max_parallel_workers_per_gather = 2 # taken from max_worker_processes
#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate #old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
# (change requires restart) # (change requires restart)
#backend_flush_after = 0 # 0 disables, #backend_flush_after = 0 # 0 disables,
......
...@@ -1784,7 +1784,7 @@ psql_completion(const char *text, int start, int end) ...@@ -1784,7 +1784,7 @@ psql_completion(const char *text, int start, int end)
"autovacuum_vacuum_scale_factor", "autovacuum_vacuum_scale_factor",
"autovacuum_vacuum_threshold", "autovacuum_vacuum_threshold",
"fillfactor", "fillfactor",
"parallel_degree", "parallel_workers",
"log_autovacuum_min_duration", "log_autovacuum_min_duration",
"toast.autovacuum_enabled", "toast.autovacuum_enabled",
"toast.autovacuum_freeze_max_age", "toast.autovacuum_freeze_max_age",
......
...@@ -521,7 +521,7 @@ typedef struct RelOptInfo ...@@ -521,7 +521,7 @@ typedef struct RelOptInfo
double allvisfrac; double allvisfrac;
PlannerInfo *subroot; /* if subquery */ PlannerInfo *subroot; /* if subquery */
List *subplan_params; /* if subquery */ List *subplan_params; /* if subquery */
int rel_parallel_degree; /* wanted number of parallel workers */ int rel_parallel_workers; /* wanted number of parallel workers */
/* Information about foreign tables and foreign joins */ /* Information about foreign tables and foreign joins */
Oid serverid; /* identifies server for the table or join */ Oid serverid; /* identifies server for the table or join */
...@@ -850,7 +850,7 @@ typedef struct Path ...@@ -850,7 +850,7 @@ typedef struct Path
bool parallel_aware; /* engage parallel-aware logic? */ bool parallel_aware; /* engage parallel-aware logic? */
bool parallel_safe; /* OK to use as part of parallel plan? */ bool parallel_safe; /* OK to use as part of parallel plan? */
int parallel_degree; /* desired parallel degree; 0 = not parallel */ int parallel_workers; /* desired # of workers; 0 = not parallel */
/* estimated size/costs for path (see costsize.c for more info) */ /* estimated size/costs for path (see costsize.c for more info) */
double rows; /* estimated number of result tuples */ double rows; /* estimated number of result tuples */
......
...@@ -54,7 +54,7 @@ extern PGDLLIMPORT double parallel_tuple_cost; ...@@ -54,7 +54,7 @@ extern PGDLLIMPORT double parallel_tuple_cost;
extern PGDLLIMPORT double parallel_setup_cost; extern PGDLLIMPORT double parallel_setup_cost;
extern PGDLLIMPORT int effective_cache_size; extern PGDLLIMPORT int effective_cache_size;
extern Cost disable_cost; extern Cost disable_cost;
extern int max_parallel_degree; extern int max_parallel_workers_per_gather;
extern bool enable_seqscan; extern bool enable_seqscan;
extern bool enable_indexscan; extern bool enable_indexscan;
extern bool enable_indexonlyscan; extern bool enable_indexonlyscan;
......
...@@ -34,7 +34,7 @@ extern bool add_partial_path_precheck(RelOptInfo *parent_rel, ...@@ -34,7 +34,7 @@ extern bool add_partial_path_precheck(RelOptInfo *parent_rel,
Cost total_cost, List *pathkeys); Cost total_cost, List *pathkeys);
extern Path *create_seqscan_path(PlannerInfo *root, RelOptInfo *rel, extern Path *create_seqscan_path(PlannerInfo *root, RelOptInfo *rel,
Relids required_outer, int parallel_degree); Relids required_outer, int parallel_workers);
extern Path *create_samplescan_path(PlannerInfo *root, RelOptInfo *rel, extern Path *create_samplescan_path(PlannerInfo *root, RelOptInfo *rel,
Relids required_outer); Relids required_outer);
extern IndexPath *create_index_path(PlannerInfo *root, extern IndexPath *create_index_path(PlannerInfo *root,
...@@ -62,7 +62,7 @@ extern BitmapOrPath *create_bitmap_or_path(PlannerInfo *root, ...@@ -62,7 +62,7 @@ extern BitmapOrPath *create_bitmap_or_path(PlannerInfo *root,
extern TidPath *create_tidscan_path(PlannerInfo *root, RelOptInfo *rel, extern TidPath *create_tidscan_path(PlannerInfo *root, RelOptInfo *rel,
List *tidquals, Relids required_outer); List *tidquals, Relids required_outer);
extern AppendPath *create_append_path(RelOptInfo *rel, List *subpaths, extern AppendPath *create_append_path(RelOptInfo *rel, List *subpaths,
Relids required_outer, int parallel_degree); Relids required_outer, int parallel_workers);
extern MergeAppendPath *create_merge_append_path(PlannerInfo *root, extern MergeAppendPath *create_merge_append_path(PlannerInfo *root,
RelOptInfo *rel, RelOptInfo *rel,
List *subpaths, List *subpaths,
......
...@@ -204,7 +204,7 @@ typedef struct StdRdOptions ...@@ -204,7 +204,7 @@ typedef struct StdRdOptions
AutoVacOpts autovacuum; /* autovacuum-related options */ AutoVacOpts autovacuum; /* autovacuum-related options */
bool user_catalog_table; /* use as an additional catalog bool user_catalog_table; /* use as an additional catalog
* relation */ * relation */
int parallel_degree; /* max number of parallel workers */ int parallel_workers; /* max number of parallel workers */
} StdRdOptions; } StdRdOptions;
#define HEAP_MIN_FILLFACTOR 10 #define HEAP_MIN_FILLFACTOR 10
...@@ -243,11 +243,11 @@ typedef struct StdRdOptions ...@@ -243,11 +243,11 @@ typedef struct StdRdOptions
/* /*
* RelationGetParallelDegree * RelationGetParallelDegree
* Returns the relation's parallel_degree. Note multiple eval of argument! * Returns the relation's parallel_workers. Note multiple eval of argument!
*/ */
#define RelationGetParallelDegree(relation, defaultpd) \ #define RelationGetParallelDegree(relation, defaultpd) \
((relation)->rd_options ? \ ((relation)->rd_options ? \
((StdRdOptions *) (relation)->rd_options)->parallel_degree : (defaultpd)) ((StdRdOptions *) (relation)->rd_options)->parallel_workers : (defaultpd))
/* /*
......