Commit 2f153ddf authored by Tom Lane's avatar Tom Lane

Refactor to reduce code duplication for function property checking.

As noted by Andres Freund, we'd accumulated quite a few similar functions
in clauses.c that examine all functions in an expression tree to see if
they satisfy some boolean test.  Reduce the duplication by inventing a
function check_functions_in_node() that applies a simple callback function
to each SQL function OID appearing in a given expression node.  This also
fixes some arguable oversights; for example, contain_mutable_functions()
did not check aggregate or window functions for mutability.  I doubt that
that represents a live bug at the moment, because we don't really consider
mutability for aggregates; but it might someday be one.

I chose to put check_functions_in_node() in nodeFuncs.c because it seemed
like other modules might wish to use it in future.  That in turn forced
moving set_opfuncid() et al into nodeFuncs.c, as the alternative was for
nodeFuncs.c to depend on optimizer/setrefs.c which didn't seem very clean.

In passing, teach contain_leaked_vars_walker() about a few more expression
node types it can safely look through, and improve the rather messy and
undercommented code in has_parallel_hazard_walker().

Discussion: <20160527185853.ziol2os2zskahl7v@alap3.anarazel.de>
parent 13761bcc
...@@ -27,6 +27,7 @@ ...@@ -27,6 +27,7 @@
static bool expression_returns_set_walker(Node *node, void *context); static bool expression_returns_set_walker(Node *node, void *context);
static int leftmostLoc(int loc1, int loc2); static int leftmostLoc(int loc1, int loc2);
static bool fix_opfuncids_walker(Node *node, void *context);
static bool planstate_walk_subplans(List *plans, bool (*walker) (), static bool planstate_walk_subplans(List *plans, bool (*walker) (),
void *context); void *context);
static bool planstate_walk_members(List *plans, PlanState **planstates, static bool planstate_walk_members(List *plans, PlanState **planstates,
...@@ -1559,6 +1560,183 @@ leftmostLoc(int loc1, int loc2) ...@@ -1559,6 +1560,183 @@ leftmostLoc(int loc1, int loc2)
} }
/*
 * fix_opfuncids
 *		Fill in the opfuncid field (from opno) for every operator node in
 *		the given expression tree; accepts anything that
 *		expression_tree_walker can process.
 *
 * The tree is updated in-place.  That is safe even in the presence of
 * shared structure, because a repeat visit to a node would just store the
 * identical value again.
 */
void
fix_opfuncids(Node *node)
{
	/* No walker context or setup is needed; just start the recursion. */
	(void) fix_opfuncids_walker(node, NULL);
}
/*
 * Walker for fix_opfuncids: resolve the function OID on each operator-like
 * node encountered, then recurse into sub-expressions.
 */
static bool
fix_opfuncids_walker(Node *node, void *context)
{
	if (node == NULL)
		return false;
	switch (nodeTag(node))
	{
		case T_OpExpr:
		case T_DistinctExpr:	/* struct-equivalent to OpExpr */
		case T_NullIfExpr:		/* struct-equivalent to OpExpr */
			set_opfuncid((OpExpr *) node);
			break;
		case T_ScalarArrayOpExpr:
			set_sa_opfuncid((ScalarArrayOpExpr *) node);
			break;
		default:
			/* other node types carry no opfuncid to fix */
			break;
	}
	return expression_tree_walker(node, fix_opfuncids_walker, context);
}
/*
 * set_opfuncid
 *		Resolve and store the implementing procedure's OID in an OpExpr
 *		node, unless it has been filled in already.
 *
 * DistinctExpr and NullIfExpr nodes may be passed here too, since they are
 * struct-equivalent to OpExpr.
 */
void
set_opfuncid(OpExpr *opexpr)
{
	if (OidIsValid(opexpr->opfuncid))
		return;					/* already resolved */
	opexpr->opfuncid = get_opcode(opexpr->opno);
}
/*
 * set_sa_opfuncid
 *		Same as set_opfuncid, but for ScalarArrayOpExpr nodes.
 */
void
set_sa_opfuncid(ScalarArrayOpExpr *opexpr)
{
	if (OidIsValid(opexpr->opfuncid))
		return;					/* already resolved */
	opexpr->opfuncid = get_opcode(opexpr->opno);
}
/*
 * check_functions_in_node -
 *		invoke checker() on every SQL function OID found in the given
 *		expression node
 *
 * The result is TRUE as soon as checker() returns TRUE for some function
 * OID in the node; nodes that reference several functions report TRUE if
 * any one of them does.  FALSE means the node invokes no SQL-visible
 * function at all.  node must not be NULL.
 *
 * Only the node itself is inspected; there is no recursion into
 * sub-expressions here.  Callers generally want to drive the recursion
 * themselves, so that they can apply additional checks or skip parts of
 * the tree.
 *
 * Note: MinMaxExpr, XmlExpr, and CoerceToDomain are deliberately ignored,
 * since they carry no SQL function OIDs.  They can nonetheless end up
 * invoking SQL-visible functions, so callers should think about how to
 * treat them.
 */
bool
check_functions_in_node(Node *node, check_function_callback checker,
						void *context)
{
	switch (nodeTag(node))
	{
		case T_Aggref:
			{
				Aggref	   *aggref = (Aggref *) node;

				if (checker(aggref->aggfnoid, context))
					return true;
			}
			break;
		case T_WindowFunc:
			{
				WindowFunc *wfunc = (WindowFunc *) node;

				if (checker(wfunc->winfnoid, context))
					return true;
			}
			break;
		case T_FuncExpr:
			{
				FuncExpr   *fexpr = (FuncExpr *) node;

				if (checker(fexpr->funcid, context))
					return true;
			}
			break;
		case T_OpExpr:
		case T_DistinctExpr:	/* struct-equivalent to OpExpr */
		case T_NullIfExpr:		/* struct-equivalent to OpExpr */
			{
				OpExpr	   *opexpr = (OpExpr *) node;

				/* Make sure opfuncid is filled in before consulting it */
				set_opfuncid(opexpr);
				if (checker(opexpr->opfuncid, context))
					return true;
			}
			break;
		case T_ScalarArrayOpExpr:
			{
				ScalarArrayOpExpr *saop = (ScalarArrayOpExpr *) node;

				/* Make sure opfuncid is filled in before consulting it */
				set_sa_opfuncid(saop);
				if (checker(saop->opfuncid, context))
					return true;
			}
			break;
		case T_CoerceViaIO:
			{
				CoerceViaIO *iocoerce = (CoerceViaIO *) node;
				Oid			iofunc;
				Oid			typioparam;
				bool		typisvarlena;

				/* the coercion invokes the result type's input function */
				getTypeInputInfo(iocoerce->resulttype,
								 &iofunc, &typioparam);
				if (checker(iofunc, context))
					return true;
				/* ... as well as the input type's output function */
				getTypeOutputInfo(exprType((Node *) iocoerce->arg),
								  &iofunc, &typisvarlena);
				if (checker(iofunc, context))
					return true;
			}
			break;
		case T_ArrayCoerceExpr:
			{
				ArrayCoerceExpr *acoerce = (ArrayCoerceExpr *) node;

				/* a per-element conversion function is only sometimes used */
				if (OidIsValid(acoerce->elemfuncid) &&
					checker(acoerce->elemfuncid, context))
					return true;
			}
			break;
		case T_RowCompareExpr:
			{
				RowCompareExpr *rcexpr = (RowCompareExpr *) node;
				ListCell   *opid;

				/* one comparison operator, hence one function, per column */
				foreach(opid, rcexpr->opnos)
				{
					if (checker(get_opcode(lfirst_oid(opid)), context))
						return true;
				}
			}
			break;
		default:
			/* no SQL-visible function OIDs in other node types */
			break;
	}
	return false;
}
/* /*
* Standard expression-tree walking support * Standard expression-tree walking support
* *
......
...@@ -147,7 +147,6 @@ static List *set_returning_clause_references(PlannerInfo *root, ...@@ -147,7 +147,6 @@ static List *set_returning_clause_references(PlannerInfo *root,
Plan *topplan, Plan *topplan,
Index resultRelation, Index resultRelation,
int rtoffset); int rtoffset);
static bool fix_opfuncids_walker(Node *node, void *context);
static bool extract_query_dependencies_walker(Node *node, static bool extract_query_dependencies_walker(Node *node,
PlannerInfo *context); PlannerInfo *context);
...@@ -2556,68 +2555,6 @@ set_returning_clause_references(PlannerInfo *root, ...@@ -2556,68 +2555,6 @@ set_returning_clause_references(PlannerInfo *root,
} }
/*****************************************************************************
* OPERATOR REGPROC LOOKUP
*****************************************************************************/
/*
* fix_opfuncids
* Calculate opfuncid field from opno for each OpExpr node in given tree.
* The given tree can be anything expression_tree_walker handles.
*
* The argument is modified in-place. (This is OK since we'd want the
* same change for any node, even if it gets visited more than once due to
* shared structure.)
*/
void
fix_opfuncids(Node *node)
{
/* This tree walk requires no special setup, so away we go... */
fix_opfuncids_walker(node, NULL);
}
static bool
fix_opfuncids_walker(Node *node, void *context)
{
if (node == NULL)
return false;
if (IsA(node, OpExpr))
set_opfuncid((OpExpr *) node);
else if (IsA(node, DistinctExpr))
set_opfuncid((OpExpr *) node); /* rely on struct equivalence */
else if (IsA(node, NullIfExpr))
set_opfuncid((OpExpr *) node); /* rely on struct equivalence */
else if (IsA(node, ScalarArrayOpExpr))
set_sa_opfuncid((ScalarArrayOpExpr *) node);
return expression_tree_walker(node, fix_opfuncids_walker, context);
}
/*
* set_opfuncid
* Set the opfuncid (procedure OID) in an OpExpr node,
* if it hasn't been set already.
*
* Because of struct equivalence, this can also be used for
* DistinctExpr and NullIfExpr nodes.
*/
void
set_opfuncid(OpExpr *opexpr)
{
if (opexpr->opfuncid == InvalidOid)
opexpr->opfuncid = get_opcode(opexpr->opno);
}
/*
* set_sa_opfuncid
* As above, for ScalarArrayOpExpr nodes.
*/
void
set_sa_opfuncid(ScalarArrayOpExpr *opexpr)
{
if (opexpr->opfuncid == InvalidOid)
opexpr->opfuncid = get_opcode(opexpr->opno);
}
/***************************************************************************** /*****************************************************************************
* QUERY DEPENDENCY MANAGEMENT * QUERY DEPENDENCY MANAGEMENT
*****************************************************************************/ *****************************************************************************/
......
...@@ -113,8 +113,6 @@ static bool contain_volatile_functions_walker(Node *node, void *context); ...@@ -113,8 +113,6 @@ static bool contain_volatile_functions_walker(Node *node, void *context);
static bool contain_volatile_functions_not_nextval_walker(Node *node, void *context); static bool contain_volatile_functions_not_nextval_walker(Node *node, void *context);
static bool has_parallel_hazard_walker(Node *node, static bool has_parallel_hazard_walker(Node *node,
has_parallel_hazard_arg *context); has_parallel_hazard_arg *context);
static bool parallel_too_dangerous(char proparallel,
has_parallel_hazard_arg *context);
static bool contain_nonstrict_functions_walker(Node *node, void *context); static bool contain_nonstrict_functions_walker(Node *node, void *context);
static bool contain_leaked_vars_walker(Node *node, void *context); static bool contain_leaked_vars_walker(Node *node, void *context);
static Relids find_nonnullable_rels_walker(Node *node, bool top_level); static Relids find_nonnullable_rels_walker(Node *node, bool top_level);
...@@ -993,96 +991,34 @@ contain_mutable_functions(Node *clause) ...@@ -993,96 +991,34 @@ contain_mutable_functions(Node *clause)
return contain_mutable_functions_walker(clause, NULL); return contain_mutable_functions_walker(clause, NULL);
} }
static bool
contain_mutable_functions_checker(Oid func_id, void *context)
{
return (func_volatile(func_id) != PROVOLATILE_IMMUTABLE);
}
static bool static bool
contain_mutable_functions_walker(Node *node, void *context) contain_mutable_functions_walker(Node *node, void *context)
{ {
if (node == NULL) if (node == NULL)
return false; return false;
if (IsA(node, FuncExpr)) /* Check for mutable functions in node itself */
{ if (check_functions_in_node(node, contain_mutable_functions_checker,
FuncExpr *expr = (FuncExpr *) node; context))
if (func_volatile(expr->funcid) != PROVOLATILE_IMMUTABLE)
return true; return true;
/* else fall through to check args */
}
else if (IsA(node, OpExpr))
{
OpExpr *expr = (OpExpr *) node;
set_opfuncid(expr);
if (func_volatile(expr->opfuncid) != PROVOLATILE_IMMUTABLE)
return true;
/* else fall through to check args */
}
else if (IsA(node, DistinctExpr))
{
DistinctExpr *expr = (DistinctExpr *) node;
set_opfuncid((OpExpr *) expr); /* rely on struct equivalence */
if (func_volatile(expr->opfuncid) != PROVOLATILE_IMMUTABLE)
return true;
/* else fall through to check args */
}
else if (IsA(node, NullIfExpr))
{
NullIfExpr *expr = (NullIfExpr *) node;
set_opfuncid((OpExpr *) expr); /* rely on struct equivalence */
if (func_volatile(expr->opfuncid) != PROVOLATILE_IMMUTABLE)
return true;
/* else fall through to check args */
}
else if (IsA(node, ScalarArrayOpExpr))
{
ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node;
set_sa_opfuncid(expr);
if (func_volatile(expr->opfuncid) != PROVOLATILE_IMMUTABLE)
return true;
/* else fall through to check args */
}
else if (IsA(node, CoerceViaIO))
{
CoerceViaIO *expr = (CoerceViaIO *) node;
Oid iofunc;
Oid typioparam;
bool typisvarlena;
/* check the result type's input function */
getTypeInputInfo(expr->resulttype,
&iofunc, &typioparam);
if (func_volatile(iofunc) != PROVOLATILE_IMMUTABLE)
return true;
/* check the input type's output function */
getTypeOutputInfo(exprType((Node *) expr->arg),
&iofunc, &typisvarlena);
if (func_volatile(iofunc) != PROVOLATILE_IMMUTABLE)
return true;
/* else fall through to check args */
}
else if (IsA(node, ArrayCoerceExpr))
{
ArrayCoerceExpr *expr = (ArrayCoerceExpr *) node;
if (OidIsValid(expr->elemfuncid) && /*
func_volatile(expr->elemfuncid) != PROVOLATILE_IMMUTABLE) * It should be safe to treat MinMaxExpr as immutable, because it will
return true; * depend on a non-cross-type btree comparison function, and those should
/* else fall through to check args */ * always be immutable. Treating XmlExpr as immutable is more dubious,
} * and treating CoerceToDomain as immutable is outright dangerous. But we
else if (IsA(node, RowCompareExpr)) * have done so historically, and changing this would probably cause more
{ * problems than it would fix. In practice, if you have a non-immutable
RowCompareExpr *rcexpr = (RowCompareExpr *) node; * domain constraint you are in for pain anyhow.
ListCell *opid; */
foreach(opid, rcexpr->opnos) /* Recurse to check arguments */
{ if (IsA(node, Query))
if (op_volatile(lfirst_oid(opid)) != PROVOLATILE_IMMUTABLE)
return true;
}
/* else fall through to check args */
}
else if (IsA(node, Query))
{ {
/* Recurse into subselects */ /* Recurse into subselects */
return query_tree_walker((Query *) node, return query_tree_walker((Query *) node,
...@@ -1122,110 +1058,29 @@ contain_volatile_functions(Node *clause) ...@@ -1122,110 +1058,29 @@ contain_volatile_functions(Node *clause)
return contain_volatile_functions_walker(clause, NULL); return contain_volatile_functions_walker(clause, NULL);
} }
bool static bool
contain_volatile_functions_not_nextval(Node *clause) contain_volatile_functions_checker(Oid func_id, void *context)
{ {
return contain_volatile_functions_not_nextval_walker(clause, NULL); return (func_volatile(func_id) == PROVOLATILE_VOLATILE);
} }
/*
* General purpose code for checking expression volatility.
*
* Special purpose code for use in COPY is almost identical to this,
* so any changes here may also be needed in other contain_volatile...
* functions.
*/
static bool static bool
contain_volatile_functions_walker(Node *node, void *context) contain_volatile_functions_walker(Node *node, void *context)
{ {
if (node == NULL) if (node == NULL)
return false; return false;
if (IsA(node, FuncExpr)) /* Check for volatile functions in node itself */
{ if (check_functions_in_node(node, contain_volatile_functions_checker,
FuncExpr *expr = (FuncExpr *) node; context))
if (func_volatile(expr->funcid) == PROVOLATILE_VOLATILE)
return true;
/* else fall through to check args */
}
else if (IsA(node, OpExpr))
{
OpExpr *expr = (OpExpr *) node;
set_opfuncid(expr);
if (func_volatile(expr->opfuncid) == PROVOLATILE_VOLATILE)
return true;
/* else fall through to check args */
}
else if (IsA(node, DistinctExpr))
{
DistinctExpr *expr = (DistinctExpr *) node;
set_opfuncid((OpExpr *) expr); /* rely on struct equivalence */
if (func_volatile(expr->opfuncid) == PROVOLATILE_VOLATILE)
return true;
/* else fall through to check args */
}
else if (IsA(node, NullIfExpr))
{
NullIfExpr *expr = (NullIfExpr *) node;
set_opfuncid((OpExpr *) expr); /* rely on struct equivalence */
if (func_volatile(expr->opfuncid) == PROVOLATILE_VOLATILE)
return true;
/* else fall through to check args */
}
else if (IsA(node, ScalarArrayOpExpr))
{
ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node;
set_sa_opfuncid(expr);
if (func_volatile(expr->opfuncid) == PROVOLATILE_VOLATILE)
return true; return true;
/* else fall through to check args */
}
else if (IsA(node, CoerceViaIO))
{
CoerceViaIO *expr = (CoerceViaIO *) node;
Oid iofunc;
Oid typioparam;
bool typisvarlena;
/* check the result type's input function */ /*
getTypeInputInfo(expr->resulttype, * See notes in contain_mutable_functions_walker about why we treat
&iofunc, &typioparam); * MinMaxExpr, XmlExpr, and CoerceToDomain as immutable.
if (func_volatile(iofunc) == PROVOLATILE_VOLATILE) */
return true;
/* check the input type's output function */
getTypeOutputInfo(exprType((Node *) expr->arg),
&iofunc, &typisvarlena);
if (func_volatile(iofunc) == PROVOLATILE_VOLATILE)
return true;
/* else fall through to check args */
}
else if (IsA(node, ArrayCoerceExpr))
{
ArrayCoerceExpr *expr = (ArrayCoerceExpr *) node;
if (OidIsValid(expr->elemfuncid) &&
func_volatile(expr->elemfuncid) == PROVOLATILE_VOLATILE)
return true;
/* else fall through to check args */
}
else if (IsA(node, RowCompareExpr))
{
/* RowCompare probably can't have volatile ops, but check anyway */
RowCompareExpr *rcexpr = (RowCompareExpr *) node;
ListCell *opid;
foreach(opid, rcexpr->opnos) /* Recurse to check arguments */
{ if (IsA(node, Query))
if (op_volatile(lfirst_oid(opid)) == PROVOLATILE_VOLATILE)
return true;
}
/* else fall through to check args */
}
else if (IsA(node, Query))
{ {
/* Recurse into subselects */ /* Recurse into subselects */
return query_tree_walker((Query *) node, return query_tree_walker((Query *) node,
...@@ -1237,104 +1092,48 @@ contain_volatile_functions_walker(Node *node, void *context) ...@@ -1237,104 +1092,48 @@ contain_volatile_functions_walker(Node *node, void *context)
} }
/* /*
* Special purpose version of contain_volatile_functions for use in COPY * Special purpose version of contain_volatile_functions() for use in COPY:
* ignore nextval(), but treat all other functions normally.
*/ */
bool
contain_volatile_functions_not_nextval(Node *clause)
{
return contain_volatile_functions_not_nextval_walker(clause, NULL);
}
static bool
contain_volatile_functions_not_nextval_checker(Oid func_id, void *context)
{
return (func_id != F_NEXTVAL_OID &&
func_volatile(func_id) == PROVOLATILE_VOLATILE);
}
static bool static bool
contain_volatile_functions_not_nextval_walker(Node *node, void *context) contain_volatile_functions_not_nextval_walker(Node *node, void *context)
{ {
if (node == NULL) if (node == NULL)
return false; return false;
if (IsA(node, FuncExpr)) /* Check for volatile functions in node itself */
{ if (check_functions_in_node(node,
FuncExpr *expr = (FuncExpr *) node; contain_volatile_functions_not_nextval_checker,
context))
return true;
/* /*
* For this case only, we want to ignore the volatility of the * See notes in contain_mutable_functions_walker about why we treat
* nextval() function, since some callers want this. * MinMaxExpr, XmlExpr, and CoerceToDomain as immutable.
*/ */
if (expr->funcid != F_NEXTVAL_OID &&
func_volatile(expr->funcid) == PROVOLATILE_VOLATILE)
return true;
/* else fall through to check args */
}
else if (IsA(node, OpExpr))
{
OpExpr *expr = (OpExpr *) node;
set_opfuncid(expr);
if (func_volatile(expr->opfuncid) == PROVOLATILE_VOLATILE)
return true;
/* else fall through to check args */
}
else if (IsA(node, DistinctExpr))
{
DistinctExpr *expr = (DistinctExpr *) node;
set_opfuncid((OpExpr *) expr); /* rely on struct equivalence */
if (func_volatile(expr->opfuncid) == PROVOLATILE_VOLATILE)
return true;
/* else fall through to check args */
}
else if (IsA(node, NullIfExpr))
{
NullIfExpr *expr = (NullIfExpr *) node;
set_opfuncid((OpExpr *) expr); /* rely on struct equivalence */
if (func_volatile(expr->opfuncid) == PROVOLATILE_VOLATILE)
return true;
/* else fall through to check args */
}
else if (IsA(node, ScalarArrayOpExpr))
{
ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node;
set_sa_opfuncid(expr);
if (func_volatile(expr->opfuncid) == PROVOLATILE_VOLATILE)
return true;
/* else fall through to check args */
}
else if (IsA(node, CoerceViaIO))
{
CoerceViaIO *expr = (CoerceViaIO *) node;
Oid iofunc;
Oid typioparam;
bool typisvarlena;
/* check the result type's input function */
getTypeInputInfo(expr->resulttype,
&iofunc, &typioparam);
if (func_volatile(iofunc) == PROVOLATILE_VOLATILE)
return true;
/* check the input type's output function */
getTypeOutputInfo(exprType((Node *) expr->arg),
&iofunc, &typisvarlena);
if (func_volatile(iofunc) == PROVOLATILE_VOLATILE)
return true;
/* else fall through to check args */
}
else if (IsA(node, ArrayCoerceExpr))
{
ArrayCoerceExpr *expr = (ArrayCoerceExpr *) node;
if (OidIsValid(expr->elemfuncid) &&
func_volatile(expr->elemfuncid) == PROVOLATILE_VOLATILE)
return true;
/* else fall through to check args */
}
else if (IsA(node, RowCompareExpr))
{
/* RowCompare probably can't have volatile ops, but check anyway */
RowCompareExpr *rcexpr = (RowCompareExpr *) node;
ListCell *opid;
foreach(opid, rcexpr->opnos) /* Recurse to check arguments */
if (IsA(node, Query))
{ {
if (op_volatile(lfirst_oid(opid)) == PROVOLATILE_VOLATILE) /* Recurse into subselects */
return true; return query_tree_walker((Query *) node,
} contain_volatile_functions_not_nextval_walker,
/* else fall through to check args */ context, 0);
} }
return expression_tree_walker(node, contain_volatile_functions_not_nextval_walker, return expression_tree_walker(node,
contain_volatile_functions_not_nextval_walker,
context); context);
} }
...@@ -1343,12 +1142,12 @@ contain_volatile_functions_not_nextval_walker(Node *node, void *context) ...@@ -1343,12 +1142,12 @@ contain_volatile_functions_not_nextval_walker(Node *node, void *context)
*****************************************************************************/ *****************************************************************************/
/* /*
* Check whether a node tree contains parallel hazards. This is used both * Check whether a node tree contains parallel hazards. This is used both on
* on the entire query tree, to see whether the query can be parallelized at * the entire query tree, to see whether the query can be parallelized at all
* all, and also to evaluate whether a particular expression is safe to * (with allow_restricted = true), and also to evaluate whether a particular
* run in a parallel worker. We could separate these concerns into two * expression is safe to run within a parallel worker (with allow_restricted =
* different functions, but there's enough overlap that it doesn't seem * false). We could separate these concerns into two different functions, but
* worthwhile. * there's enough overlap that it doesn't seem worthwhile.
*/ */
bool bool
has_parallel_hazard(Node *node, bool allow_restricted) has_parallel_hazard(Node *node, bool allow_restricted)
...@@ -1359,49 +1158,48 @@ has_parallel_hazard(Node *node, bool allow_restricted) ...@@ -1359,49 +1158,48 @@ has_parallel_hazard(Node *node, bool allow_restricted)
return has_parallel_hazard_walker(node, &context); return has_parallel_hazard_walker(node, &context);
} }
static bool
has_parallel_hazard_checker(Oid func_id, void *context)
{
char proparallel = func_parallel(func_id);
if (((has_parallel_hazard_arg *) context)->allow_restricted)
return (proparallel == PROPARALLEL_UNSAFE);
else
return (proparallel != PROPARALLEL_SAFE);
}
static bool static bool
has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context) has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context)
{ {
if (node == NULL) if (node == NULL)
return false; return false;
/* /* Check for hazardous functions in node itself */
* When we're first invoked on a completely unplanned tree, we must if (check_functions_in_node(node, has_parallel_hazard_checker,
* recurse through Query objects to as to locate parallel-unsafe context))
* constructs anywhere in the tree.
*
* Later, we'll be called again for specific quals, possibly after some
* planning has been done, we may encounter SubPlan, SubLink, or
* AlternativeSubLink nodes. Currently, there's no need to recurse
* through these; they can't be unsafe, since we've already cleared the
* entire query of unsafe operations, and they're definitely
* parallel-restricted.
*/
if (IsA(node, Query))
{
Query *query = (Query *) node;
if (query->rowMarks != NULL)
return true; return true;
/* Recurse into subselects */
return query_tree_walker(query,
has_parallel_hazard_walker,
context, 0);
}
else if (IsA(node, SubPlan) ||IsA(node, SubLink) ||
IsA(node, AlternativeSubPlan) ||IsA(node, Param))
{
/* /*
* Since we don't have the ability to push subplans down to workers at * It should be OK to treat MinMaxExpr as parallel-safe, since btree
* present, we treat subplan references as parallel-restricted. * opclass support functions are generally parallel-safe. XmlExpr is a
* bit more dubious but we can probably get away with it. We err on the
* side of caution by treating CoerceToDomain as parallel-restricted.
* (Note: in principle that's wrong because a domain constraint could
* contain a parallel-unsafe function; but useful constraints probably
* never would have such, and assuming they do would cripple use of
* parallel query in the presence of domain types.)
*/ */
if (IsA(node, CoerceToDomain))
{
if (!context->allow_restricted) if (!context->allow_restricted)
return true; return true;
} }
/* This is just a notational convenience for callers. */ /*
if (IsA(node, RestrictInfo)) * As a notational convenience for callers, look through RestrictInfo.
*/
else if (IsA(node, RestrictInfo))
{ {
RestrictInfo *rinfo = (RestrictInfo *) node; RestrictInfo *rinfo = (RestrictInfo *) node;
...@@ -1409,111 +1207,56 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context) ...@@ -1409,111 +1207,56 @@ has_parallel_hazard_walker(Node *node, has_parallel_hazard_arg *context)
} }
/* /*
* For each node that might potentially call a function, we need to * Since we don't have the ability to push subplans down to workers at
* examine the pg_proc.proparallel marking for that function to see * present, we treat subplan references as parallel-restricted. We need
* whether it's safe enough for the current value of allow_restricted. * not worry about examining their contents; if they are unsafe, we would
* have found that out while examining the whole tree before reduction of
* sublinks to subplans. (Really we should not see SubLink during a
* not-allow_restricted scan, but if we do, return true.)
*/ */
if (IsA(node, FuncExpr)) else if (IsA(node, SubLink) ||
{ IsA(node, SubPlan) ||
FuncExpr *expr = (FuncExpr *) node; IsA(node, AlternativeSubPlan))
if (parallel_too_dangerous(func_parallel(expr->funcid), context))
return true;
}
else if (IsA(node, Aggref))
{
Aggref *aggref = (Aggref *) node;
if (parallel_too_dangerous(func_parallel(aggref->aggfnoid), context))
return true;
}
else if (IsA(node, OpExpr))
{
OpExpr *expr = (OpExpr *) node;
set_opfuncid(expr);
if (parallel_too_dangerous(func_parallel(expr->opfuncid), context))
return true;
}
else if (IsA(node, DistinctExpr))
{ {
DistinctExpr *expr = (DistinctExpr *) node; if (!context->allow_restricted)
set_opfuncid((OpExpr *) expr); /* rely on struct equivalence */
if (parallel_too_dangerous(func_parallel(expr->opfuncid), context))
return true; return true;
} }
else if (IsA(node, NullIfExpr))
{
NullIfExpr *expr = (NullIfExpr *) node;
set_opfuncid((OpExpr *) expr); /* rely on struct equivalence */ /*
if (parallel_too_dangerous(func_parallel(expr->opfuncid), context)) * We can't pass Params to workers at the moment either, so they are also
return true; * parallel-restricted.
} */
else if (IsA(node, ScalarArrayOpExpr)) else if (IsA(node, Param))
{ {
ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node; if (!context->allow_restricted)
set_sa_opfuncid(expr);
if (parallel_too_dangerous(func_parallel(expr->opfuncid), context))
return true; return true;
} }
else if (IsA(node, CoerceViaIO))
{
CoerceViaIO *expr = (CoerceViaIO *) node;
Oid iofunc;
Oid typioparam;
bool typisvarlena;
/* check the result type's input function */ /*
getTypeInputInfo(expr->resulttype, * When we're first invoked on a completely unplanned tree, we must
&iofunc, &typioparam); * recurse into subqueries so to as to locate parallel-unsafe constructs
if (parallel_too_dangerous(func_parallel(iofunc), context)) * anywhere in the tree.
return true; */
/* check the input type's output function */ else if (IsA(node, Query))
getTypeOutputInfo(exprType((Node *) expr->arg),
&iofunc, &typisvarlena);
if (parallel_too_dangerous(func_parallel(iofunc), context))
return true;
}
else if (IsA(node, ArrayCoerceExpr))
{ {
ArrayCoerceExpr *expr = (ArrayCoerceExpr *) node; Query *query = (Query *) node;
if (OidIsValid(expr->elemfuncid) && /* SELECT FOR UPDATE/SHARE must be treated as unsafe */
parallel_too_dangerous(func_parallel(expr->elemfuncid), context)) if (query->rowMarks != NULL)
return true; return true;
}
else if (IsA(node, RowCompareExpr))
{
RowCompareExpr *rcexpr = (RowCompareExpr *) node;
ListCell *opid;
foreach(opid, rcexpr->opnos)
{
Oid opfuncid = get_opcode(lfirst_oid(opid));
if (parallel_too_dangerous(func_parallel(opfuncid), context)) /* Recurse into subselects */
return true; return query_tree_walker(query,
} has_parallel_hazard_walker,
context, 0);
} }
/* ... and recurse to check substructure */ /* Recurse to check arguments */
return expression_tree_walker(node, return expression_tree_walker(node,
has_parallel_hazard_walker, has_parallel_hazard_walker,
context); context);
} }
static bool
parallel_too_dangerous(char proparallel, has_parallel_hazard_arg *context)
{
if (context->allow_restricted)
return proparallel == PROPARALLEL_UNSAFE;
else
return proparallel != PROPARALLEL_SAFE;
}
/***************************************************************************** /*****************************************************************************
* Check clauses for nonstrict functions * Check clauses for nonstrict functions
*****************************************************************************/ *****************************************************************************/
...@@ -1536,6 +1279,12 @@ contain_nonstrict_functions(Node *clause) ...@@ -1536,6 +1279,12 @@ contain_nonstrict_functions(Node *clause)
return contain_nonstrict_functions_walker(clause, NULL); return contain_nonstrict_functions_walker(clause, NULL);
} }
static bool
contain_nonstrict_functions_checker(Oid func_id, void *context)
{
return !func_strict(func_id);
}
static bool static bool
contain_nonstrict_functions_walker(Node *node, void *context) contain_nonstrict_functions_walker(Node *node, void *context)
{ {
...@@ -1546,6 +1295,14 @@ contain_nonstrict_functions_walker(Node *node, void *context) ...@@ -1546,6 +1295,14 @@ contain_nonstrict_functions_walker(Node *node, void *context)
/* an aggregate could return non-null with null input */ /* an aggregate could return non-null with null input */
return true; return true;
} }
if (IsA(node, GroupingFunc))
{
/*
* A GroupingFunc doesn't evaluate its arguments, and therefore must
* be treated as nonstrict.
*/
return true;
}
if (IsA(node, WindowFunc)) if (IsA(node, WindowFunc))
{ {
/* a window function could return non-null with null input */ /* a window function could return non-null with null input */
...@@ -1558,37 +1315,15 @@ contain_nonstrict_functions_walker(Node *node, void *context) ...@@ -1558,37 +1315,15 @@ contain_nonstrict_functions_walker(Node *node, void *context)
return true; return true;
/* else fall through to check args */ /* else fall through to check args */
} }
if (IsA(node, FuncExpr))
{
FuncExpr *expr = (FuncExpr *) node;
if (!func_strict(expr->funcid))
return true;
/* else fall through to check args */
}
if (IsA(node, OpExpr))
{
OpExpr *expr = (OpExpr *) node;
set_opfuncid(expr);
if (!func_strict(expr->opfuncid))
return true;
/* else fall through to check args */
}
if (IsA(node, DistinctExpr)) if (IsA(node, DistinctExpr))
{ {
/* IS DISTINCT FROM is inherently non-strict */ /* IS DISTINCT FROM is inherently non-strict */
return true; return true;
} }
if (IsA(node, NullIfExpr)) if (IsA(node, NullIfExpr))
return true;
if (IsA(node, ScalarArrayOpExpr))
{ {
ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node; /* NULLIF is inherently non-strict */
if (!is_strict_saop(expr, false))
return true; return true;
/* else fall through to check args */
} }
if (IsA(node, BoolExpr)) if (IsA(node, BoolExpr))
{ {
...@@ -1613,7 +1348,6 @@ contain_nonstrict_functions_walker(Node *node, void *context) ...@@ -1613,7 +1348,6 @@ contain_nonstrict_functions_walker(Node *node, void *context)
return true; return true;
if (IsA(node, AlternativeSubPlan)) if (IsA(node, AlternativeSubPlan))
return true; return true;
/* ArrayCoerceExpr is strict at the array level, regardless of elemfunc */
if (IsA(node, FieldStore)) if (IsA(node, FieldStore))
return true; return true;
if (IsA(node, CaseExpr)) if (IsA(node, CaseExpr))
...@@ -1634,6 +1368,15 @@ contain_nonstrict_functions_walker(Node *node, void *context) ...@@ -1634,6 +1368,15 @@ contain_nonstrict_functions_walker(Node *node, void *context)
return true; return true;
if (IsA(node, BooleanTest)) if (IsA(node, BooleanTest))
return true; return true;
/*
* Check other function-containing nodes; but ArrayCoerceExpr is strict at
* the array level, regardless of elemfunc.
*/
if (!IsA(node, ArrayCoerceExpr) &&
check_functions_in_node(node, contain_nonstrict_functions_checker,
context))
return true;
return expression_tree_walker(node, contain_nonstrict_functions_walker, return expression_tree_walker(node, contain_nonstrict_functions_walker,
context); context);
} }
...@@ -1661,6 +1404,12 @@ contain_leaked_vars(Node *clause) ...@@ -1661,6 +1404,12 @@ contain_leaked_vars(Node *clause)
return contain_leaked_vars_walker(clause, NULL); return contain_leaked_vars_walker(clause, NULL);
} }
static bool
contain_leaked_vars_checker(Oid func_id, void *context)
{
return !get_func_leakproof(func_id);
}
static bool static bool
contain_leaked_vars_walker(Node *node, void *context) contain_leaked_vars_walker(Node *node, void *context)
{ {
...@@ -1672,10 +1421,14 @@ contain_leaked_vars_walker(Node *node, void *context) ...@@ -1672,10 +1421,14 @@ contain_leaked_vars_walker(Node *node, void *context)
case T_Var: case T_Var:
case T_Const: case T_Const:
case T_Param: case T_Param:
case T_ArrayRef:
case T_ArrayExpr: case T_ArrayExpr:
case T_FieldSelect:
case T_FieldStore:
case T_NamedArgExpr: case T_NamedArgExpr:
case T_BoolExpr: case T_BoolExpr:
case T_RelabelType: case T_RelabelType:
case T_CollateExpr:
case T_CaseExpr: case T_CaseExpr:
case T_CaseTestExpr: case T_CaseTestExpr:
case T_RowExpr: case T_RowExpr:
...@@ -1691,114 +1444,35 @@ contain_leaked_vars_walker(Node *node, void *context) ...@@ -1691,114 +1444,35 @@ contain_leaked_vars_walker(Node *node, void *context)
break; break;
case T_FuncExpr: case T_FuncExpr:
{
FuncExpr *expr = (FuncExpr *) node;
if (!get_func_leakproof(expr->funcid) &&
contain_var_clause((Node *) expr->args))
return true;
}
break;
case T_OpExpr: case T_OpExpr:
case T_DistinctExpr: /* struct-equivalent to OpExpr */ case T_DistinctExpr:
case T_NullIfExpr: /* struct-equivalent to OpExpr */ case T_NullIfExpr:
{
OpExpr *expr = (OpExpr *) node;
set_opfuncid(expr);
if (!get_func_leakproof(expr->opfuncid) &&
contain_var_clause((Node *) expr->args))
return true;
}
break;
case T_ScalarArrayOpExpr: case T_ScalarArrayOpExpr:
{
ScalarArrayOpExpr *expr = (ScalarArrayOpExpr *) node;
set_sa_opfuncid(expr);
if (!get_func_leakproof(expr->opfuncid) &&
contain_var_clause((Node *) expr->args))
return true;
}
break;
case T_CoerceViaIO: case T_CoerceViaIO:
{
CoerceViaIO *expr = (CoerceViaIO *) node;
Oid funcid;
Oid ioparam;
bool leakproof;
bool varlena;
/*
* Data may be leaked if either the input or the output
* function is leaky.
*/
getTypeInputInfo(exprType((Node *) expr->arg),
&funcid, &ioparam);
leakproof = get_func_leakproof(funcid);
/*
* If the input function is leakproof, then check the output
* function.
*/
if (leakproof)
{
getTypeOutputInfo(expr->resulttype, &funcid, &varlena);
leakproof = get_func_leakproof(funcid);
}
if (!leakproof &&
contain_var_clause((Node *) expr->arg))
return true;
}
break;
case T_ArrayCoerceExpr: case T_ArrayCoerceExpr:
{
ArrayCoerceExpr *expr = (ArrayCoerceExpr *) node;
Oid funcid;
Oid ioparam;
bool leakproof;
bool varlena;
/*
* Data may be leaked if either the input or the output
* function is leaky.
*/
getTypeInputInfo(exprType((Node *) expr->arg),
&funcid, &ioparam);
leakproof = get_func_leakproof(funcid);
/* /*
* If the input function is leakproof, then check the output * If node contains a leaky function call, and there's any Var
* function. * underneath it, reject.
*/ */
if (leakproof) if (check_functions_in_node(node, contain_leaked_vars_checker,
{ context) &&
getTypeOutputInfo(expr->resulttype, &funcid, &varlena); contain_var_clause(node))
leakproof = get_func_leakproof(funcid);
}
if (!leakproof &&
contain_var_clause((Node *) expr->arg))
return true; return true;
}
break; break;
case T_RowCompareExpr: case T_RowCompareExpr:
{ {
/*
* It's worth special-casing this because a leaky comparison
* function only compromises one pair of row elements, which
* might not contain Vars while others do.
*/
RowCompareExpr *rcexpr = (RowCompareExpr *) node; RowCompareExpr *rcexpr = (RowCompareExpr *) node;
ListCell *opid; ListCell *opid;
ListCell *larg; ListCell *larg;
ListCell *rarg; ListCell *rarg;
/*
* Check the comparison function and arguments passed to it
* for each pair of row elements.
*/
forthree(opid, rcexpr->opnos, forthree(opid, rcexpr->opnos,
larg, rcexpr->largs, larg, rcexpr->largs,
rarg, rcexpr->rargs) rarg, rcexpr->rargs)
......
...@@ -19,8 +19,8 @@ ...@@ -19,8 +19,8 @@
#include "catalog/pg_type.h" #include "catalog/pg_type.h"
#include "executor/executor.h" #include "executor/executor.h"
#include "miscadmin.h" #include "miscadmin.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/clauses.h" #include "optimizer/clauses.h"
#include "optimizer/planmain.h"
#include "optimizer/predtest.h" #include "optimizer/predtest.h"
#include "utils/array.h" #include "utils/array.h"
#include "utils/inval.h" #include "utils/inval.h"
......
...@@ -60,8 +60,8 @@ ...@@ -60,8 +60,8 @@
#include "commands/policy.h" #include "commands/policy.h"
#include "commands/trigger.h" #include "commands/trigger.h"
#include "miscadmin.h" #include "miscadmin.h"
#include "nodes/nodeFuncs.h"
#include "optimizer/clauses.h" #include "optimizer/clauses.h"
#include "optimizer/planmain.h"
#include "optimizer/prep.h" #include "optimizer/prep.h"
#include "optimizer/var.h" #include "optimizer/var.h"
#include "rewrite/rewriteDefine.h" #include "rewrite/rewriteDefine.h"
......
...@@ -25,6 +25,9 @@ ...@@ -25,6 +25,9 @@
#define QTW_EXAMINE_RTES 0x10 /* examine RTEs */ #define QTW_EXAMINE_RTES 0x10 /* examine RTEs */
#define QTW_DONT_COPY_QUERY 0x20 /* do not copy top Query */ #define QTW_DONT_COPY_QUERY 0x20 /* do not copy top Query */
/* callback function for check_functions_in_node */
typedef bool (*check_function_callback) (Oid func_id, void *context);
extern Oid exprType(const Node *expr); extern Oid exprType(const Node *expr);
extern int32 exprTypmod(const Node *expr); extern int32 exprTypmod(const Node *expr);
...@@ -40,6 +43,13 @@ extern void exprSetInputCollation(Node *expr, Oid inputcollation); ...@@ -40,6 +43,13 @@ extern void exprSetInputCollation(Node *expr, Oid inputcollation);
extern int exprLocation(const Node *expr); extern int exprLocation(const Node *expr);
extern void fix_opfuncids(Node *node);
extern void set_opfuncid(OpExpr *opexpr);
extern void set_sa_opfuncid(ScalarArrayOpExpr *opexpr);
extern bool check_functions_in_node(Node *node, check_function_callback checker,
void *context);
extern bool expression_tree_walker(Node *node, bool (*walker) (), extern bool expression_tree_walker(Node *node, bool (*walker) (),
void *context); void *context);
extern Node *expression_tree_mutator(Node *node, Node *(*mutator) (), extern Node *expression_tree_mutator(Node *node, Node *(*mutator) (),
......
...@@ -107,9 +107,6 @@ extern bool query_is_distinct_for(Query *query, List *colnos, List *opids); ...@@ -107,9 +107,6 @@ extern bool query_is_distinct_for(Query *query, List *colnos, List *opids);
* prototypes for plan/setrefs.c * prototypes for plan/setrefs.c
*/ */
extern Plan *set_plan_references(PlannerInfo *root, Plan *plan); extern Plan *set_plan_references(PlannerInfo *root, Plan *plan);
extern void fix_opfuncids(Node *node);
extern void set_opfuncid(OpExpr *opexpr);
extern void set_sa_opfuncid(ScalarArrayOpExpr *opexpr);
extern void record_plan_function_dependency(PlannerInfo *root, Oid funcid); extern void record_plan_function_dependency(PlannerInfo *root, Oid funcid);
extern void extract_query_dependencies(Node *query, extern void extract_query_dependencies(Node *query,
List **relationOids, List **relationOids,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment