Commit ee895a65 authored by Tom Lane's avatar Tom Lane

Improve performance of repeated CALLs within plpgsql procedures.

This patch essentially is cleaning up technical debt left behind
by the original implementation of plpgsql procedures, particularly
commit d92bc83c.  That patch (or more precisely, follow-on patches
fixing its worst bugs) forced us to re-plan CALL and DO statements
each time through, if we're in a non-atomic context.  That wasn't
for any fundamental reason, but just because use of a saved plan
requires having a ResourceOwner to hold a reference count for the
plan, and we had no suitable resowner at hand, nor would the
available APIs support using one if we did.  While it's not that
expensive to create a "plan" for CALL/DO, the cycles do add up
in repeated executions.

This patch therefore makes the following API changes:

* GetCachedPlan/ReleaseCachedPlan are modified to let the caller
specify which resowner to use to pin the plan, rather than forcing
use of CurrentResourceOwner.

* spi.c gains a "SPI_execute_plan_extended" entry point that lets
callers say which resowner to use to pin the plan.  This borrows the
idea of an options struct from the recently added SPI_prepare_extended,
hopefully allowing future options to be added without more API breaks.
This supersedes SPI_execute_plan_with_paramlist (which I've marked
deprecated) as well as SPI_execute_plan_with_receiver (which is new
in v14, so I just took it out altogether).

* I also took the opportunity to remove the crude hack of letting
plpgsql reach into SPI private data structures to mark SPI plans as
"no_snapshot".  It's better to treat that as an option of
SPI_prepare_extended.

Now, when running a non-atomic procedure or DO block that contains
any CALL or DO commands, plpgsql creates a ResourceOwner that
will be used to pin the plans of the CALL/DO commands.  (In an
atomic context, we just use CurrentResourceOwner, as before.)
Having done this, we can just save CALL/DO plans normally,
whether or not they are used across transaction boundaries.
This seems to be good for something like 2X speedup of a CALL
of a trivial procedure with a few simple argument expressions.
By restricting the creation of an extra ResourceOwner like this,
there's essentially zero penalty in cases that can't benefit.

Pavel Stehule, with some further hacking by me

Discussion: https://postgr.es/m/CAFj8pRCLPdDAETvR7Po7gC5y_ibkn_-bOzbeJb39WHms01194Q@mail.gmail.com
parent 55ef8555
This diff is collapsed.
......@@ -230,7 +230,7 @@ ExecuteQuery(ParseState *pstate,
entry->plansource->query_string);
/* Replan if needed, and increment plan refcount for portal */
cplan = GetCachedPlan(entry->plansource, paramLI, false, NULL);
cplan = GetCachedPlan(entry->plansource, paramLI, NULL, NULL);
plan_list = cplan->stmt_list;
/*
......@@ -651,7 +651,8 @@ ExplainExecuteQuery(ExecuteStmt *execstmt, IntoClause *into, ExplainState *es,
}
/* Replan if needed, and acquire a transient refcount */
cplan = GetCachedPlan(entry->plansource, paramLI, true, queryEnv);
cplan = GetCachedPlan(entry->plansource, paramLI,
CurrentResourceOwner, queryEnv);
INSTR_TIME_SET_CURRENT(planduration);
INSTR_TIME_SUBTRACT(planduration, planstart);
......@@ -687,7 +688,7 @@ ExplainExecuteQuery(ExecuteStmt *execstmt, IntoClause *into, ExplainState *es,
if (estate)
FreeExecutorState(estate);
ReleaseCachedPlan(cplan, true);
ReleaseCachedPlan(cplan, CurrentResourceOwner);
}
/*
......
......@@ -66,8 +66,10 @@ static void _SPI_prepare_oneshot_plan(const char *src, SPIPlanPtr plan);
static int _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
Snapshot snapshot, Snapshot crosscheck_snapshot,
bool read_only, bool fire_triggers, uint64 tcount,
DestReceiver *caller_dest);
bool read_only, bool no_snapshots,
bool fire_triggers, uint64 tcount,
DestReceiver *caller_dest,
ResourceOwner plan_owner);
static ParamListInfo _SPI_convert_params(int nargs, Oid *argtypes,
Datum *Values, const char *Nulls);
......@@ -521,7 +523,9 @@ SPI_execute(const char *src, bool read_only, long tcount)
res = _SPI_execute_plan(&plan, NULL,
InvalidSnapshot, InvalidSnapshot,
read_only, true, tcount, NULL);
read_only, false,
true, tcount,
NULL, NULL);
_SPI_end_call(true);
return res;
......@@ -555,7 +559,9 @@ SPI_execute_plan(SPIPlanPtr plan, Datum *Values, const char *Nulls,
_SPI_convert_params(plan->nargs, plan->argtypes,
Values, Nulls),
InvalidSnapshot, InvalidSnapshot,
read_only, true, tcount, NULL);
read_only, false,
true, tcount,
NULL, NULL);
_SPI_end_call(true);
return res;
......@@ -570,37 +576,32 @@ SPI_execp(SPIPlanPtr plan, Datum *Values, const char *Nulls, long tcount)
/* Execute a previously prepared plan */
int
SPI_execute_plan_with_paramlist(SPIPlanPtr plan, ParamListInfo params,
bool read_only, long tcount)
SPI_execute_plan_extended(SPIPlanPtr plan,
const SPIExecuteOptions *options)
{
int res;
if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC || tcount < 0)
if (plan == NULL || plan->magic != _SPI_PLAN_MAGIC || options == NULL)
return SPI_ERROR_ARGUMENT;
res = _SPI_begin_call(true);
if (res < 0)
return res;
res = _SPI_execute_plan(plan, params,
res = _SPI_execute_plan(plan, options->params,
InvalidSnapshot, InvalidSnapshot,
read_only, true, tcount, NULL);
options->read_only, options->no_snapshots,
true, options->tcount,
options->dest, options->owner);
_SPI_end_call(true);
return res;
}
/*
* Execute a previously prepared plan. If dest isn't NULL, we send result
* tuples to the caller-supplied DestReceiver rather than through the usual
* SPI output arrangements. If dest is NULL this is equivalent to
* SPI_execute_plan_with_paramlist.
*/
/* Execute a previously prepared plan */
int
SPI_execute_plan_with_receiver(SPIPlanPtr plan,
ParamListInfo params,
bool read_only, long tcount,
DestReceiver *dest)
SPI_execute_plan_with_paramlist(SPIPlanPtr plan, ParamListInfo params,
bool read_only, long tcount)
{
int res;
......@@ -613,7 +614,9 @@ SPI_execute_plan_with_receiver(SPIPlanPtr plan,
res = _SPI_execute_plan(plan, params,
InvalidSnapshot, InvalidSnapshot,
read_only, true, tcount, dest);
read_only, false,
true, tcount,
NULL, NULL);
_SPI_end_call(true);
return res;
......@@ -654,7 +657,9 @@ SPI_execute_snapshot(SPIPlanPtr plan,
_SPI_convert_params(plan->nargs, plan->argtypes,
Values, Nulls),
snapshot, crosscheck_snapshot,
read_only, fire_triggers, tcount, NULL);
read_only, false,
fire_triggers, tcount,
NULL, NULL);
_SPI_end_call(true);
return res;
......@@ -702,7 +707,9 @@ SPI_execute_with_args(const char *src,
res = _SPI_execute_plan(&plan, paramLI,
InvalidSnapshot, InvalidSnapshot,
read_only, true, tcount, NULL);
read_only, false,
true, tcount,
NULL, NULL);
_SPI_end_call(true);
return res;
......@@ -746,7 +753,9 @@ SPI_execute_with_receiver(const char *src,
res = _SPI_execute_plan(&plan, params,
InvalidSnapshot, InvalidSnapshot,
read_only, true, tcount, dest);
read_only, false,
true, tcount,
dest, NULL);
_SPI_end_call(true);
return res;
......@@ -1554,7 +1563,7 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
*/
/* Replan if needed, and increment plan refcount for portal */
cplan = GetCachedPlan(plansource, paramLI, false, _SPI_current->queryEnv);
cplan = GetCachedPlan(plansource, paramLI, NULL, _SPI_current->queryEnv);
stmt_list = cplan->stmt_list;
if (!plan->saved)
......@@ -1568,7 +1577,7 @@ SPI_cursor_open_internal(const char *name, SPIPlanPtr plan,
oldcontext = MemoryContextSwitchTo(portal->portalContext);
stmt_list = copyObject(stmt_list);
MemoryContextSwitchTo(oldcontext);
ReleaseCachedPlan(cplan, false);
ReleaseCachedPlan(cplan, NULL);
cplan = NULL; /* portal shouldn't depend on cplan */
}
......@@ -1950,7 +1959,10 @@ SPI_plan_get_plan_sources(SPIPlanPtr plan)
/*
* SPI_plan_get_cached_plan --- get a SPI plan's generic CachedPlan,
* if the SPI plan contains exactly one CachedPlanSource. If not,
* return NULL. Caller is responsible for doing ReleaseCachedPlan().
* return NULL.
*
* The plan's refcount is incremented (and logged in CurrentResourceOwner,
* if it's a saved plan). Caller is responsible for doing ReleaseCachedPlan.
*
* This is exported so that PL/pgSQL can use it (this beats letting PL/pgSQL
* look directly into the SPIPlan for itself). It's not documented in
......@@ -1984,7 +1996,8 @@ SPI_plan_get_cached_plan(SPIPlanPtr plan)
error_context_stack = &spierrcontext;
/* Get the generic plan for the query */
cplan = GetCachedPlan(plansource, NULL, plan->saved,
cplan = GetCachedPlan(plansource, NULL,
plan->saved ? CurrentResourceOwner : NULL,
_SPI_current->queryEnv);
Assert(cplan == plansource->gplan);
......@@ -2265,16 +2278,20 @@ _SPI_prepare_oneshot_plan(const char *src, SPIPlanPtr plan)
* behavior of taking a new snapshot for each query.
* crosscheck_snapshot: for RI use, all others pass InvalidSnapshot
* read_only: true for read-only execution (no CommandCounterIncrement)
* no_snapshots: true to skip snapshot management
* fire_triggers: true to fire AFTER triggers at end of query (normal case);
* false means any AFTER triggers are postponed to end of outer query
* tcount: execution tuple-count limit, or 0 for none
* caller_dest: DestReceiver to receive output, or NULL for normal SPI output
* plan_owner: ResourceOwner that will be used to hold refcount on plan;
* if NULL, CurrentResourceOwner is used (ignored for non-saved plan)
*/
static int
_SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
Snapshot snapshot, Snapshot crosscheck_snapshot,
bool read_only, bool fire_triggers, uint64 tcount,
DestReceiver *caller_dest)
bool read_only, bool no_snapshots,
bool fire_triggers, uint64 tcount,
DestReceiver *caller_dest, ResourceOwner plan_owner)
{
int my_res = 0;
uint64 my_processed = 0;
......@@ -2315,10 +2332,10 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
* In the first two cases, we can just push the snap onto the stack once
* for the whole plan list.
*
* But if the plan has no_snapshots set to true, then don't manage
* snapshots at all. The caller should then take care of that.
* But if no_snapshots is true, then don't manage snapshots at all here.
* The caller must then take care of that.
*/
if (snapshot != InvalidSnapshot && !plan->no_snapshots)
if (snapshot != InvalidSnapshot && !no_snapshots)
{
if (read_only)
{
......@@ -2333,6 +2350,15 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
}
}
/*
* Ensure that we have a resource owner if plan is saved, and not if it
* isn't.
*/
if (!plan->saved)
plan_owner = NULL;
else if (plan_owner == NULL)
plan_owner = CurrentResourceOwner;
foreach(lc1, plan->plancache_list)
{
CachedPlanSource *plansource = (CachedPlanSource *) lfirst(lc1);
......@@ -2388,16 +2414,18 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
/*
* Replan if needed, and increment plan refcount. If it's a saved
* plan, the refcount must be backed by the CurrentResourceOwner.
* plan, the refcount must be backed by the plan_owner.
*/
cplan = GetCachedPlan(plansource, paramLI, plan->saved, _SPI_current->queryEnv);
cplan = GetCachedPlan(plansource, paramLI,
plan_owner, _SPI_current->queryEnv);
stmt_list = cplan->stmt_list;
/*
* In the default non-read-only case, get a new snapshot, replacing
* any that we pushed in a previous cycle.
*/
if (snapshot == InvalidSnapshot && !read_only && !plan->no_snapshots)
if (snapshot == InvalidSnapshot && !read_only && !no_snapshots)
{
if (pushed_active_snap)
PopActiveSnapshot();
......@@ -2450,7 +2478,7 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
* If not read-only mode, advance the command counter before each
* command and update the snapshot.
*/
if (!read_only && !plan->no_snapshots)
if (!read_only && !no_snapshots)
{
CommandCounterIncrement();
UpdateActiveSnapshotCommandId();
......@@ -2499,7 +2527,7 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
* caller must be in a nonatomic SPI context and manage
* snapshots itself.
*/
if (_SPI_current->atomic || !plan->no_snapshots)
if (_SPI_current->atomic || !no_snapshots)
context = PROCESS_UTILITY_QUERY;
else
context = PROCESS_UTILITY_QUERY_NONATOMIC;
......@@ -2586,7 +2614,7 @@ _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI,
}
/* Done with this plan, so release refcount */
ReleaseCachedPlan(cplan, plan->saved);
ReleaseCachedPlan(cplan, plan_owner);
cplan = NULL;
/*
......@@ -2606,7 +2634,7 @@ fail:
/* We no longer need the cached plan refcount, if any */
if (cplan)
ReleaseCachedPlan(cplan, plan->saved);
ReleaseCachedPlan(cplan, plan_owner);
/*
* Pop the error context stack
......
......@@ -1963,7 +1963,7 @@ exec_bind_message(StringInfo input_message)
* will be generated in MessageContext. The plan refcount will be
* assigned to the Portal, so it will be released at portal destruction.
*/
cplan = GetCachedPlan(psrc, params, false, NULL);
cplan = GetCachedPlan(psrc, params, NULL, NULL);
/*
* Now we can define the portal.
......
......@@ -533,7 +533,7 @@ ReleaseGenericPlan(CachedPlanSource *plansource)
Assert(plan->magic == CACHEDPLAN_MAGIC);
plansource->gplan = NULL;
ReleaseCachedPlan(plan, false);
ReleaseCachedPlan(plan, NULL);
}
}
......@@ -1130,16 +1130,16 @@ cached_plan_cost(CachedPlan *plan, bool include_planner)
* execution.
*
* On return, the refcount of the plan has been incremented; a later
* ReleaseCachedPlan() call is expected. The refcount has been reported
* to the CurrentResourceOwner if useResOwner is true (note that that must
* only be true if it's a "saved" CachedPlanSource).
* ReleaseCachedPlan() call is expected. If "owner" is not NULL then
* the refcount has been reported to that ResourceOwner (note that this
* is only supported for "saved" CachedPlanSources).
*
* Note: if any replanning activity is required, the caller's memory context
* is used for that work.
*/
CachedPlan *
GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams,
bool useResOwner, QueryEnvironment *queryEnv)
ResourceOwner owner, QueryEnvironment *queryEnv)
{
CachedPlan *plan = NULL;
List *qlist;
......@@ -1149,7 +1149,7 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams,
Assert(plansource->magic == CACHEDPLANSOURCE_MAGIC);
Assert(plansource->is_complete);
/* This seems worth a real test, though */
if (useResOwner && !plansource->is_saved)
if (owner && !plansource->is_saved)
elog(ERROR, "cannot apply ResourceOwner to non-saved cached plan");
/* Make sure the querytree list is valid and we have parse-time locks */
......@@ -1228,11 +1228,11 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams,
Assert(plan != NULL);
/* Flag the plan as in use by caller */
if (useResOwner)
ResourceOwnerEnlargePlanCacheRefs(CurrentResourceOwner);
if (owner)
ResourceOwnerEnlargePlanCacheRefs(owner);
plan->refcount++;
if (useResOwner)
ResourceOwnerRememberPlanCacheRef(CurrentResourceOwner, plan);
if (owner)
ResourceOwnerRememberPlanCacheRef(owner, plan);
/*
* Saved plans should be under CacheMemoryContext so they will not go away
......@@ -1253,21 +1253,21 @@ GetCachedPlan(CachedPlanSource *plansource, ParamListInfo boundParams,
* ReleaseCachedPlan: release active use of a cached plan.
*
* This decrements the reference count, and frees the plan if the count
* has thereby gone to zero. If useResOwner is true, it is assumed that
* the reference count is managed by the CurrentResourceOwner.
* has thereby gone to zero. If "owner" is not NULL, it is assumed that
* the reference count is managed by that ResourceOwner.
*
* Note: useResOwner = false is used for releasing references that are in
* Note: owner == NULL is used for releasing references that are in
* persistent data structures, such as the parent CachedPlanSource or a
* Portal. Transient references should be protected by a resource owner.
*/
void
ReleaseCachedPlan(CachedPlan *plan, bool useResOwner)
ReleaseCachedPlan(CachedPlan *plan, ResourceOwner owner)
{
Assert(plan->magic == CACHEDPLAN_MAGIC);
if (useResOwner)
if (owner)
{
Assert(plan->is_saved);
ResourceOwnerForgetPlanCacheRef(CurrentResourceOwner, plan);
ResourceOwnerForgetPlanCacheRef(owner, plan);
}
Assert(plan->refcount > 0);
plan->refcount--;
......
......@@ -310,7 +310,7 @@ PortalReleaseCachedPlan(Portal portal)
{
if (portal->cplan)
{
ReleaseCachedPlan(portal->cplan, false);
ReleaseCachedPlan(portal->cplan, NULL);
portal->cplan = NULL;
/*
......
......@@ -652,7 +652,7 @@ ResourceOwnerReleaseInternal(ResourceOwner owner,
if (isCommit)
PrintPlanCacheLeakWarning(res);
ReleaseCachedPlan(res, true);
ReleaseCachedPlan(res, owner);
}
/* Ditto for tupdesc references */
......@@ -703,18 +703,14 @@ ResourceOwnerReleaseInternal(ResourceOwner owner,
void
ResourceOwnerReleaseAllPlanCacheRefs(ResourceOwner owner)
{
ResourceOwner save;
Datum foundres;
save = CurrentResourceOwner;
CurrentResourceOwner = owner;
while (ResourceArrayGetAny(&(owner->planrefarr), &foundres))
{
CachedPlan *res = (CachedPlan *) DatumGetPointer(foundres);
ReleaseCachedPlan(res, true);
ReleaseCachedPlan(res, owner);
}
CurrentResourceOwner = save;
}
/*
......
......@@ -42,6 +42,17 @@ typedef struct SPIPrepareOptions
int cursorOptions;
} SPIPrepareOptions;
/* Optional arguments for SPI_execute_plan_extended */
typedef struct SPIExecuteOptions
{
ParamListInfo params;	/* bound parameter values, or NULL if none */
bool read_only;	/* true for read-only execution (no CommandCounterIncrement) */
bool no_snapshots;	/* true to skip snapshot management; caller handles it */
uint64 tcount;	/* execution tuple-count limit, or 0 for none */
DestReceiver *dest;	/* receiver for result tuples, or NULL for normal SPI output */
ResourceOwner owner;	/* ResourceOwner to hold the plan refcount; NULL means
			 * CurrentResourceOwner (ignored for non-saved plans) */
} SPIExecuteOptions;
/* Plans are opaque structs for standard users of SPI */
typedef struct _SPI_plan *SPIPlanPtr;
......@@ -96,13 +107,11 @@ extern int SPI_finish(void);
extern int SPI_execute(const char *src, bool read_only, long tcount);
extern int SPI_execute_plan(SPIPlanPtr plan, Datum *Values, const char *Nulls,
bool read_only, long tcount);
extern int SPI_execute_plan_extended(SPIPlanPtr plan,
const SPIExecuteOptions *options);
extern int SPI_execute_plan_with_paramlist(SPIPlanPtr plan,
ParamListInfo params,
bool read_only, long tcount);
extern int SPI_execute_plan_with_receiver(SPIPlanPtr plan,
ParamListInfo params,
bool read_only, long tcount,
DestReceiver *dest);
extern int SPI_exec(const char *src, long tcount);
extern int SPI_execp(SPIPlanPtr plan, Datum *Values, const char *Nulls,
long tcount);
......
......@@ -92,7 +92,6 @@ typedef struct _SPI_plan
int magic; /* should equal _SPI_PLAN_MAGIC */
bool saved; /* saved or unsaved plan? */
bool oneshot; /* one-shot plan? */
bool no_snapshots; /* let the caller handle the snapshots */
List *plancache_list; /* one CachedPlanSource per parsetree */
MemoryContext plancxt; /* Context containing _SPI_plan and data */
RawParseMode parse_mode; /* raw_parser() mode */
......
......@@ -219,9 +219,9 @@ extern List *CachedPlanGetTargetList(CachedPlanSource *plansource,
extern CachedPlan *GetCachedPlan(CachedPlanSource *plansource,
ParamListInfo boundParams,
bool useResOwner,
ResourceOwner owner,
QueryEnvironment *queryEnv);
extern void ReleaseCachedPlan(CachedPlan *plan, bool useResOwner);
extern void ReleaseCachedPlan(CachedPlan *plan, ResourceOwner owner);
extern bool CachedPlanAllowsSimpleValidityCheck(CachedPlanSource *plansource,
CachedPlan *plan,
......
......@@ -369,6 +369,7 @@ do_compile(FunctionCallInfo fcinfo,
function->fn_prokind = procStruct->prokind;
function->nstatements = 0;
function->requires_procedure_resowner = false;
/*
* Initialize the compiler, particularly the namespace stack. The
......@@ -903,6 +904,7 @@ plpgsql_compile_inline(char *proc_source)
function->extra_errors = 0;
function->nstatements = 0;
function->requires_procedure_resowner = false;
plpgsql_ns_init();
plpgsql_ns_push(func_name, PLPGSQL_LABEL_BLOCK);
......
This diff is collapsed.
......@@ -951,6 +951,9 @@ stmt_call : K_CALL
new->expr = read_sql_stmt();
new->is_call = true;
/* Remember we may need a procedure resource owner */
plpgsql_curr_compile->requires_procedure_resowner = true;
$$ = (PLpgSQL_stmt *)new;
}
......@@ -967,6 +970,9 @@ stmt_call : K_CALL
new->expr = read_sql_stmt();
new->is_call = false;
/* Remember we may need a procedure resource owner */
plpgsql_curr_compile->requires_procedure_resowner = true;
$$ = (PLpgSQL_stmt *)new;
}
......
......@@ -224,6 +224,7 @@ plpgsql_call_handler(PG_FUNCTION_ARGS)
bool nonatomic;
PLpgSQL_function *func;
PLpgSQL_execstate *save_cur_estate;
ResourceOwner procedure_resowner = NULL;
Datum retval;
int rc;
......@@ -246,6 +247,17 @@ plpgsql_call_handler(PG_FUNCTION_ARGS)
/* Mark the function as busy, so it can't be deleted from under us */
func->use_count++;
/*
* If we'll need a procedure-lifespan resowner to execute any CALL or DO
* statements, create it now. Since this resowner is not tied to any
* parent, failing to free it would result in process-lifespan leaks.
* Therefore, be very wary of adding any code between here and the PG_TRY
* block.
*/
if (nonatomic && func->requires_procedure_resowner)
procedure_resowner =
ResourceOwnerCreate(NULL, "PL/pgSQL procedure resources");
PG_TRY();
{
/*
......@@ -264,6 +276,7 @@ plpgsql_call_handler(PG_FUNCTION_ARGS)
else
retval = plpgsql_exec_function(func, fcinfo,
NULL, NULL,
procedure_resowner,
!nonatomic);
}
PG_FINALLY();
......@@ -271,6 +284,13 @@ plpgsql_call_handler(PG_FUNCTION_ARGS)
/* Decrement use-count, restore cur_estate */
func->use_count--;
func->cur_estate = save_cur_estate;
/* Be sure to release the procedure resowner if any */
if (procedure_resowner)
{
ResourceOwnerReleaseAllPlanCacheRefs(procedure_resowner);
ResourceOwnerDelete(procedure_resowner);
}
}
PG_END_TRY();
......@@ -333,6 +353,10 @@ plpgsql_inline_handler(PG_FUNCTION_ARGS)
* unconditionally try to clean them up below. (Hence, be wary of adding
* anything that could fail between here and the PG_TRY block.) See the
* comments for shared_simple_eval_estate.
*
* Because this resowner isn't tied to the calling transaction, we can
* also use it as the "procedure" resowner for any CALL statements. That
* helps reduce the opportunities for failure here.
*/
simple_eval_estate = CreateExecutorState();
simple_eval_resowner =
......@@ -344,6 +368,7 @@ plpgsql_inline_handler(PG_FUNCTION_ARGS)
retval = plpgsql_exec_function(func, fake_fcinfo,
simple_eval_estate,
simple_eval_resowner,
simple_eval_resowner, /* see above */
codeblock->atomic);
}
PG_CATCH();
......
......@@ -1009,9 +1009,6 @@ typedef struct PLpgSQL_function
int extra_warnings;
int extra_errors;
/* count of statements inside function */
unsigned int nstatements;
/* the datums representing the function's local variables */
int ndatums;
PLpgSQL_datum **datums;
......@@ -1020,6 +1017,10 @@ typedef struct PLpgSQL_function
/* function body parsetree */
PLpgSQL_stmt_block *action;
/* data derived while parsing body */
unsigned int nstatements; /* counter for assigning stmtids */
bool requires_procedure_resowner; /* contains CALL or DO? */
/* these fields change when the function is used */
struct PLpgSQL_execstate *cur_estate;
unsigned long use_count;
......@@ -1081,6 +1082,9 @@ typedef struct PLpgSQL_execstate
EState *simple_eval_estate;
ResourceOwner simple_eval_resowner;
/* if running nonatomic procedure or DO block, resowner to use for CALL */
ResourceOwner procedure_resowner;
/* lookup table to use for executing type casts */
HTAB *cast_hash;
MemoryContext cast_hash_context;
......@@ -1265,6 +1269,7 @@ extern Datum plpgsql_exec_function(PLpgSQL_function *func,
FunctionCallInfo fcinfo,
EState *simple_eval_estate,
ResourceOwner simple_eval_resowner,
ResourceOwner procedure_resowner,
bool atomic);
extern HeapTuple plpgsql_exec_trigger(PLpgSQL_function *func,
TriggerData *trigdata);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment