Commit 06bd458c authored by Robert Haas

Use mul_size when multiplying by the number of parallel workers.

That way, if the result overflows size_t, you'll get an error instead
of undefined behavior, which seems like a plus.  This also has the
effect of casting the number of workers from int to Size, which is
better because it's easier to overflow int than size_t.

Dilip Kumar reported this issue and provided a patch upon which this
patch is based, but his version didn't use mul_size.
parent a89505fd
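
For reference, the overflow check that mul_size performs can be sketched as a small standalone C program. This is only an illustration, not the backend's code: the real function reports overflow through ereport(ERROR, ...), and the identifiers mul_size_checked, queue_size, and nworkers below are invented for the example.

/*
 * Minimal sketch of an overflow-checked multiply in the spirit of mul_size.
 * Illustrative only: the backend raises an error instead of exiting, and
 * these names are made up for the example.
 */
#include <stdio.h>
#include <stdlib.h>

typedef size_t Size;

static Size
mul_size_checked(Size s1, Size s2)
{
	Size		result;

	if (s1 == 0 || s2 == 0)
		return 0;
	result = s1 * s2;
	/* Unsigned multiplication wraps on overflow; dividing back detects it. */
	if (result / s2 != s1)
	{
		fprintf(stderr, "requested size overflows size_t\n");
		exit(EXIT_FAILURE);
	}
	return result;
}

int
main(void)
{
	Size		queue_size = 65536;	/* stand-in for a per-worker queue size */
	int			nworkers = 8;		/* stand-in for pcxt->nworkers */

	/*
	 * Casting the int worker count to Size mirrors the widening the commit
	 * message describes; the check above then guards the multiply itself.
	 */
	printf("total: %zu bytes\n", mul_size_checked(queue_size, (Size) nworkers));
	return 0;
}
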
...@@ -241,7 +241,8 @@ InitializeParallelDSM(ParallelContext *pcxt)
 					 PARALLEL_ERROR_QUEUE_SIZE,
 					 "parallel error queue size not buffer-aligned");
 	shm_toc_estimate_chunk(&pcxt->estimator,
-						   PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
+						   mul_size(PARALLEL_ERROR_QUEUE_SIZE,
+									pcxt->nworkers));
 	shm_toc_estimate_keys(&pcxt->estimator, 1);
 	/* Estimate how much we'll need for extension entrypoint info. */
...@@ -347,7 +348,8 @@ InitializeParallelDSM(ParallelContext *pcxt)
 	 */
 	error_queue_space =
 		shm_toc_allocate(pcxt->toc,
-						 PARALLEL_ERROR_QUEUE_SIZE * pcxt->nworkers);
+						 mul_size(PARALLEL_ERROR_QUEUE_SIZE,
+								  pcxt->nworkers));
 	for (i = 0; i < pcxt->nworkers; ++i)
 	{
 		char	   *start;
......
...@@ -287,7 +287,8 @@ ExecParallelSetupTupleQueues(ParallelContext *pcxt, bool reinitialize)
 	if (!reinitialize)
 		tqueuespace =
 			shm_toc_allocate(pcxt->toc,
-							 PARALLEL_TUPLE_QUEUE_SIZE * pcxt->nworkers);
+							 mul_size(PARALLEL_TUPLE_QUEUE_SIZE,
+									  pcxt->nworkers));
 	else
 		tqueuespace = shm_toc_lookup(pcxt->toc, PARALLEL_KEY_TUPLE_QUEUE);
...@@ -296,7 +297,8 @@ ExecParallelSetupTupleQueues(ParallelContext *pcxt, bool reinitialize)
 	{
 		shm_mq	   *mq;
-		mq = shm_mq_create(tqueuespace + i * PARALLEL_TUPLE_QUEUE_SIZE,
+		mq = shm_mq_create(tqueuespace +
+						   ((Size) i) * PARALLEL_TUPLE_QUEUE_SIZE,
 						   (Size) PARALLEL_TUPLE_QUEUE_SIZE);
 		shm_mq_set_receiver(mq, MyProc);
...@@ -380,12 +382,12 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
 	 * looking at pgBufferUsage, so do it unconditionally.
 	 */
 	shm_toc_estimate_chunk(&pcxt->estimator,
-						   sizeof(BufferUsage) * pcxt->nworkers);
+						   mul_size(sizeof(BufferUsage), pcxt->nworkers));
 	shm_toc_estimate_keys(&pcxt->estimator, 1);
 	/* Estimate space for tuple queues. */
 	shm_toc_estimate_chunk(&pcxt->estimator,
-						   PARALLEL_TUPLE_QUEUE_SIZE * pcxt->nworkers);
+						   mul_size(PARALLEL_TUPLE_QUEUE_SIZE, pcxt->nworkers));
 	shm_toc_estimate_keys(&pcxt->estimator, 1);
 	/*
...@@ -404,7 +406,9 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
 			sizeof(int) * e.nnodes;
 		instrumentation_len = MAXALIGN(instrumentation_len);
 		instrument_offset = instrumentation_len;
-		instrumentation_len += sizeof(Instrumentation) * e.nnodes * nworkers;
+		instrumentation_len +=
+			mul_size(sizeof(Instrumentation),
+					 mul_size(e.nnodes, nworkers));
 		shm_toc_estimate_chunk(&pcxt->estimator, instrumentation_len);
 		shm_toc_estimate_keys(&pcxt->estimator, 1);
 	}
...@@ -432,7 +436,7 @@ ExecInitParallelPlan(PlanState *planstate, EState *estate, int nworkers)
 	/* Allocate space for each worker's BufferUsage; no need to initialize. */
 	bufusage_space = shm_toc_allocate(pcxt->toc,
-									  sizeof(BufferUsage) * pcxt->nworkers);
+									  mul_size(sizeof(BufferUsage), pcxt->nworkers));
 	shm_toc_insert(pcxt->toc, PARALLEL_KEY_BUFFER_USAGE, bufusage_space);
 	pei->buffer_usage = bufusage_space;
...@@ -511,7 +515,7 @@ ExecParallelRetrieveInstrumentation(PlanState *planstate,
 		InstrAggNode(planstate->instrument, &instrument[n]);
 	/* Also store the per-worker detail. */
-	ibytes = instrumentation->num_workers * sizeof(Instrumentation);
+	ibytes = mul_size(instrumentation->num_workers, sizeof(Instrumentation));
 	planstate->worker_instrument =
 		palloc(ibytes + offsetof(WorkerInstrumentation, instrument));
 	planstate->worker_instrument->num_workers = instrumentation->num_workers;
......