Commit 263865a4 authored by Noah Misch

Permit super-MaxAllocSize allocations with MemoryContextAllocHuge().

The MaxAllocSize guard is convenient for most callers, because it
reduces the need for careful attention to overflow, data type selection,
and the SET_VARSIZE() limit.  A handful of callers are happy to navigate
those hazards in exchange for the ability to allocate a larger chunk.
Introduce MemoryContextAllocHuge() and repalloc_huge().  Use this in
tuplesort.c and tuplestore.c, enabling internal sorts of up to INT_MAX
tuples, a factor-of-48 increase.  In particular, B-tree index builds can
now benefit from much-larger maintenance_work_mem settings.

Reviewed by Stephen Frost, Simon Riggs and Jeff Janes.
parent 9ef86cd9
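
For orientation before the diff: a minimal caller-side sketch of the new interface, assuming a backend built with this commit. DemoTuple and grow_demo_array() are hypothetical names invented for this example; the real callers, and the clamping policy they apply, are in the tuplesort.c and tuplestore.c hunks below.

#include "postgres.h"
#include "utils/memutils.h"

/* Illustrative element type; any fixed-size struct works the same way. */
typedef struct DemoTuple
{
    Datum value;
    bool isnull;
} DemoTuple;

static DemoTuple *
grow_demo_array(MemoryContext cxt, DemoTuple *arr, Size nelems)
{
    Size request;

    /*
     * With the MaxAllocSize guard bypassed, overflow checking and the
     * upper bound become the caller's responsibility.
     */
    if (nelems > MaxAllocHugeSize / sizeof(DemoTuple))
        elog(ERROR, "demo array would exceed MaxAllocHugeSize");
    request = nelems * sizeof(DemoTuple);

    if (arr == NULL)
        return (DemoTuple *) MemoryContextAllocHuge(cxt, request);
    return (DemoTuple *) repalloc_huge(arr, request);
}

A caller taking this route gives up the conveniences the message describes: it must bound the element count itself (tuplesort.c and tuplestore.c cap it at INT_MAX tuples) and must not hand the resulting chunk to code that assumes the SET_VARSIZE() limit.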
src/backend/utils/mmgr/aset.c

@@ -458,6 +458,7 @@ AllocSetContextCreate(MemoryContext parent,
     maxBlockSize = MAXALIGN(maxBlockSize);
     if (maxBlockSize < initBlockSize)
         maxBlockSize = initBlockSize;
+    Assert(AllocHugeSizeIsValid(maxBlockSize)); /* must be safe to double */
     context->initBlockSize = initBlockSize;
     context->maxBlockSize = maxBlockSize;
     context->nextBlockSize = initBlockSize;

@@ -643,6 +644,10 @@ AllocSetDelete(MemoryContext context)
  * AllocSetAlloc
  *      Returns pointer to allocated memory of given size; memory is added
  *      to the set.
+ *
+ * No request may exceed:
+ *      MAXALIGN_DOWN(SIZE_MAX) - ALLOC_BLOCKHDRSZ - ALLOC_CHUNKHDRSZ
+ * All callers use a much-lower limit.
  */
 static void *
 AllocSetAlloc(MemoryContext context, Size size)
...
src/backend/utils/mmgr/mcxt.c

@@ -455,14 +455,7 @@ MemoryContextContains(MemoryContext context, void *pointer)
     header = (StandardChunkHeader *)
         ((char *) pointer - STANDARDCHUNKHEADERSIZE);
 
-    /*
-     * If the context link doesn't match then we certainly have a non-member
-     * chunk.  Also check for a reasonable-looking size as extra guard against
-     * being fooled by bogus pointers.
-     */
-    if (header->context == context && AllocSizeIsValid(header->size))
-        return true;
-    return false;
+    return header->context == context;
 }
 
 /*--------------------

@@ -757,6 +750,71 @@ repalloc(void *pointer, Size size)
     return ret;
 }
 
+/*
+ * MemoryContextAllocHuge
+ *      Allocate (possibly-expansive) space within the specified context.
+ *
+ * See considerations in comment at MaxAllocHugeSize.
+ */
+void *
+MemoryContextAllocHuge(MemoryContext context, Size size)
+{
+    void *ret;
+
+    AssertArg(MemoryContextIsValid(context));
+
+    if (!AllocHugeSizeIsValid(size))
+        elog(ERROR, "invalid memory alloc request size %lu",
+             (unsigned long) size);
+
+    context->isReset = false;
+
+    ret = (*context->methods->alloc) (context, size);
+    VALGRIND_MEMPOOL_ALLOC(context, ret, size);
+
+    return ret;
+}
+
+/*
+ * repalloc_huge
+ *      Adjust the size of a previously allocated chunk, permitting a large
+ *      value.  The previous allocation need not have been "huge".
+ */
+void *
+repalloc_huge(void *pointer, Size size)
+{
+    MemoryContext context;
+    void *ret;
+
+    if (!AllocHugeSizeIsValid(size))
+        elog(ERROR, "invalid memory alloc request size %lu",
+             (unsigned long) size);
+
+    /*
+     * Try to detect bogus pointers handed to us, poorly though we can.
+     * Presumably, a pointer that isn't MAXALIGNED isn't pointing at an
+     * allocated chunk.
+     */
+    Assert(pointer != NULL);
+    Assert(pointer == (void *) MAXALIGN(pointer));
+
+    /*
+     * OK, it's probably safe to look at the chunk header.
+     */
+    context = ((StandardChunkHeader *)
+               ((char *) pointer - STANDARDCHUNKHEADERSIZE))->context;
+
+    AssertArg(MemoryContextIsValid(context));
+
+    /* isReset must be false already */
+    Assert(!context->isReset);
+
+    ret = (*context->methods->realloc) (context, pointer, size);
+    VALGRIND_MEMPOOL_CHANGE(context, pointer, ret, size);
+
+    return ret;
+}
+
 /*
  * MemoryContextStrdup
  *      Like strdup(), but allocate from the specified context
...
src/backend/utils/sort/tuplesort.c

@@ -211,8 +211,8 @@ struct Tuplesortstate
                              * tuples to return? */
     bool boundUsed;          /* true if we made use of a bounded heap */
     int bound;               /* if bounded, the maximum number of tuples */
-    long availMem;           /* remaining memory available, in bytes */
-    long allowedMem;         /* total memory allowed, in bytes */
+    Size availMem;           /* remaining memory available, in bytes */
+    Size allowedMem;         /* total memory allowed, in bytes */
     int maxTapes;            /* number of tapes (Knuth's T) */
     int tapeRange;           /* maxTapes-1 (Knuth's P) */
     MemoryContext sortcontext;   /* memory context holding all sort data */

@@ -308,7 +308,7 @@ struct Tuplesortstate
     int *mergenext;          /* first preread tuple for each source */
     int *mergelast;          /* last preread tuple for each source */
     int *mergeavailslots;    /* slots left for prereading each tape */
-    long *mergeavailmem;     /* availMem for prereading each tape */
+    Size *mergeavailmem;     /* availMem for prereading each tape */
     int mergefreelist;       /* head of freelist of recycled slots */
     int mergefirstfree;      /* first slot never used in this merge */

@@ -961,25 +961,26 @@ tuplesort_end(Tuplesortstate *state)
 }
 
 /*
- * Grow the memtuples[] array, if possible within our memory constraint.
- * Return TRUE if we were able to enlarge the array, FALSE if not.
- *
- * Normally, at each increment we double the size of the array.  When we no
- * longer have enough memory to do that, we attempt one last, smaller increase
- * (and then clear the growmemtuples flag so we don't try any more).  That
- * allows us to use allowedMem as fully as possible; sticking to the pure
- * doubling rule could result in almost half of allowedMem going unused.
- * Because availMem moves around with tuple addition/removal, we need some
- * rule to prevent making repeated small increases in memtupsize, which would
- * just be useless thrashing.  The growmemtuples flag accomplishes that and
- * also prevents useless recalculations in this function.
+ * Grow the memtuples[] array, if possible within our memory constraint.  We
+ * must not exceed INT_MAX tuples in memory or the caller-provided memory
+ * limit.  Return TRUE if we were able to enlarge the array, FALSE if not.
+ *
+ * Normally, at each increment we double the size of the array.  When doing
+ * that would exceed a limit, we attempt one last, smaller increase (and then
+ * clear the growmemtuples flag so we don't try any more).  That allows us to
+ * use memory as fully as permitted; sticking to the pure doubling rule could
+ * result in almost half going unused.  Because availMem moves around with
+ * tuple addition/removal, we need some rule to prevent making repeated small
+ * increases in memtupsize, which would just be useless thrashing.  The
+ * growmemtuples flag accomplishes that and also prevents useless
+ * recalculations in this function.
  */
 static bool
 grow_memtuples(Tuplesortstate *state)
 {
     int newmemtupsize;
     int memtupsize = state->memtupsize;
-    long memNowUsed = state->allowedMem - state->availMem;
+    Size memNowUsed = state->allowedMem - state->availMem;
 
     /* Forget it if we've already maxed out memtuples, per comment above */
     if (!state->growmemtuples)

@@ -989,14 +990,16 @@ grow_memtuples(Tuplesortstate *state)
     if (memNowUsed <= state->availMem)
     {
         /*
-         * It is surely safe to double memtupsize if we've used no more than
-         * half of allowedMem.
-         *
-         * Note: it might seem that we need to worry about memtupsize * 2
-         * overflowing an int, but the MaxAllocSize clamp applied below
-         * ensures the existing memtupsize can't be large enough for that.
+         * We've used no more than half of allowedMem; double our usage,
+         * clamping at INT_MAX.
          */
-        newmemtupsize = memtupsize * 2;
+        if (memtupsize < INT_MAX / 2)
+            newmemtupsize = memtupsize * 2;
+        else
+        {
+            newmemtupsize = INT_MAX;
+            state->growmemtuples = false;
+        }
     }
     else
     {

@@ -1012,7 +1015,8 @@ grow_memtuples(Tuplesortstate *state)
         * we've already seen, and thus we can extrapolate from the space
         * consumption so far to estimate an appropriate new size for the
         * memtuples array.  The optimal value might be higher or lower than
-        * this estimate, but it's hard to know that in advance.
+        * this estimate, but it's hard to know that in advance.  We again
+        * clamp at INT_MAX tuples.
         *
         * This calculation is safe against enlarging the array so much that
         * LACKMEM becomes true, because the memory currently used includes

@@ -1020,16 +1024,18 @@ grow_memtuples(Tuplesortstate *state)
         * new array elements even if no other memory were currently used.
         *
         * We do the arithmetic in float8, because otherwise the product of
-        * memtupsize and allowedMem could overflow.  (A little algebra shows
-        * that grow_ratio must be less than 2 here, so we are not risking
-        * integer overflow this way.)  Any inaccuracy in the result should be
-        * insignificant; but even if we computed a completely insane result,
-        * the checks below will prevent anything really bad from happening.
+        * memtupsize and allowedMem could overflow.  Any inaccuracy in the
+        * result should be insignificant; but even if we computed a
+        * completely insane result, the checks below will prevent anything
+        * really bad from happening.
         */
         double grow_ratio;
 
         grow_ratio = (double) state->allowedMem / (double) memNowUsed;
-        newmemtupsize = (int) (memtupsize * grow_ratio);
+        if (memtupsize * grow_ratio < INT_MAX)
+            newmemtupsize = (int) (memtupsize * grow_ratio);
+        else
+            newmemtupsize = INT_MAX;
 
         /* We won't make any further enlargement attempts */
         state->growmemtuples = false;

@@ -1040,12 +1046,13 @@ grow_memtuples(Tuplesortstate *state)
         goto noalloc;
 
     /*
-     * On a 64-bit machine, allowedMem could be more than MaxAllocSize.  Clamp
-     * to ensure our request won't be rejected by palloc.
+     * On a 32-bit machine, allowedMem could exceed MaxAllocHugeSize.  Clamp
+     * to ensure our request won't be rejected.  Note that we can easily
+     * exhaust address space before facing this outcome.
      */
-    if ((Size) newmemtupsize >= MaxAllocSize / sizeof(SortTuple))
+    if ((Size) newmemtupsize >= MaxAllocHugeSize / sizeof(SortTuple))
     {
-        newmemtupsize = (int) (MaxAllocSize / sizeof(SortTuple));
+        newmemtupsize = (int) (MaxAllocHugeSize / sizeof(SortTuple));
         state->growmemtuples = false;   /* can't grow any more */
     }

@@ -1060,14 +1067,14 @@ grow_memtuples(Tuplesortstate *state)
     * palloc would be treating both old and new arrays as separate chunks.
     * But we'll check LACKMEM explicitly below just in case.)
     */
-    if (state->availMem < (long) ((newmemtupsize - memtupsize) * sizeof(SortTuple)))
+    if (state->availMem < (Size) ((newmemtupsize - memtupsize) * sizeof(SortTuple)))
         goto noalloc;
 
     /* OK, do it */
     FREEMEM(state, GetMemoryChunkSpace(state->memtuples));
     state->memtupsize = newmemtupsize;
     state->memtuples = (SortTuple *)
-        repalloc(state->memtuples,
-                 state->memtupsize * sizeof(SortTuple));
+        repalloc_huge(state->memtuples,
+                      state->memtupsize * sizeof(SortTuple));
     USEMEM(state, GetMemoryChunkSpace(state->memtuples));
     if (LACKMEM(state))

@@ -1715,7 +1722,7 @@ tuplesort_getdatum(Tuplesortstate *state, bool forward,
  * This is exported for use by the planner.  allowedMem is in bytes.
  */
 int
-tuplesort_merge_order(long allowedMem)
+tuplesort_merge_order(Size allowedMem)
 {
     int mOrder;

@@ -1749,7 +1756,7 @@ inittapes(Tuplesortstate *state)
     int maxTapes,
         ntuples,
         j;
-    long tapeSpace;
+    Size tapeSpace;
 
     /* Compute number of tapes to use: merge order plus 1 */
     maxTapes = tuplesort_merge_order(state->allowedMem) + 1;

@@ -1798,7 +1805,7 @@ inittapes(Tuplesortstate *state)
     state->mergenext = (int *) palloc0(maxTapes * sizeof(int));
     state->mergelast = (int *) palloc0(maxTapes * sizeof(int));
     state->mergeavailslots = (int *) palloc0(maxTapes * sizeof(int));
-    state->mergeavailmem = (long *) palloc0(maxTapes * sizeof(long));
+    state->mergeavailmem = (Size *) palloc0(maxTapes * sizeof(Size));
     state->tp_fib = (int *) palloc0(maxTapes * sizeof(int));
     state->tp_runs = (int *) palloc0(maxTapes * sizeof(int));
     state->tp_dummy = (int *) palloc0(maxTapes * sizeof(int));

@@ -2026,7 +2033,7 @@ mergeonerun(Tuplesortstate *state)
     int srcTape;
     int tupIndex;
     SortTuple *tup;
-    long priorAvail,
+    Size priorAvail,
         spaceFreed;
 
     /*

@@ -2100,7 +2107,7 @@ beginmerge(Tuplesortstate *state)
     int tapenum;
     int srcTape;
     int slotsPerTape;
-    long spacePerTape;
+    Size spacePerTape;
 
     /* Heap should be empty here */
     Assert(state->memtupcount == 0);

@@ -2221,7 +2228,7 @@ mergeprereadone(Tuplesortstate *state, int srcTape)
     unsigned int tuplen;
     SortTuple stup;
     int tupIndex;
-    long priorAvail,
+    Size priorAvail,
         spaceUsed;
 
     if (!state->mergeactive[srcTape])
...
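
The growth policy that grow_memtuples() now follows (here and, below, in tuplestore.c) can be modelled by the standalone sketch that follows. The function name and return convention are invented for illustration, and the growmemtuples bookkeeping and memory accounting in the real code are omitted.

#include <limits.h>
#include <stddef.h>

#define MAX_ALLOC_HUGE_SIZE ((size_t) -1 >> 1)  /* mirrors MaxAllocHugeSize */

/*
 * Return the new array size, or 0 if no growth is possible under the
 * INT_MAX tuple cap, the allocator limit, and the memory budget.
 */
static int
next_memtupsize(int memtupsize, size_t elem_size,
                size_t allowed_mem, size_t avail_mem)
{
    size_t mem_now_used = allowed_mem - avail_mem;
    int newsize;

    if (mem_now_used <= avail_mem)
    {
        /* Used no more than half the budget: double, clamping at INT_MAX. */
        newsize = (memtupsize < INT_MAX / 2) ? memtupsize * 2 : INT_MAX;
    }
    else
    {
        /* One final, proportional increase, again clamped at INT_MAX. */
        double grow_ratio = (double) allowed_mem / (double) mem_now_used;

        newsize = (memtupsize * grow_ratio < INT_MAX)
            ? (int) (memtupsize * grow_ratio)
            : INT_MAX;
    }

    /* Never request more than the allocator can ever grant. */
    if ((size_t) newsize >= MAX_ALLOC_HUGE_SIZE / elem_size)
        newsize = (int) (MAX_ALLOC_HUGE_SIZE / elem_size);

    /* The result must still be actual growth... */
    if (newsize <= memtupsize)
        return 0;

    /* ...and the extra slots must fit within the remaining budget. */
    if (avail_mem < (size_t) (newsize - memtupsize) * elem_size)
        return 0;

    return newsize;
}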
src/backend/utils/sort/tuplestore.c

@@ -104,8 +104,8 @@ struct Tuplestorestate
     bool backward;           /* store extra length words in file? */
     bool interXact;          /* keep open through transactions? */
     bool truncated;          /* tuplestore_trim has removed tuples? */
-    long availMem;           /* remaining memory available, in bytes */
-    long allowedMem;         /* total memory allowed, in bytes */
+    Size availMem;           /* remaining memory available, in bytes */
+    Size allowedMem;         /* total memory allowed, in bytes */
     BufFile *myfile;         /* underlying file, or NULL if none */
     MemoryContext context;   /* memory context for holding tuples */
     ResourceOwner resowner;  /* resowner for holding temp files */

@@ -531,25 +531,26 @@ tuplestore_ateof(Tuplestorestate *state)
 }
 
 /*
- * Grow the memtuples[] array, if possible within our memory constraint.
- * Return TRUE if we were able to enlarge the array, FALSE if not.
- *
- * Normally, at each increment we double the size of the array.  When we no
- * longer have enough memory to do that, we attempt one last, smaller increase
- * (and then clear the growmemtuples flag so we don't try any more).  That
- * allows us to use allowedMem as fully as possible; sticking to the pure
- * doubling rule could result in almost half of allowedMem going unused.
- * Because availMem moves around with tuple addition/removal, we need some
- * rule to prevent making repeated small increases in memtupsize, which would
- * just be useless thrashing.  The growmemtuples flag accomplishes that and
- * also prevents useless recalculations in this function.
+ * Grow the memtuples[] array, if possible within our memory constraint.  We
+ * must not exceed INT_MAX tuples in memory or the caller-provided memory
+ * limit.  Return TRUE if we were able to enlarge the array, FALSE if not.
+ *
+ * Normally, at each increment we double the size of the array.  When doing
+ * that would exceed a limit, we attempt one last, smaller increase (and then
+ * clear the growmemtuples flag so we don't try any more).  That allows us to
+ * use memory as fully as permitted; sticking to the pure doubling rule could
+ * result in almost half going unused.  Because availMem moves around with
+ * tuple addition/removal, we need some rule to prevent making repeated small
+ * increases in memtupsize, which would just be useless thrashing.  The
+ * growmemtuples flag accomplishes that and also prevents useless
+ * recalculations in this function.
  */
 static bool
 grow_memtuples(Tuplestorestate *state)
 {
     int newmemtupsize;
     int memtupsize = state->memtupsize;
-    long memNowUsed = state->allowedMem - state->availMem;
+    Size memNowUsed = state->allowedMem - state->availMem;
 
     /* Forget it if we've already maxed out memtuples, per comment above */
     if (!state->growmemtuples)

@@ -559,14 +560,16 @@ grow_memtuples(Tuplestorestate *state)
     if (memNowUsed <= state->availMem)
     {
         /*
-         * It is surely safe to double memtupsize if we've used no more than
-         * half of allowedMem.
-         *
-         * Note: it might seem that we need to worry about memtupsize * 2
-         * overflowing an int, but the MaxAllocSize clamp applied below
-         * ensures the existing memtupsize can't be large enough for that.
+         * We've used no more than half of allowedMem; double our usage,
+         * clamping at INT_MAX.
          */
-        newmemtupsize = memtupsize * 2;
+        if (memtupsize < INT_MAX / 2)
+            newmemtupsize = memtupsize * 2;
+        else
+        {
+            newmemtupsize = INT_MAX;
+            state->growmemtuples = false;
+        }
     }
     else
     {

@@ -582,7 +585,8 @@ grow_memtuples(Tuplestorestate *state)
         * we've already seen, and thus we can extrapolate from the space
         * consumption so far to estimate an appropriate new size for the
         * memtuples array.  The optimal value might be higher or lower than
-        * this estimate, but it's hard to know that in advance.
+        * this estimate, but it's hard to know that in advance.  We again
+        * clamp at INT_MAX tuples.
         *
         * This calculation is safe against enlarging the array so much that
         * LACKMEM becomes true, because the memory currently used includes

@@ -590,16 +594,18 @@ grow_memtuples(Tuplestorestate *state)
         * new array elements even if no other memory were currently used.
         *
         * We do the arithmetic in float8, because otherwise the product of
-        * memtupsize and allowedMem could overflow.  (A little algebra shows
-        * that grow_ratio must be less than 2 here, so we are not risking
-        * integer overflow this way.)  Any inaccuracy in the result should be
-        * insignificant; but even if we computed a completely insane result,
-        * the checks below will prevent anything really bad from happening.
+        * memtupsize and allowedMem could overflow.  Any inaccuracy in the
+        * result should be insignificant; but even if we computed a
+        * completely insane result, the checks below will prevent anything
+        * really bad from happening.
         */
         double grow_ratio;
 
         grow_ratio = (double) state->allowedMem / (double) memNowUsed;
-        newmemtupsize = (int) (memtupsize * grow_ratio);
+        if (memtupsize * grow_ratio < INT_MAX)
+            newmemtupsize = (int) (memtupsize * grow_ratio);
+        else
+            newmemtupsize = INT_MAX;
 
         /* We won't make any further enlargement attempts */
         state->growmemtuples = false;

@@ -610,12 +616,13 @@ grow_memtuples(Tuplestorestate *state)
         goto noalloc;
 
     /*
-     * On a 64-bit machine, allowedMem could be more than MaxAllocSize.  Clamp
-     * to ensure our request won't be rejected by palloc.
+     * On a 32-bit machine, allowedMem could exceed MaxAllocHugeSize.  Clamp
+     * to ensure our request won't be rejected.  Note that we can easily
+     * exhaust address space before facing this outcome.
      */
-    if ((Size) newmemtupsize >= MaxAllocSize / sizeof(void *))
+    if ((Size) newmemtupsize >= MaxAllocHugeSize / sizeof(void *))
    {
-        newmemtupsize = (int) (MaxAllocSize / sizeof(void *));
+        newmemtupsize = (int) (MaxAllocHugeSize / sizeof(void *));
         state->growmemtuples = false;   /* can't grow any more */
     }

@@ -630,14 +637,14 @@ grow_memtuples(Tuplestorestate *state)
     * palloc would be treating both old and new arrays as separate chunks.
     * But we'll check LACKMEM explicitly below just in case.)
     */
-    if (state->availMem < (long) ((newmemtupsize - memtupsize) * sizeof(void *)))
+    if (state->availMem < (Size) ((newmemtupsize - memtupsize) * sizeof(void *)))
         goto noalloc;
 
     /* OK, do it */
     FREEMEM(state, GetMemoryChunkSpace(state->memtuples));
     state->memtupsize = newmemtupsize;
     state->memtuples = (void **)
-        repalloc(state->memtuples,
-                 state->memtupsize * sizeof(void *));
+        repalloc_huge(state->memtuples,
+                      state->memtupsize * sizeof(void *));
     USEMEM(state, GetMemoryChunkSpace(state->memtuples));
     if (LACKMEM(state))
...
src/include/utils/memutils.h

@@ -21,26 +21,30 @@
 /*
- * MaxAllocSize
- *      Quasi-arbitrary limit on size of allocations.
+ * MaxAllocSize, MaxAllocHugeSize
+ *      Quasi-arbitrary limits on size of allocations.
  *
  * Note:
- *      There is no guarantee that allocations smaller than MaxAllocSize
- *      will succeed.  Allocation requests larger than MaxAllocSize will
- *      be summarily denied.
+ *      There is no guarantee that smaller allocations will succeed, but
+ *      larger requests will be summarily denied.
  *
- * XXX This is deliberately chosen to correspond to the limiting size
- * of varlena objects under TOAST.  See VARSIZE_4B() and related macros
- * in postgres.h.  Many datatypes assume that any allocatable size can
- * be represented in a varlena header.
- *
- * XXX Also, various places in aset.c assume they can compute twice an
- * allocation's size without overflow, so beware of raising this.
+ * palloc() enforces MaxAllocSize, chosen to correspond to the limiting size
+ * of varlena objects under TOAST.  See VARSIZE_4B() and related macros in
+ * postgres.h.  Many datatypes assume that any allocatable size can be
+ * represented in a varlena header.  This limit also permits a caller to use
+ * an "int" variable for an index into or length of an allocation.  Callers
+ * careful to avoid these hazards can access the higher limit with
+ * MemoryContextAllocHuge().  Both limits permit code to assume that it may
+ * compute twice an allocation's size without overflow.
  */
 #define MaxAllocSize   ((Size) 0x3fffffff)   /* 1 gigabyte - 1 */
 
 #define AllocSizeIsValid(size)   ((Size) (size) <= MaxAllocSize)
 
+#define MaxAllocHugeSize   ((Size) -1 >> 1)   /* SIZE_MAX / 2 */
+
+#define AllocHugeSizeIsValid(size)   ((Size) (size) <= MaxAllocHugeSize)
+
 /*
  * All chunks allocated by any memory context manager are required to be
  * preceded by a StandardChunkHeader at a spacing of STANDARDCHUNKHEADERSIZE.
...
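
The last sentence of the new comment is the property the Assert added to AllocSetContextCreate() relies on. A small illustrative check of both limits follows; demo_limit_properties() is a made-up name, not part of the patch, and the assertions only fire in a cassert build.

#include "postgres.h"

#include <limits.h>

#include "utils/memutils.h"

static void
demo_limit_properties(Size size)
{
    /* The huge limit strictly contains the ordinary one. */
    Assert(AllocHugeSizeIsValid(MaxAllocSize));

    /* MaxAllocHugeSize is SIZE_MAX / 2, so any valid size can be doubled. */
    if (AllocHugeSizeIsValid(size))
        Assert(size + size >= size);

    /* An ordinary allocation's size or index always fits in an int. */
    if (AllocSizeIsValid(size))
        Assert(size <= (Size) INT_MAX);
}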
src/include/utils/palloc.h

@@ -51,6 +51,10 @@ extern void *MemoryContextAlloc(MemoryContext context, Size size);
 extern void *MemoryContextAllocZero(MemoryContext context, Size size);
 extern void *MemoryContextAllocZeroAligned(MemoryContext context, Size size);
 
+/* Higher-limit allocators. */
+extern void *MemoryContextAllocHuge(MemoryContext context, Size size);
+extern void *repalloc_huge(void *pointer, Size size);
+
 /*
  * The result of palloc() is always word-aligned, so we can skip testing
  * alignment of the pointer when deciding which MemSet variant to use.
...
src/include/utils/tuplesort.h

@@ -106,7 +106,7 @@ extern void tuplesort_get_stats(Tuplesortstate *state,
                                 const char **spaceType,
                                 long *spaceUsed);
 
-extern int tuplesort_merge_order(long allowedMem);
+extern int tuplesort_merge_order(Size allowedMem);
 
 /*
  * These routines may only be called if randomAccess was specified 'true'.
...