Commit 82e861fb authored by Tom Lane

Fix LWLockAssign() so that it can safely be executed after postmaster
initialization.  Add spinlocking, fix EXEC_BACKEND unsafeness.
parent 77d1de3c
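The core of the fix is a standard pattern: keep the allocation counter itself in shared memory, where EXEC_BACKEND child processes can re-derive it from `LWLockArray` instead of depending on a pointer initialized only in the postmaster, and guard its read-modify-write with a spinlock. Below is a minimal standalone sketch of that pattern, using pthread spinlocks in place of PostgreSQL's `slock_t`; all names in it are illustrative, not from the commit.

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Illustrative sketch only: a shared "next id / limit" counter pair
 * protected by a spinlock, mimicking how LWLockAssign() hands out
 * dynamically-allocated lock ids.  pthread_spinlock_t stands in for
 * PostgreSQL's slock_t.
 */
static pthread_spinlock_t alloc_lock;
static int counter[2];          /* [0] = next free id, [1] = limit */

static int
assign_id(void)
{
    int     result;

    pthread_spin_lock(&alloc_lock);
    if (counter[0] >= counter[1])
    {
        /* release before reporting failure, as the commit does */
        pthread_spin_unlock(&alloc_lock);
        fprintf(stderr, "no more ids available\n");
        exit(1);
    }
    result = counter[0]++;
    pthread_spin_unlock(&alloc_lock);
    return result;
}

int
main(void)
{
    pthread_spin_init(&alloc_lock, PTHREAD_PROCESS_PRIVATE);
    counter[0] = 4;             /* ids 0-3 are "fixed" locks */
    counter[1] = 8;             /* total pool size */

    printf("first dynamic id: %d\n", assign_id());   /* prints 4 */
    printf("next dynamic id:  %d\n", assign_id());   /* prints 5 */
    return 0;
}
```

Note the release-before-error ordering in the failure branch: PostgreSQL's `elog(ERROR)` longjmps out of the function, and a spinlock is never released automatically on that path, so it must be dropped first.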
src/backend/storage/ipc/shmem.c
@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.85 2005/08/20 23:26:20 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.86 2005/10/07 21:42:38 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -71,8 +71,7 @@ SHMEM_OFFSET ShmemBase;	/* start address of shared memory */
 static SHMEM_OFFSET ShmemEnd;	/* end+1 address of shared memory */
-NON_EXEC_STATIC slock_t *ShmemLock;	/* spinlock for shared memory
-					 * allocation */
+slock_t    *ShmemLock;		/* spinlock for shared memory and LWLock allocation */
 NON_EXEC_STATIC slock_t *ShmemIndexLock;	/* spinlock for ShmemIndex */
src/backend/storage/lmgr/lwlock.c
@@ -15,7 +15,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *	  $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.31 2005/10/07 20:11:03 tgl Exp $
+ *	  $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.32 2005/10/07 21:42:38 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -27,6 +27,10 @@
 #include "storage/spin.h"
+
+/* We use the ShmemLock spinlock to protect LWLockAssign */
+extern slock_t *ShmemLock;
+
 typedef struct LWLock
 {
 	slock_t		mutex;			/* Protects LWLock and queue of PGPROCs */
@@ -65,9 +69,6 @@ typedef union LWLockPadded
  */
 NON_EXEC_STATIC LWLockPadded *LWLockArray = NULL;
 
-/* shared counter for dynamic allocation of LWLockIds */
-static int *LWLockCounter;
-
 /*
  * We use this structure to keep track of locked LWLocks for release
@@ -159,7 +160,7 @@ LWLockShmemSize(void)
 	/* Space for the LWLock array. */
 	size = mul_size(numLocks, sizeof(LWLockPadded));
 
-	/* Space for shared allocation counter, plus room for alignment. */
+	/* Space for dynamic allocation counter, plus room for alignment. */
 	size = add_size(size, 2 * sizeof(int) + LWLOCK_PADDED_SIZE);
 
 	return size;
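`mul_size()` and `add_size()` are PostgreSQL's overflow-checked `Size` arithmetic helpers, and the extra `2 * sizeof(int) + LWLOCK_PADDED_SIZE` reserves room for the two-int counter plus worst-case alignment slop. A rough sketch of how such checked helpers behave (simplified; the real ones report failure via ereport rather than exiting, and the sizes used here are made up):

```c
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

typedef size_t Size;

static Size
add_size(Size s1, Size s2)
{
    Size    result = s1 + s2;

    /* unsigned overflow wraps around, so the sum would come out smaller */
    if (result < s1)
    {
        fprintf(stderr, "requested shared memory size overflows size_t\n");
        exit(1);
    }
    return result;
}

static Size
mul_size(Size s1, Size s2)
{
    Size    result;

    if (s1 == 0 || s2 == 0)
        return 0;
    result = s1 * s2;
    /* detect product overflow by reversing the multiplication */
    if (result / s2 != s1)
    {
        fprintf(stderr, "requested shared memory size overflows size_t\n");
        exit(1);
    }
    return result;
}

int
main(void)
{
    Size    numLocks = 128;
    Size    size;

    /* mirror LWLockShmemSize(): array, then counter + alignment slop */
    size = mul_size(numLocks, 32);      /* pretend LWLockPadded is 32 bytes */
    size = add_size(size, 2 * sizeof(int) + 32);
    printf("%zu bytes\n", size);
    return 0;
}
```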
@@ -175,12 +176,16 @@ CreateLWLocks(void)
 	int			numLocks = NumLWLocks();
 	Size		spaceLocks = LWLockShmemSize();
 	LWLockPadded *lock;
+	int		   *LWLockCounter;
 	char	   *ptr;
 	int			id;
 
 	/* Allocate space */
 	ptr = (char *) ShmemAlloc(spaceLocks);
 
+	/* Leave room for dynamic allocation counter */
+	ptr += 2 * sizeof(int);
+
 	/* Ensure desired alignment of LWLock array */
 	ptr += LWLOCK_PADDED_SIZE - ((unsigned long) ptr) % LWLOCK_PADDED_SIZE;
@@ -200,9 +205,10 @@ CreateLWLocks(void)
 	}
 
 	/*
-	 * Initialize the dynamic-allocation counter at the end of the array
+	 * Initialize the dynamic-allocation counter, which is stored just before
+	 * the first LWLock.
 	 */
-	LWLockCounter = (int *) lock;
+	LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
 	LWLockCounter[0] = (int) NumFixedLWLocks;
 	LWLockCounter[1] = numLocks;
 }
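Storing the counter just before the first LWLock, instead of in a file-scope static, is what makes this EXEC_BACKEND-safe: a child process that re-attaches to shared memory only needs `LWLockArray` to recompute the counter's address. A small sketch of the layout arithmetic under assumed sizes (`PADDED_SIZE` standing in for `LWLOCK_PADDED_SIZE`, `malloc` standing in for `ShmemAlloc`):

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PADDED_SIZE 32          /* stand-in for LWLOCK_PADDED_SIZE */

int
main(void)
{
    /* pretend this came from ShmemAlloc(): counter + slop + 4 "locks" */
    char   *ptr = malloc(2 * sizeof(int) + PADDED_SIZE + 4 * PADDED_SIZE);
    char   *array;
    int    *counter;

    /* leave room for the two-int counter, then align the array */
    ptr += 2 * sizeof(int);
    ptr += PADDED_SIZE - ((uintptr_t) ptr) % PADDED_SIZE;
    array = ptr;

    /* any process that knows `array` can re-derive the counter */
    counter = (int *) (array - 2 * sizeof(int));
    counter[0] = 1;             /* next free slot */
    counter[1] = 4;             /* total slots */

    printf("array at %p, counter at %p\n", (void *) array, (void *) counter);
    return 0;
}
```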
@@ -211,16 +217,27 @@ CreateLWLocks(void)
 /*
  * LWLockAssign - assign a dynamically-allocated LWLock number
  *
- * NB: we do not currently try to interlock this.  Could perhaps use
- * ShmemLock spinlock if there were any need to assign LWLockIds after
- * shmem setup.
+ * We interlock this using the same spinlock that is used to protect
+ * ShmemAlloc().  Interlocking is not really necessary during postmaster
+ * startup, but it is needed if any user-defined code tries to allocate
+ * LWLocks after startup.
  */
 LWLockId
 LWLockAssign(void)
 {
+	LWLockId	result;
+	int		   *LWLockCounter;
+
+	LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
+	SpinLockAcquire(ShmemLock);
 	if (LWLockCounter[0] >= LWLockCounter[1])
-		elog(FATAL, "no more LWLockIds available");
-	return (LWLockId) (LWLockCounter[0]++);
+	{
+		SpinLockRelease(ShmemLock);
+		elog(ERROR, "no more LWLockIds available");
+	}
+	result = (LWLockId) (LWLockCounter[0]++);
+	SpinLockRelease(ShmemLock);
+	return result;
 }
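With the interlock in place, dynamically loaded code can safely grab a fresh lock id at runtime, which is the use case the new comment names. A hedged sketch of what such a caller might look like (`my_lock`, `my_shared_init`, and `my_critical_section` are hypothetical names, not part of this commit; later releases would add RequestAddinLWLocks() so extensions could reserve ids up front):

```c
#include "postgres.h"
#include "storage/lwlock.h"

/* hypothetical example caller, not part of the commit */
static LWLockId my_lock;

static void
my_shared_init(void)
{
	/*
	 * Safe after this commit even outside postmaster startup: the
	 * counter update inside LWLockAssign() is now spinlock-protected,
	 * and the counter itself lives in shared memory.
	 */
	my_lock = LWLockAssign();
}

static void
my_critical_section(void)
{
	LWLockAcquire(my_lock, LW_EXCLUSIVE);
	/* ... touch shared state ... */
	LWLockRelease(my_lock);
}
```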