Commit 82e861fb authored by Tom Lane

Fix LWLockAssign() so that it can safely be executed after postmaster
initialization.  Add spinlocking, fix EXEC_BACKEND unsafeness.
parent 77d1de3c
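For context, the point of adding the interlock is that user-defined code loaded into a backend may want to grab a dynamic LWLock long after the postmaster has finished setting up shared memory. A minimal sketch of such a caller follows; it assumes a hypothetical loadable C module (MyModuleSharedState, my_module_attach, and my_module_bump are illustrative names, not part of this patch) and that NumLWLocks() counted enough spare locks. Only LWLockAssign(), LWLockAcquire(), LWLockRelease(), and ShmemInitStruct() are real backend calls:

#include "postgres.h"
#include "storage/lwlock.h"
#include "storage/shmem.h"

/* Hypothetical shared state for a loadable module. */
typedef struct MyModuleSharedState
{
    LWLockId    lock;           /* protects the counter below */
    int         counter;
} MyModuleSharedState;

static MyModuleSharedState *my_state = NULL;

/*
 * Attach to (or create) the module's shared state.  With this commit,
 * calling LWLockAssign() here is safe even though this may run long after
 * postmaster startup: the shared LWLock counter is advanced under ShmemLock.
 */
static void
my_module_attach(void)
{
    bool        found;

    my_state = (MyModuleSharedState *)
        ShmemInitStruct("my_module shared state",
                        sizeof(MyModuleSharedState), &found);
    if (!found)
    {
        my_state->lock = LWLockAssign();
        my_state->counter = 0;
    }
}

static void
my_module_bump(void)
{
    LWLockAcquire(my_state->lock, LW_EXCLUSIVE);
    my_state->counter++;
    LWLockRelease(my_state->lock);
}

ShmemInitStruct() resolves the create-versus-attach race under its own lock, so only the first process to attach ends up calling LWLockAssign(); with this commit that call is safe even if it races against other allocations.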
src/backend/storage/ipc/shmem.c

@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.85 2005/08/20 23:26:20 tgl Exp $
+ *    $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.86 2005/10/07 21:42:38 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -71,8 +71,7 @@ SHMEM_OFFSET ShmemBase;      /* start address of shared memory */
 static SHMEM_OFFSET ShmemEnd;   /* end+1 address of shared memory */
 
-NON_EXEC_STATIC slock_t *ShmemLock;     /* spinlock for shared memory
-                                         * allocation */
+slock_t    *ShmemLock;          /* spinlock for shared memory and LWLock allocation */
 
 NON_EXEC_STATIC slock_t *ShmemIndexLock;        /* spinlock for ShmemIndex */
src/backend/storage/lmgr/lwlock.c

@@ -15,7 +15,7 @@
  * Portions Copyright (c) 1994, Regents of the University of California
  *
  * IDENTIFICATION
- *    $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.31 2005/10/07 20:11:03 tgl Exp $
+ *    $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.32 2005/10/07 21:42:38 tgl Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -27,6 +27,10 @@
 #include "storage/spin.h"
 
+
+/* We use the ShmemLock spinlock to protect LWLockAssign */
+extern slock_t *ShmemLock;
+
 typedef struct LWLock
 {
     slock_t     mutex;          /* Protects LWLock and queue of PGPROCs */
@@ -65,9 +69,6 @@ typedef union LWLockPadded
  */
 NON_EXEC_STATIC LWLockPadded *LWLockArray = NULL;
 
-/* shared counter for dynamic allocation of LWLockIds */
-static int *LWLockCounter;
-
 /*
  * We use this structure to keep track of locked LWLocks for release
@@ -159,7 +160,7 @@ LWLockShmemSize(void)
     /* Space for the LWLock array. */
     size = mul_size(numLocks, sizeof(LWLockPadded));
 
-    /* Space for shared allocation counter, plus room for alignment. */
+    /* Space for dynamic allocation counter, plus room for alignment. */
     size = add_size(size, 2 * sizeof(int) + LWLOCK_PADDED_SIZE);
 
     return size;
@@ -175,12 +176,16 @@ CreateLWLocks(void)
     int         numLocks = NumLWLocks();
     Size        spaceLocks = LWLockShmemSize();
     LWLockPadded *lock;
+    int        *LWLockCounter;
     char       *ptr;
     int         id;
 
     /* Allocate space */
     ptr = (char *) ShmemAlloc(spaceLocks);
 
+    /* Leave room for dynamic allocation counter */
+    ptr += 2 * sizeof(int);
+
     /* Ensure desired alignment of LWLock array */
     ptr += LWLOCK_PADDED_SIZE - ((unsigned long) ptr) % LWLOCK_PADDED_SIZE;
@@ -200,9 +205,10 @@ CreateLWLocks(void)
     }
 
     /*
-     * Initialize the dynamic-allocation counter at the end of the array
+     * Initialize the dynamic-allocation counter, which is stored just before
+     * the first LWLock.
      */
-    LWLockCounter = (int *) lock;
+    LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
     LWLockCounter[0] = (int) NumFixedLWLocks;
     LWLockCounter[1] = numLocks;
 }
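The pointer arithmetic above depends on the layout set up earlier in CreateLWLocks(): two ints are reserved at the front of the ShmemAlloc() block, the array start is then rounded up to LWLOCK_PADDED_SIZE, and the counter address is always re-derived as LWLockArray minus 2 * sizeof(int). Because the address is recomputed from the shared array rather than cached in a process-local static, exec'd backends under EXEC_BACKEND see the right counter too, which appears to be the unsafeness the commit message refers to. A standalone sketch of the layout, with plain malloc() standing in for ShmemAlloc() and an illustrative padding size:

#include <stdio.h>
#include <stdlib.h>

#define LWLOCK_PADDED_SIZE 32   /* illustrative; the real value is platform-dependent */

int
main(void)
{
    char       *base = malloc(4096);    /* stand-in for ShmemAlloc() */
    char       *ptr = base;
    char       *array;
    int        *counter;

    /* Leave room for the two-int dynamic allocation counter ... */
    ptr += 2 * sizeof(int);
    /* ... then align the start of the LWLock array. */
    ptr += LWLOCK_PADDED_SIZE - ((unsigned long) ptr) % LWLOCK_PADDED_SIZE;
    array = ptr;

    /* The counter address is re-derived from the array, as in the patch. */
    counter = (int *) (array - 2 * sizeof(int));
    counter[0] = 10;            /* next LWLockId to hand out */
    counter[1] = 128;           /* total number of LWLocks created */

    printf("array at %p, counter at %p\n", (void *) array, (void *) counter);
    free(base);
    return 0;
}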
@@ -211,16 +217,27 @@ CreateLWLocks(void)
 /*
  * LWLockAssign - assign a dynamically-allocated LWLock number
  *
- * NB: we do not currently try to interlock this.  Could perhaps use
- * ShmemLock spinlock if there were any need to assign LWLockIds after
- * shmem setup.
+ * We interlock this using the same spinlock that is used to protect
+ * ShmemAlloc().  Interlocking is not really necessary during postmaster
+ * startup, but it is needed if any user-defined code tries to allocate
+ * LWLocks after startup.
  */
 LWLockId
 LWLockAssign(void)
 {
+    LWLockId    result;
+    int        *LWLockCounter;
+
+    LWLockCounter = (int *) ((char *) LWLockArray - 2 * sizeof(int));
+    SpinLockAcquire(ShmemLock);
     if (LWLockCounter[0] >= LWLockCounter[1])
-        elog(FATAL, "no more LWLockIds available");
-    return (LWLockId) (LWLockCounter[0]++);
+    {
+        SpinLockRelease(ShmemLock);
+        elog(ERROR, "no more LWLockIds available");
+    }
+    result = (LWLockId) (LWLockCounter[0]++);
+    SpinLockRelease(ShmemLock);
+    return result;
 }
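One detail worth noting in the new LWLockAssign(): the spinlock is released explicitly on the out-of-locks path before elog(), because raising an error longjmps out of the function and PostgreSQL's error recovery does not release spinlocks. (The error level also drops from FATAL to ERROR, presumably because running out of dynamic LWLockIds no longer needs to take down the whole backend once cleanup is handled properly.) A generic sketch of the same pattern, with hypothetical names (SharedCounter, shared_counter_next):

#include "postgres.h"
#include "storage/spin.h"

/* Hypothetical shared structure protected by a spinlock. */
typedef struct SharedCounter
{
    slock_t     mutex;
    int         next;
    int         limit;
} SharedCounter;

/*
 * Hand out the next value; mirrors the shape of the new LWLockAssign().
 * The spinlock must be released before elog(ERROR), since the error path
 * longjmps away and error recovery will not release spinlocks for us.
 */
static int
shared_counter_next(SharedCounter *sc)
{
    int         result;

    SpinLockAcquire(&sc->mutex);
    if (sc->next >= sc->limit)
    {
        SpinLockRelease(&sc->mutex);
        elog(ERROR, "no more values available");
    }
    result = sc->next++;
    SpinLockRelease(&sc->mutex);
    return result;
}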