Commit 36ac359d authored by Tom Lane

Rename assorted LWLock tranches.

Choose names that fit into the conventions for wait event names
(particularly, that multi-word names are in the style MultiWordName)
and hopefully convey more information to non-hacker users than the
previous names did.

Also rename SerializablePredicateLockListLock to
SerializablePredicateListLock; the old name was long enough to cause
table formatting problems, plus the double occurrence of "Lock" seems
confusing/error-prone.

Also change a couple of particularly opaque LWLock field names.

Discussion: https://postgr.es/m/28683.1589405363@sss.pgh.pa.us
parent a0ab4f49
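These tranche names are exactly what monitoring tools see. As an illustration (this query is not part of the commit), a session blocked on WAL insertion would now show up in pg_stat_activity with wait_event = WALInsert rather than the old wal_insert, e.g. via: SELECT pid, wait_event_type, wait_event FROM pg_stat_activity WHERE wait_event_type = 'LWLock';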
@@ -117,7 +117,7 @@ GetSessionDsmHandle(void)
 dsa_space = shm_toc_allocate(toc, SESSION_DSA_SIZE);
 dsa = dsa_create_in_place(dsa_space,
 SESSION_DSA_SIZE,
-LWTRANCHE_SESSION_DSA,
+LWTRANCHE_PER_SESSION_DSA,
 seg);
 shm_toc_insert(toc, SESSION_KEY_DSA, dsa_space);
......
@@ -889,7 +889,7 @@ tbm_prepare_shared_iterate(TIDBitmap *tbm)
 pg_atomic_add_fetch_u32(&ptchunks->refcount, 1);
 /* Initialize the iterator lock */
-LWLockInitialize(&istate->lock, LWTRANCHE_TBM);
+LWLockInitialize(&istate->lock, LWTRANCHE_SHARED_TIDBITMAP);
 /* Initialize the shared iterator state */
 istate->schunkbit = 0;
......
@@ -506,7 +506,7 @@ ReplicationOriginShmemInit(void)
 {
 int i;
-replication_states_ctl->tranche_id = LWTRANCHE_REPLICATION_ORIGIN;
+replication_states_ctl->tranche_id = LWTRANCHE_REPLICATION_ORIGIN_STATE;
 MemSet(replication_states, 0, ReplicationOriginShmemSize());
......
@@ -153,7 +153,8 @@ ReplicationSlotsShmemInit(void)
 /* everything else is zeroed by the memset above */
 SpinLockInit(&slot->mutex);
-LWLockInitialize(&slot->io_in_progress_lock, LWTRANCHE_REPLICATION_SLOT_IO_IN_PROGRESS);
+LWLockInitialize(&slot->io_in_progress_lock,
+				 LWTRANCHE_REPLICATION_SLOT_IO);
 ConditionVariableInit(&slot->active_cv);
 }
 }
......
@@ -132,7 +132,7 @@ InitBufferPool(void)
 LWTRANCHE_BUFFER_CONTENT);
 LWLockInitialize(BufferDescriptorGetIOLock(buf),
-LWTRANCHE_BUFFER_IO_IN_PROGRESS);
+LWTRANCHE_BUFFER_IO);
 }
 /* Correct last entry of linked list */
......
@@ -936,13 +936,13 @@ LockAcquireExtended(const LOCKTAG *locktag,
 * FastPathStrongRelationLocks->counts becomes visible after we test
 * it has yet to begin to transfer fast-path locks.
 */
-LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE);
+LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
 if (FastPathStrongRelationLocks->count[fasthashcode] != 0)
 acquired = false;
 else
 acquired = FastPathGrantRelationLock(locktag->locktag_field2,
 lockmode);
-LWLockRelease(&MyProc->backendLock);
+LWLockRelease(&MyProc->fpInfoLock);
 if (acquired)
 {
 /*
@@ -2085,10 +2085,10 @@ LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
 * We might not find the lock here, even if we originally entered it
 * here. Another backend may have moved it to the main table.
 */
-LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE);
+LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
 released = FastPathUnGrantRelationLock(locktag->locktag_field2,
 lockmode);
-LWLockRelease(&MyProc->backendLock);
+LWLockRelease(&MyProc->fpInfoLock);
 if (released)
 {
 RemoveLocalLock(locallock);
@@ -2291,7 +2291,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
 */
 if (!have_fast_path_lwlock)
 {
-LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE);
+LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
 have_fast_path_lwlock = true;
 }
@@ -2308,7 +2308,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
 * transferred to the main lock table. That's going to require
 * some extra work, so release our fast-path lock before starting.
 */
-LWLockRelease(&MyProc->backendLock);
+LWLockRelease(&MyProc->fpInfoLock);
 have_fast_path_lwlock = false;
 /*
@@ -2334,7 +2334,7 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
 /* Done with the fast-path data structures */
 if (have_fast_path_lwlock)
-LWLockRelease(&MyProc->backendLock);
+LWLockRelease(&MyProc->fpInfoLock);
 /*
 * Now, scan each lock partition separately.
@@ -2737,7 +2737,7 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
 PGPROC *proc = &ProcGlobal->allProcs[i];
 uint32 f;
-LWLockAcquire(&proc->backendLock, LW_EXCLUSIVE);
+LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
 /*
 * If the target backend isn't referencing the same database as the
@@ -2746,8 +2746,8 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
 *
 * proc->databaseId is set at backend startup time and never changes
 * thereafter, so it might be safe to perform this test before
-* acquiring &proc->backendLock. In particular, it's certainly safe
-* to assume that if the target backend holds any fast-path locks, it
+* acquiring &proc->fpInfoLock. In particular, it's certainly safe to
+* assume that if the target backend holds any fast-path locks, it
 * must have performed a memory-fencing operation (in particular, an
 * LWLock acquisition) since setting proc->databaseId. However, it's
 * less clear that our backend is certain to have performed a memory
@@ -2756,7 +2756,7 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
 */
 if (proc->databaseId != locktag->locktag_field1)
 {
-LWLockRelease(&proc->backendLock);
+LWLockRelease(&proc->fpInfoLock);
 continue;
 }
@@ -2783,7 +2783,7 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
 if (!proclock)
 {
 LWLockRelease(partitionLock);
-LWLockRelease(&proc->backendLock);
+LWLockRelease(&proc->fpInfoLock);
 return false;
 }
 GrantLock(proclock->tag.myLock, proclock, lockmode);
@@ -2794,7 +2794,7 @@ FastPathTransferRelationLocks(LockMethod lockMethodTable, const LOCKTAG *locktag
 /* No need to examine remaining slots. */
 break;
 }
-LWLockRelease(&proc->backendLock);
+LWLockRelease(&proc->fpInfoLock);
 }
 return true;
 }
@@ -2816,7 +2816,7 @@ FastPathGetRelationLockEntry(LOCALLOCK *locallock)
 Oid relid = locktag->locktag_field2;
 uint32 f;
-LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE);
+LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
 for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; f++)
 {
@@ -2839,7 +2839,7 @@ FastPathGetRelationLockEntry(LOCALLOCK *locallock)
 if (!proclock)
 {
 LWLockRelease(partitionLock);
-LWLockRelease(&MyProc->backendLock);
+LWLockRelease(&MyProc->fpInfoLock);
 ereport(ERROR,
 (errcode(ERRCODE_OUT_OF_MEMORY),
 errmsg("out of shared memory"),
@@ -2854,7 +2854,7 @@ FastPathGetRelationLockEntry(LOCALLOCK *locallock)
 break;
 }
-LWLockRelease(&MyProc->backendLock);
+LWLockRelease(&MyProc->fpInfoLock);
 /* Lock may have already been transferred by some other backend. */
 if (proclock == NULL)
@@ -2980,7 +2980,7 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
 if (proc == MyProc)
 continue;
-LWLockAcquire(&proc->backendLock, LW_SHARED);
+LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
 /*
 * If the target backend isn't referencing the same database as
@@ -2992,7 +2992,7 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
 */
 if (proc->databaseId != locktag->locktag_field1)
 {
-LWLockRelease(&proc->backendLock);
+LWLockRelease(&proc->fpInfoLock);
 continue;
 }
@@ -3030,7 +3030,7 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode, int *countp)
 break;
 }
-LWLockRelease(&proc->backendLock);
+LWLockRelease(&proc->fpInfoLock);
 }
 }
@@ -3599,7 +3599,7 @@ GetLockStatusData(void)
 PGPROC *proc = &ProcGlobal->allProcs[i];
 uint32 f;
-LWLockAcquire(&proc->backendLock, LW_SHARED);
+LWLockAcquire(&proc->fpInfoLock, LW_SHARED);
 for (f = 0; f < FP_LOCK_SLOTS_PER_BACKEND; ++f)
 {
@@ -3659,7 +3659,7 @@ GetLockStatusData(void)
 el++;
 }
-LWLockRelease(&proc->backendLock);
+LWLockRelease(&proc->fpInfoLock);
 }
 /*
@@ -4381,7 +4381,7 @@ lock_twophase_postabort(TransactionId xid, uint16 info,
 * as MyProc->lxid, you might wonder if we really need both. The
 * difference is that MyProc->lxid is set and cleared unlocked, and
 * examined by procarray.c, while fpLocalTransactionId is protected by
-* backendLock and is used only by the locking subsystem. Doing it this
+* fpInfoLock and is used only by the locking subsystem. Doing it this
 * way makes it easier to verify that there are no funny race conditions.
 *
 * We don't bother recording this lock in the local lock table, since it's
@@ -4393,7 +4393,7 @@ VirtualXactLockTableInsert(VirtualTransactionId vxid)
 {
 Assert(VirtualTransactionIdIsValid(vxid));
-LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE);
+LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
 Assert(MyProc->backendId == vxid.backendId);
 Assert(MyProc->fpLocalTransactionId == InvalidLocalTransactionId);
@@ -4402,7 +4402,7 @@ VirtualXactLockTableInsert(VirtualTransactionId vxid)
 MyProc->fpVXIDLock = true;
 MyProc->fpLocalTransactionId = vxid.localTransactionId;
-LWLockRelease(&MyProc->backendLock);
+LWLockRelease(&MyProc->fpInfoLock);
 }
 /*
@@ -4422,14 +4422,14 @@ VirtualXactLockTableCleanup(void)
 /*
 * Clean up shared memory state.
 */
-LWLockAcquire(&MyProc->backendLock, LW_EXCLUSIVE);
+LWLockAcquire(&MyProc->fpInfoLock, LW_EXCLUSIVE);
 fastpath = MyProc->fpVXIDLock;
 lxid = MyProc->fpLocalTransactionId;
 MyProc->fpVXIDLock = false;
 MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
-LWLockRelease(&MyProc->backendLock);
+LWLockRelease(&MyProc->fpInfoLock);
 /*
 * If fpVXIDLock has been cleared without touching fpLocalTransactionId,
@@ -4485,13 +4485,13 @@ VirtualXactLock(VirtualTransactionId vxid, bool wait)
 * against the ones we're waiting for. The target backend will only set
 * or clear lxid while holding this lock.
 */
-LWLockAcquire(&proc->backendLock, LW_EXCLUSIVE);
+LWLockAcquire(&proc->fpInfoLock, LW_EXCLUSIVE);
 /* If the transaction has ended, our work here is done. */
 if (proc->backendId != vxid.backendId
 || proc->fpLocalTransactionId != vxid.localTransactionId)
 {
-LWLockRelease(&proc->backendLock);
+LWLockRelease(&proc->fpInfoLock);
 return true;
 }
@@ -4501,7 +4501,7 @@ VirtualXactLock(VirtualTransactionId vxid, bool wait)
 */
 if (!wait)
 {
-LWLockRelease(&proc->backendLock);
+LWLockRelease(&proc->fpInfoLock);
 return false;
 }
@@ -4526,7 +4526,7 @@ VirtualXactLock(VirtualTransactionId vxid, bool wait)
 if (!proclock)
 {
 LWLockRelease(partitionLock);
-LWLockRelease(&proc->backendLock);
+LWLockRelease(&proc->fpInfoLock);
 ereport(ERROR,
 (errcode(ERRCODE_OUT_OF_MEMORY),
 errmsg("out of shared memory"),
@@ -4540,7 +4540,7 @@ VirtualXactLock(VirtualTransactionId vxid, bool wait)
 }
 /* Done with proc->fpLockBits */
-LWLockRelease(&proc->backendLock);
+LWLockRelease(&proc->fpInfoLock);
 /* Time to wait. */
 (void) LockAcquire(&tag, ShareLock, false, false);
......
@@ -121,6 +121,9 @@ extern slock_t *ShmemLock;
 * 3. Extensions can create new tranches, via either RequestNamedLWLockTranche
 * or LWLockRegisterTranche. The names of these that are known in the current
 * process appear in LWLockTrancheNames[].
+*
+* All these names are user-visible as wait event names, so choose with care
+* ... and do not forget to update the documentation's list of wait events.
 */
 static const char *const BuiltinTrancheNames[] = {
@@ -139,41 +142,41 @@ static const char *const BuiltinTrancheNames[] = {
 /* LWTRANCHE_SERIAL_BUFFER: */
 "SerialBuffer",
 /* LWTRANCHE_WAL_INSERT: */
-"wal_insert",
+"WALInsert",
 /* LWTRANCHE_BUFFER_CONTENT: */
-"buffer_content",
-/* LWTRANCHE_BUFFER_IO_IN_PROGRESS: */
-"buffer_io",
-/* LWTRANCHE_REPLICATION_ORIGIN: */
-"replication_origin",
-/* LWTRANCHE_REPLICATION_SLOT_IO_IN_PROGRESS: */
-"replication_slot_io",
-/* LWTRANCHE_PROC: */
-"proc",
+"BufferContent",
+/* LWTRANCHE_BUFFER_IO: */
+"BufferIO",
+/* LWTRANCHE_REPLICATION_ORIGIN_STATE: */
+"ReplicationOriginState",
+/* LWTRANCHE_REPLICATION_SLOT_IO: */
+"ReplicationSlotIO",
+/* LWTRANCHE_LOCK_FASTPATH: */
+"LockFastPath",
 /* LWTRANCHE_BUFFER_MAPPING: */
-"buffer_mapping",
+"BufferMapping",
 /* LWTRANCHE_LOCK_MANAGER: */
-"lock_manager",
+"LockManager",
 /* LWTRANCHE_PREDICATE_LOCK_MANAGER: */
-"predicate_lock_manager",
+"PredicateLockManager",
 /* LWTRANCHE_PARALLEL_HASH_JOIN: */
-"parallel_hash_join",
+"ParallelHashJoin",
 /* LWTRANCHE_PARALLEL_QUERY_DSA: */
-"parallel_query_dsa",
-/* LWTRANCHE_SESSION_DSA: */
-"session_dsa",
-/* LWTRANCHE_SESSION_RECORD_TABLE: */
-"session_record_table",
-/* LWTRANCHE_SESSION_TYPMOD_TABLE: */
-"session_typmod_table",
+"ParallelQueryDSA",
+/* LWTRANCHE_PER_SESSION_DSA: */
+"PerSessionDSA",
+/* LWTRANCHE_PER_SESSION_RECORD_TYPE: */
+"PerSessionRecordType",
+/* LWTRANCHE_PER_SESSION_RECORD_TYPMOD: */
+"PerSessionRecordTypmod",
 /* LWTRANCHE_SHARED_TUPLESTORE: */
-"shared_tuplestore",
-/* LWTRANCHE_TBM: */
-"tbm",
+"SharedTupleStore",
+/* LWTRANCHE_SHARED_TIDBITMAP: */
+"SharedTidBitmap",
 /* LWTRANCHE_PARALLEL_APPEND: */
-"parallel_append",
-/* LWTRANCHE_SXACT: */
-"serializable_xact"
+"ParallelAppend",
+/* LWTRANCHE_PER_XACT_PREDICATE_LIST: */
+"PerXactPredicateList"
 };
 StaticAssertDecl(lengthof(BuiltinTrancheNames) ==
@@ -640,7 +643,10 @@ LWLockNewTrancheId(void)
 *
 * This routine will save a pointer to the tranche name passed as an argument,
 * so the name should be allocated in a backend-lifetime context
-* (TopMemoryContext, static constant, or similar).
+* (shared memory, TopMemoryContext, static constant, or similar).
+*
+* The tranche name will be user-visible as a wait event name, so try to
+* use a name that fits the style for those.
 */
 void
 LWLockRegisterTranche(int tranche_id, const char *tranche_name)
@@ -690,6 +696,9 @@ LWLockRegisterTranche(int tranche_id, const char *tranche_name)
 * will be ignored. (We could raise an error, but it seems better to make
 * it a no-op, so that libraries containing such calls can be reloaded if
 * needed.)
+*
+* The tranche name will be user-visible as a wait event name, so try to
+* use a name that fits the style for those.
 */
 void
 RequestNamedLWLockTranche(const char *tranche_name, int num_lwlocks)
......
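To make the naming guidance in the two comments above concrete, here is a minimal sketch of the RequestNamedLWLockTranche path as an extension might use it. It is not part of this commit, and the extension, function, and lock names are hypothetical; the tranche name follows the MultiWordName style the commit standardizes, since it surfaces verbatim as a wait event:

/* Hypothetical extension code: reserve and attach one LWLock in a
 * named tranche.  The library must be loaded via
 * shared_preload_libraries for this path to work. */
#include "postgres.h"
#include "storage/lwlock.h"

static LWLock *my_ext_lock = NULL;

/* Called from _PG_init(): ask the postmaster to set up one LWLock
 * under a tranche whose name doubles as the user-visible wait event. */
static void
my_ext_request_locks(void)
{
	RequestNamedLWLockTranche("MyExtensionState", 1);
}

/* Called from the shmem_startup_hook: fetch the lock allocated above. */
static void
my_ext_attach_locks(void)
{
	my_ext_lock = &(GetNamedLWLockTranche("MyExtensionState")->lock);
}

Waits on my_ext_lock would then be reported with wait_event = MyExtensionState.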
@@ -2,7 +2,8 @@
 # these are defined here. If you add a lock, add it to the end to avoid
 # renumbering the existing locks; if you remove a lock, consider leaving a gap
 # in the numbering sequence for the benefit of DTrace and other external
-# debugging scripts.
+# debugging scripts. Also, do not forget to update the list of wait events
+# in the user documentation.
 # 0 is available; was formerly BufFreelistLock
 ShmemIndexLock 1
@@ -34,7 +35,7 @@ NotifySLRULock 26
 NotifyQueueLock 27
 SerializableXactHashLock 28
 SerializableFinishedListLock 29
-SerializablePredicateLockListLock 30
+SerializablePredicateListLock 30
 SerialSLRULock 31
 SyncRepLock 32
 BackgroundWorkerLock 33
......
@@ -221,7 +221,7 @@ InitProcGlobal(void)
 /* Common initialization for all PGPROCs, regardless of type. */
 /*
-* Set up per-PGPROC semaphore, latch, and backendLock. Prepared xact
+* Set up per-PGPROC semaphore, latch, and fpInfoLock. Prepared xact
 * dummy PGPROCs don't need these though - they're never associated
 * with a real process
 */
@@ -229,7 +229,7 @@ InitProcGlobal(void)
 {
 procs[i].sem = PGSemaphoreCreate();
 InitSharedLatch(&(procs[i].procLatch));
-LWLockInitialize(&(procs[i].backendLock), LWTRANCHE_PROC);
+LWLockInitialize(&(procs[i].fpInfoLock), LWTRANCHE_LOCK_FASTPATH);
 }
 procs[i].pgprocno = i;
......
@@ -256,7 +256,7 @@ static const dshash_parameters srtr_record_table_params = {
 sizeof(SharedRecordTableEntry),
 shared_record_table_compare,
 shared_record_table_hash,
-LWTRANCHE_SESSION_RECORD_TABLE
+LWTRANCHE_PER_SESSION_RECORD_TYPE
 };
 /* Parameters for SharedRecordTypmodRegistry's typmod hash table. */
@@ -265,7 +265,7 @@ static const dshash_parameters srtr_typmod_table_params = {
 sizeof(SharedTypmodTableEntry),
 dshash_memcmp,
 dshash_memhash,
-LWTRANCHE_SESSION_TYPMOD_TABLE
+LWTRANCHE_PER_SESSION_RECORD_TYPMOD
 };
 /* hashtable for recognizing registered record types */
......
@@ -204,22 +204,22 @@ typedef enum BuiltinTrancheIds
 LWTRANCHE_SERIAL_BUFFER,
 LWTRANCHE_WAL_INSERT,
 LWTRANCHE_BUFFER_CONTENT,
-LWTRANCHE_BUFFER_IO_IN_PROGRESS,
-LWTRANCHE_REPLICATION_ORIGIN,
-LWTRANCHE_REPLICATION_SLOT_IO_IN_PROGRESS,
-LWTRANCHE_PROC,
+LWTRANCHE_BUFFER_IO,
+LWTRANCHE_REPLICATION_ORIGIN_STATE,
+LWTRANCHE_REPLICATION_SLOT_IO,
+LWTRANCHE_LOCK_FASTPATH,
 LWTRANCHE_BUFFER_MAPPING,
 LWTRANCHE_LOCK_MANAGER,
 LWTRANCHE_PREDICATE_LOCK_MANAGER,
 LWTRANCHE_PARALLEL_HASH_JOIN,
 LWTRANCHE_PARALLEL_QUERY_DSA,
-LWTRANCHE_SESSION_DSA,
-LWTRANCHE_SESSION_RECORD_TABLE,
-LWTRANCHE_SESSION_TYPMOD_TABLE,
+LWTRANCHE_PER_SESSION_DSA,
+LWTRANCHE_PER_SESSION_RECORD_TYPE,
+LWTRANCHE_PER_SESSION_RECORD_TYPMOD,
 LWTRANCHE_SHARED_TUPLESTORE,
-LWTRANCHE_TBM,
+LWTRANCHE_SHARED_TIDBITMAP,
 LWTRANCHE_PARALLEL_APPEND,
-LWTRANCHE_SXACT,
+LWTRANCHE_PER_XACT_PREDICATE_LIST,
 LWTRANCHE_FIRST_USER_DEFINED
 } BuiltinTrancheIds;
......
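LWTRANCHE_FIRST_USER_DEFINED above is where LWLockNewTrancheId() starts handing out tranche IDs at runtime. As a rough sketch of that second registration path (not from this commit; all names are hypothetical, and shared-memory allocation and error handling are omitted), an extension that builds locks dynamically records the ID in shared memory and registers the name in each backend:

/* Hypothetical: a shared structure with a dynamically numbered tranche. */
#include "postgres.h"
#include "storage/lwlock.h"

typedef struct MySharedState
{
	int		tranche_id;		/* saved so attaching backends can register it */
	LWLock	lock;			/* lives in shared memory with the struct */
} MySharedState;

/* Run once, by whichever process creates the shared structure. */
static void
my_shared_state_create(MySharedState *state)
{
	state->tranche_id = LWLockNewTrancheId();
	/* A string literal satisfies the lifetime rule noted in the
	 * LWLockRegisterTranche comment; MultiWordName style again. */
	LWLockRegisterTranche(state->tranche_id, "MyExtensionWorkQueue");
	LWLockInitialize(&state->lock, state->tranche_id);
}

/* Run by every other backend before it touches state->lock, since
 * tranche names are only known per-process. */
static void
my_shared_state_attach(MySharedState *state)
{
	LWLockRegisterTranche(state->tranche_id, "MyExtensionWorkQueue");
}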
@@ -92,8 +92,12 @@ typedef struct SERIALIZABLEXACT
 SHM_QUEUE finishedLink; /* list link in
 * FinishedSerializableTransactions */
-LWLock predicateLockListLock; /* protects predicateLocks in parallel
-* mode */
+/*
+* perXactPredicateListLock is only used in parallel queries: it protects
+* this SERIALIZABLEXACT's predicate lock list against other workers of
+* the same session.
+*/
+LWLock perXactPredicateListLock;
 /*
 * for r/o transactions: list of concurrent r/w transactions that we could
......
@@ -188,10 +188,8 @@ struct PGPROC
 XLogRecPtr clogGroupMemberLsn; /* WAL location of commit record for clog
 * group member */
-/* Per-backend LWLock. Protects fields below (but not group fields). */
-LWLock backendLock;
 /* Lock manager data, recording fast-path locks taken by this backend. */
+LWLock fpInfoLock; /* protects per-backend fast-path state */
 uint64 fpLockBits; /* lock modes held for each fast-path slot */
 Oid fpRelId[FP_LOCK_SLOTS_PER_BACKEND]; /* slots for rel oids */
 bool fpVXIDLock; /* are we holding a fast-path VXID lock? */
......