Commit 07ab1383 authored by Robert Haas

Fix two more bugs in fast-path relation locking.

First, the previous code failed to account for the fact that, during Hot
Standby operation, the startup process takes AccessExclusiveLocks on
relations without setting MyDatabaseId.  This resulted in fast path
strong lock counts failing to be incremented when the startup process
took locks, which in turn allowed conflicting lock requests to succeed
when they should not have.  Report by Erik Rijkers, diagnosis by Heikki
Linnakangas.
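
To see why the old test missed these locks, here is a minimal standalone
sketch (simplified stand-ins for the PostgreSQL types and constants, not the
real lock.c): with MyDatabaseId still InvalidOid in the startup process, the
old FastPathTag() macro evaluates to false, so the strong lock count is never
bumped; the new ConflictsWithRelationFastPath() test only requires a valid
database OID in the lock tag itself.

#include <stdio.h>

typedef unsigned int Oid;

#define InvalidOid               ((Oid) 0)
#define DEFAULT_LOCKMETHOD       1
#define LOCKTAG_RELATION         0
#define ShareUpdateExclusiveLock 4
#define AccessExclusiveLock      8

/* Simplified stand-in for the real LOCKTAG struct. */
typedef struct LOCKTAG
{
    Oid     locktag_field1;         /* database OID */
    int     locktag_type;
    int     locktag_lockmethodid;
} LOCKTAG;

/* During Hot Standby, the startup process never sets this. */
static Oid  MyDatabaseId = InvalidOid;

/* Old test: requires locktag_field1 == MyDatabaseId, which can never hold
 * in the startup process, so strong locks taken there went uncounted. */
#define FastPathTag(locktag) \
    ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
    (locktag)->locktag_type == LOCKTAG_RELATION && \
    (locktag)->locktag_field1 == MyDatabaseId && \
    MyDatabaseId != InvalidOid)
#define FastPathStrongMode(mode)    ((mode) > ShareUpdateExclusiveLock)

/* New test from this commit: any valid database OID in the tag counts. */
#define ConflictsWithRelationFastPath(locktag, mode) \
    ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
    (locktag)->locktag_type == LOCKTAG_RELATION && \
    (locktag)->locktag_field1 != InvalidOid && \
    (mode) > ShareUpdateExclusiveLock)

int
main(void)
{
    /* An AccessExclusiveLock taken by the startup process on some relation
     * in database 16384 (an arbitrary example OID). */
    LOCKTAG     tag = {16384, LOCKTAG_RELATION, DEFAULT_LOCKMETHOD};

    printf("old test bumps strong count: %d\n",
           FastPathTag(&tag) && FastPathStrongMode(AccessExclusiveLock));
    printf("new test bumps strong count: %d\n",
           ConflictsWithRelationFastPath(&tag, AccessExclusiveLock));
    return 0;
}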

Second, LockReleaseAll() failed to honor the allLocks and lockmethodid
restrictions with respect to fast-path locks.  It's not clear to me
whether this produces any user-visible breakage at the moment, but it's
certainly wrong.  Rearrange the order of operations in LockReleaseAll() to fix.
Noted by Tom Lane.
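
As a rough illustration of the ordering fix (a hand-written sketch with a
hypothetical simplified structure, not the actual LockReleaseAll() code): the
old loop did the fast-path release work before the lockmethodid and
session-lock filters, so those restrictions never applied to fast-path
entries; the rearranged loop applies the filters first.

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical, pared-down LOCALLOCK: just enough fields to show the
 * order of operations. */
typedef struct
{
    int     lockmethodid;
    bool    fastpath;       /* taken via the fast-path mechanism? */
    bool    session_lock;   /* held by the session, not a transaction? */
} LocalLockSketch;

/* Corrected ordering: filter on lockmethodid and session locks BEFORE
 * doing any fast-path-specific release work. */
static void
release_all(LocalLockSketch *locks, int n, int lockmethodid, bool allLocks)
{
    for (int i = 0; i < n; i++)
    {
        LocalLockSketch *ll = &locks[i];

        /* Ignore items that are not of the lockmethod to be removed. */
        if (ll->lockmethodid != lockmethodid)
            continue;

        /* Unless releasing all locks, session locks must survive. */
        if (!allLocks && ll->session_lock)
            continue;

        /* Only now is it safe to release, fast-path or otherwise. */
        printf("releasing lock %d via %s\n", i,
               ll->fastpath ? "fast path" : "main lock table");
    }
}

int
main(void)
{
    LocalLockSketch locks[] = {
        {1, true, false},   /* released via fast path */
        {2, true, false},   /* wrong lock method: skipped */
        {1, true, true},    /* session lock: kept when !allLocks */
    };

    release_all(locks, 3, 1, false);
    return 0;
}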
parent 932ded2e
@@ -192,14 +192,17 @@ static int FastPathLocalUseCount = 0;
  * self-conflicting, it can't use the fast-path mechanism; but it also does
  * not conflict with any of the locks that do, so we can ignore it completely.
  */
-#define FastPathTag(locktag) \
-    ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
-    (locktag)->locktag_type == LOCKTAG_RELATION && \
-    (locktag)->locktag_field1 == MyDatabaseId && \
-    MyDatabaseId != InvalidOid)
-#define FastPathWeakMode(mode)      ((mode) < ShareUpdateExclusiveLock)
-#define FastPathStrongMode(mode)    ((mode) > ShareUpdateExclusiveLock)
-#define FastPathRelevantMode(mode)  ((mode) != ShareUpdateExclusiveLock)
+#define EligibleForRelationFastPath(locktag, mode) \
+    ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
+    (locktag)->locktag_type == LOCKTAG_RELATION && \
+    (locktag)->locktag_field1 == MyDatabaseId && \
+    MyDatabaseId != InvalidOid && \
+    (mode) < ShareUpdateExclusiveLock)
+#define ConflictsWithRelationFastPath(locktag, mode) \
+    ((locktag)->locktag_lockmethodid == DEFAULT_LOCKMETHOD && \
+    (locktag)->locktag_type == LOCKTAG_RELATION && \
+    (locktag)->locktag_field1 != InvalidOid && \
+    (mode) > ShareUpdateExclusiveLock)
 
 static bool FastPathGrantRelationLock(Oid relid, LOCKMODE lockmode);
 static bool FastPathUnGrantRelationLock(Oid relid, LOCKMODE lockmode);
@@ -697,26 +700,20 @@ LockAcquireExtended(const LOCKTAG *locktag,
         log_lock = true;
     }
 
-    /* Locks that participate in the fast path require special handling. */
-    if (FastPathTag(locktag) && FastPathRelevantMode(lockmode))
-    {
-        uint32      fasthashcode;
-
-        fasthashcode = FastPathStrongLockHashPartition(hashcode);
-
-        /*
-         * If we remember having filled up the fast path array, we don't
-         * attempt to make any further use of it until we release some locks.
-         * It's possible that some other backend has transferred some of those
-         * locks to the shared hash table, leaving space free, but it's not
-         * worth acquiring the LWLock just to check.  It's also possible that
-         * we're acquiring a second or third lock type on a relation we have
-         * already locked using the fast-path, but for now we don't worry about
-         * that case either.
-         */
-        if (FastPathWeakMode(lockmode)
-            && FastPathLocalUseCount < FP_LOCK_SLOTS_PER_BACKEND)
-        {
-            bool        acquired;
+    /*
+     * Attempt to take lock via fast path, if eligible.  But if we remember
+     * having filled up the fast path array, we don't attempt to make any
+     * further use of it until we release some locks.  It's possible that some
+     * other backend has transferred some of those locks to the shared hash
+     * table, leaving space free, but it's not worth acquiring the LWLock just
+     * to check.  It's also possible that we're acquiring a second or third
+     * lock type on a relation we have already locked using the fast-path, but
+     * for now we don't worry about that case either.
+     */
+    if (EligibleForRelationFastPath(locktag, lockmode)
+        && FastPathLocalUseCount < FP_LOCK_SLOTS_PER_BACKEND)
+    {
+        uint32      fasthashcode = FastPathStrongLockHashPartition(hashcode);
+        bool        acquired;
 
         /*
@@ -738,8 +735,17 @@ LockAcquireExtended(const LOCKTAG *locktag,
             return LOCKACQUIRE_OK;
         }
     }
-    else if (FastPathStrongMode(lockmode))
+
+    /*
+     * If this lock could potentially have been taken via the fast-path by
+     * some other backend, we must (temporarily) disable further use of the
+     * fast-path for this lock tag, and migrate any locks already taken via
+     * this method to the main lock table.
+     */
+    if (ConflictsWithRelationFastPath(locktag, lockmode))
     {
+        uint32      fasthashcode = FastPathStrongLockHashPartition(hashcode);
+
         BeginStrongLockAcquire(locallock, fasthashcode);
         if (!FastPathTransferRelationLocks(lockMethodTable, locktag,
                                            hashcode))
...@@ -754,10 +760,11 @@ LockAcquireExtended(const LOCKTAG *locktag, ...@@ -754,10 +760,11 @@ LockAcquireExtended(const LOCKTAG *locktag,
return LOCKACQUIRE_NOT_AVAIL; return LOCKACQUIRE_NOT_AVAIL;
} }
} }
}
/* /*
* Otherwise we've got to mess with the shared lock table. * We didn't find the lock in our LOCALLOCK table, and we didn't manage
* to take it via the fast-path, either, so we've got to mess with the
* shared lock table.
*/ */
partitionLock = LockHashPartitionLock(hashcode); partitionLock = LockHashPartitionLock(hashcode);
@@ -1688,8 +1695,8 @@ LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
     if (locallock->nLocks > 0)
         return TRUE;
 
-    /* Locks that participate in the fast path require special handling. */
-    if (FastPathTag(locktag) && FastPathWeakMode(lockmode)
+    /* Attempt fast release of any lock eligible for the fast path. */
+    if (EligibleForRelationFastPath(locktag, lockmode)
         && FastPathLocalUseCount > 0)
     {
         bool        released;
@@ -1729,7 +1736,7 @@ LockRelease(const LOCKTAG *locktag, LOCKMODE lockmode, bool sessionLock)
         PROCLOCKTAG proclocktag;
         bool        found;
 
-        Assert(FastPathTag(locktag) && FastPathWeakMode(lockmode));
+        Assert(EligibleForRelationFastPath(locktag, lockmode));
         lock = (LOCK *) hash_search_with_hash_value(LockMethodLockHash,
                                                     (const void *) locktag,
                                                     locallock->hashcode,
@@ -1830,11 +1837,6 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
 
     while ((locallock = (LOCALLOCK *) hash_seq_search(&status)) != NULL)
     {
-        if (locallock->proclock == NULL || locallock->lock == NULL)
-        {
-            LOCKMODE    lockmode = locallock->tag.mode;
-            Oid         relid;
-
         /*
          * If the LOCALLOCK entry is unused, we must've run out of shared
          * memory while trying to set up this lock.  Just forget the local
@@ -1846,12 +1848,52 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
             continue;
         }
 
+        /* Ignore items that are not of the lockmethod to be removed */
+        if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
+            continue;
+
+        /*
+         * If we are asked to release all locks, we can just zap the entry.
+         * Otherwise, must scan to see if there are session locks. We assume
+         * there is at most one lockOwners entry for session locks.
+         */
+        if (!allLocks)
+        {
+            LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
+
+            /* If it's above array position 0, move it down to 0 */
+            for (i = locallock->numLockOwners - 1; i > 0; i--)
+            {
+                if (lockOwners[i].owner == NULL)
+                {
+                    lockOwners[0] = lockOwners[i];
+                    break;
+                }
+            }
+
+            if (locallock->numLockOwners > 0 &&
+                lockOwners[0].owner == NULL &&
+                lockOwners[0].nLocks > 0)
+            {
+                /* Fix the locallock to show just the session locks */
+                locallock->nLocks = lockOwners[0].nLocks;
+                locallock->numLockOwners = 1;
+                /* We aren't deleting this locallock, so done */
+                continue;
+            }
+        }
+
         /*
-         * Otherwise, we should be dealing with a lock acquired via the
-         * fast-path.  If not, we've got trouble.
+         * If the lock or proclock pointers are NULL, this lock was taken via
+         * the relation fast-path.
          */
-        if (!FastPathTag(&locallock->tag.lock)
-            || !FastPathWeakMode(lockmode))
-            elog(PANIC, "locallock table corrupted");
+        if (locallock->proclock == NULL || locallock->lock == NULL)
+        {
+            LOCKMODE    lockmode = locallock->tag.mode;
+            Oid         relid;
+
+            /* Verify that a fast-path lock is what we've got. */
+            if (!EligibleForRelationFastPath(&locallock->tag.lock, lockmode))
+                elog(PANIC, "locallock table corrupted");
 
         /*
@@ -1894,41 +1936,6 @@ LockReleaseAll(LOCKMETHODID lockmethodid, bool allLocks)
             continue;
         }
 
-        /* Ignore items that are not of the lockmethod to be removed */
-        if (LOCALLOCK_LOCKMETHOD(*locallock) != lockmethodid)
-            continue;
-
-        /*
-         * If we are asked to release all locks, we can just zap the entry.
-         * Otherwise, must scan to see if there are session locks. We assume
-         * there is at most one lockOwners entry for session locks.
-         */
-        if (!allLocks)
-        {
-            LOCALLOCKOWNER *lockOwners = locallock->lockOwners;
-
-            /* If it's above array position 0, move it down to 0 */
-            for (i = locallock->numLockOwners - 1; i > 0; i--)
-            {
-                if (lockOwners[i].owner == NULL)
-                {
-                    lockOwners[0] = lockOwners[i];
-                    break;
-                }
-            }
-
-            if (locallock->numLockOwners > 0 &&
-                lockOwners[0].owner == NULL &&
-                lockOwners[0].nLocks > 0)
-            {
-                /* Fix the locallock to show just the session locks */
-                locallock->nLocks = lockOwners[0].nLocks;
-                locallock->numLockOwners = 1;
-                /* We aren't deleting this locallock, so done */
-                continue;
-            }
-        }
-
         /* Mark the proclock to show we need to release this lockmode */
         if (locallock->nLocks > 0)
             locallock->proclock->releaseMask |= LOCKBIT_ON(locallock->tag.mode);
@@ -2481,10 +2488,10 @@ GetLockConflicts(const LOCKTAG *locktag, LOCKMODE lockmode)
 
     /*
      * Fast path locks might not have been entered in the primary lock table.
-     * But only strong locks can conflict with anything that might have been
-     * taken via the fast-path mechanism.
+     * If the lock we're dealing with could conflict with such a lock, we must
+     * examine each backend's fast-path array for conflicts.
      */
-    if (FastPathTag(locktag) && FastPathStrongMode(lockmode))
+    if (ConflictsWithRelationFastPath(locktag, lockmode))
     {
         int         i;
         Oid         relid = locktag->locktag_field2;
@@ -2722,7 +2729,7 @@ LockRefindAndRelease(LockMethod lockMethodTable, PGPROC *proc,
      * Decrement strong lock count.  This logic is needed only for 2PC.
      */
     if (decrement_strong_lock_count
-        && FastPathTag(&lock->tag) && FastPathStrongMode(lockmode))
+        && ConflictsWithRelationFastPath(&lock->tag, lockmode))
     {
         uint32      fasthashcode = FastPathStrongLockHashPartition(hashcode);
         SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
@@ -3604,7 +3611,7 @@ lock_twophase_recover(TransactionId xid, uint16 info,
      * Bump strong lock count, to make sure any fast-path lock requests won't
      * be granted without consulting the primary lock table.
      */
-    if (FastPathTag(&lock->tag) && FastPathStrongMode(lockmode))
+    if (ConflictsWithRelationFastPath(&lock->tag, lockmode))
     {
         uint32      fasthashcode = FastPathStrongLockHashPartition(hashcode);
         SpinLockAcquire(&FastPathStrongRelationLocks->mutex);
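
The comment added in LockAcquireExtended describes a two-step handshake: bump
the per-partition strong lock count first, then migrate existing fast-path
locks into the main table. The following schematic (hypothetical simplified
state, single-threaded, not the real lock.c) shows why bumping the counter
first shuts new fast-path acquisitions out before the migration runs.

#include <stdio.h>
#include <stdbool.h>

#define FP_PARTITIONS 16    /* stand-in for the strong-lock hash partitions */

static int  strong_count[FP_PARTITIONS];    /* strong locks per partition */
static bool fastpath_slot_used;             /* one mock backend-local slot */

/* Weak lock (e.g. AccessShareLock): may use the fast path only while no
 * conflicting strong lock is recorded for its partition. */
static bool
acquire_weak(int partition)
{
    if (strong_count[partition] > 0)
        return false;           /* must go through the main lock table */
    fastpath_slot_used = true;  /* record lock in our backend-local slot */
    return true;
}

/* Strong lock (e.g. AccessExclusiveLock): bump the counter first, so new
 * fast-path acquisitions are shut out, then migrate any existing fast-path
 * locks to the main table before acquiring normally. */
static void
acquire_strong(int partition)
{
    strong_count[partition]++;      /* BeginStrongLockAcquire() analogue */
    if (fastpath_slot_used)
    {
        /* FastPathTransferRelationLocks() analogue */
        fastpath_slot_used = false;
        printf("migrated fast-path lock to main table\n");
    }
}

int
main(void)
{
    printf("weak via fast path: %d\n", acquire_weak(3));    /* 1: allowed */
    acquire_strong(3);
    printf("weak via fast path: %d\n", acquire_weak(3));    /* 0: blocked */
    return 0;
}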