Commit 195f1642 authored by Tom Lane

Get rid of the SpinLockAcquire/SpinLockAcquire_NoHoldoff distinction

in favor of having just one set of macros that don't do HOLD/RESUME_INTERRUPTS
(hence, these correspond to the old SpinLockAcquire_NoHoldoff case).
Given our coding rules for spinlock use, there is no reason to allow
CHECK_FOR_INTERRUPTS to be done while holding a spinlock, and also there
is no situation where ImmediateInterruptOK will be true while holding a
spinlock.  Therefore doing HOLD/RESUME_INTERRUPTS while taking/releasing a
spinlock is just a waste of cycles.  Qingqing Zhou and Tom Lane.
parent e135d963
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California * Portions Copyright (c) 1994, Regents of the University of California
* *
* $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.224 2005/12/28 23:22:50 tgl Exp $ * $PostgreSQL: pgsql/src/backend/access/transam/xlog.c,v 1.225 2005/12/29 18:08:05 tgl Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
...@@ -695,10 +695,10 @@ begin:; ...@@ -695,10 +695,10 @@ begin:;
/* use volatile pointer to prevent code rearrangement */ /* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl; volatile XLogCtlData *xlogctl = XLogCtl;
SpinLockAcquire_NoHoldoff(&xlogctl->info_lck); SpinLockAcquire(&xlogctl->info_lck);
LogwrtRqst = xlogctl->LogwrtRqst; LogwrtRqst = xlogctl->LogwrtRqst;
LogwrtResult = xlogctl->LogwrtResult; LogwrtResult = xlogctl->LogwrtResult;
SpinLockRelease_NoHoldoff(&xlogctl->info_lck); SpinLockRelease(&xlogctl->info_lck);
} }
/* /*
...@@ -940,13 +940,13 @@ begin:; ...@@ -940,13 +940,13 @@ begin:;
/* use volatile pointer to prevent code rearrangement */ /* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl; volatile XLogCtlData *xlogctl = XLogCtl;
SpinLockAcquire_NoHoldoff(&xlogctl->info_lck); SpinLockAcquire(&xlogctl->info_lck);
/* advance global request to include new block(s) */ /* advance global request to include new block(s) */
if (XLByteLT(xlogctl->LogwrtRqst.Write, WriteRqst)) if (XLByteLT(xlogctl->LogwrtRqst.Write, WriteRqst))
xlogctl->LogwrtRqst.Write = WriteRqst; xlogctl->LogwrtRqst.Write = WriteRqst;
/* update local result copy while I have the chance */ /* update local result copy while I have the chance */
LogwrtResult = xlogctl->LogwrtResult; LogwrtResult = xlogctl->LogwrtResult;
SpinLockRelease_NoHoldoff(&xlogctl->info_lck); SpinLockRelease(&xlogctl->info_lck);
} }
ProcLastRecEnd = RecPtr; ProcLastRecEnd = RecPtr;
...@@ -1175,11 +1175,11 @@ AdvanceXLInsertBuffer(void) ...@@ -1175,11 +1175,11 @@ AdvanceXLInsertBuffer(void)
/* use volatile pointer to prevent code rearrangement */ /* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl; volatile XLogCtlData *xlogctl = XLogCtl;
SpinLockAcquire_NoHoldoff(&xlogctl->info_lck); SpinLockAcquire(&xlogctl->info_lck);
if (XLByteLT(xlogctl->LogwrtRqst.Write, FinishedPageRqstPtr)) if (XLByteLT(xlogctl->LogwrtRqst.Write, FinishedPageRqstPtr))
xlogctl->LogwrtRqst.Write = FinishedPageRqstPtr; xlogctl->LogwrtRqst.Write = FinishedPageRqstPtr;
LogwrtResult = xlogctl->LogwrtResult; LogwrtResult = xlogctl->LogwrtResult;
SpinLockRelease_NoHoldoff(&xlogctl->info_lck); SpinLockRelease(&xlogctl->info_lck);
} }
update_needed = false; /* Did the shared-request update */ update_needed = false; /* Did the shared-request update */
...@@ -1560,13 +1560,13 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible) ...@@ -1560,13 +1560,13 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible)
/* use volatile pointer to prevent code rearrangement */ /* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl; volatile XLogCtlData *xlogctl = XLogCtl;
SpinLockAcquire_NoHoldoff(&xlogctl->info_lck); SpinLockAcquire(&xlogctl->info_lck);
xlogctl->LogwrtResult = LogwrtResult; xlogctl->LogwrtResult = LogwrtResult;
if (XLByteLT(xlogctl->LogwrtRqst.Write, LogwrtResult.Write)) if (XLByteLT(xlogctl->LogwrtRqst.Write, LogwrtResult.Write))
xlogctl->LogwrtRqst.Write = LogwrtResult.Write; xlogctl->LogwrtRqst.Write = LogwrtResult.Write;
if (XLByteLT(xlogctl->LogwrtRqst.Flush, LogwrtResult.Flush)) if (XLByteLT(xlogctl->LogwrtRqst.Flush, LogwrtResult.Flush))
xlogctl->LogwrtRqst.Flush = LogwrtResult.Flush; xlogctl->LogwrtRqst.Flush = LogwrtResult.Flush;
SpinLockRelease_NoHoldoff(&xlogctl->info_lck); SpinLockRelease(&xlogctl->info_lck);
} }
Write->LogwrtResult = LogwrtResult; Write->LogwrtResult = LogwrtResult;
...@@ -1618,11 +1618,11 @@ XLogFlush(XLogRecPtr record) ...@@ -1618,11 +1618,11 @@ XLogFlush(XLogRecPtr record)
/* use volatile pointer to prevent code rearrangement */ /* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl; volatile XLogCtlData *xlogctl = XLogCtl;
SpinLockAcquire_NoHoldoff(&xlogctl->info_lck); SpinLockAcquire(&xlogctl->info_lck);
if (XLByteLT(WriteRqstPtr, xlogctl->LogwrtRqst.Write)) if (XLByteLT(WriteRqstPtr, xlogctl->LogwrtRqst.Write))
WriteRqstPtr = xlogctl->LogwrtRqst.Write; WriteRqstPtr = xlogctl->LogwrtRqst.Write;
LogwrtResult = xlogctl->LogwrtResult; LogwrtResult = xlogctl->LogwrtResult;
SpinLockRelease_NoHoldoff(&xlogctl->info_lck); SpinLockRelease(&xlogctl->info_lck);
} }
/* done already? */ /* done already? */
...@@ -4984,10 +4984,10 @@ GetRedoRecPtr(void) ...@@ -4984,10 +4984,10 @@ GetRedoRecPtr(void)
/* use volatile pointer to prevent code rearrangement */ /* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl; volatile XLogCtlData *xlogctl = XLogCtl;
SpinLockAcquire_NoHoldoff(&xlogctl->info_lck); SpinLockAcquire(&xlogctl->info_lck);
Assert(XLByteLE(RedoRecPtr, xlogctl->Insert.RedoRecPtr)); Assert(XLByteLE(RedoRecPtr, xlogctl->Insert.RedoRecPtr));
RedoRecPtr = xlogctl->Insert.RedoRecPtr; RedoRecPtr = xlogctl->Insert.RedoRecPtr;
SpinLockRelease_NoHoldoff(&xlogctl->info_lck); SpinLockRelease(&xlogctl->info_lck);
return RedoRecPtr; return RedoRecPtr;
} }
...@@ -5165,9 +5165,9 @@ CreateCheckPoint(bool shutdown, bool force) ...@@ -5165,9 +5165,9 @@ CreateCheckPoint(bool shutdown, bool force)
/* use volatile pointer to prevent code rearrangement */ /* use volatile pointer to prevent code rearrangement */
volatile XLogCtlData *xlogctl = XLogCtl; volatile XLogCtlData *xlogctl = XLogCtl;
SpinLockAcquire_NoHoldoff(&xlogctl->info_lck); SpinLockAcquire(&xlogctl->info_lck);
RedoRecPtr = xlogctl->Insert.RedoRecPtr = checkPoint.redo; RedoRecPtr = xlogctl->Insert.RedoRecPtr = checkPoint.redo;
SpinLockRelease_NoHoldoff(&xlogctl->info_lck); SpinLockRelease(&xlogctl->info_lck);
} }
/* /*
......
...@@ -8,7 +8,7 @@ ...@@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.200 2005/11/22 18:17:19 momjian Exp $ * $PostgreSQL: pgsql/src/backend/storage/buffer/bufmgr.c,v 1.201 2005/12/29 18:08:05 tgl Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
...@@ -442,7 +442,7 @@ BufferAlloc(Relation reln, ...@@ -442,7 +442,7 @@ BufferAlloc(Relation reln,
/* /*
* Need to lock the buffer header too in order to change its tag. * Need to lock the buffer header too in order to change its tag.
*/ */
LockBufHdr_NoHoldoff(buf); LockBufHdr(buf);
/* /*
* Somebody could have pinned or re-dirtied the buffer while we were * Somebody could have pinned or re-dirtied the buffer while we were
...@@ -453,7 +453,7 @@ BufferAlloc(Relation reln, ...@@ -453,7 +453,7 @@ BufferAlloc(Relation reln,
if (buf->refcount == 1 && !(buf->flags & BM_DIRTY)) if (buf->refcount == 1 && !(buf->flags & BM_DIRTY))
break; break;
UnlockBufHdr_NoHoldoff(buf); UnlockBufHdr(buf);
BufTableDelete(&newTag); BufTableDelete(&newTag);
LWLockRelease(BufMappingLock); LWLockRelease(BufMappingLock);
UnpinBuffer(buf, true, false /* evidently recently used */ ); UnpinBuffer(buf, true, false /* evidently recently used */ );
...@@ -473,7 +473,7 @@ BufferAlloc(Relation reln, ...@@ -473,7 +473,7 @@ BufferAlloc(Relation reln,
buf->flags |= BM_TAG_VALID; buf->flags |= BM_TAG_VALID;
buf->usage_count = 0; buf->usage_count = 0;
UnlockBufHdr_NoHoldoff(buf); UnlockBufHdr(buf);
if (oldFlags & BM_TAG_VALID) if (oldFlags & BM_TAG_VALID)
BufTableDelete(&oldTag); BufTableDelete(&oldTag);
...@@ -529,13 +529,13 @@ retry: ...@@ -529,13 +529,13 @@ retry:
*/ */
LWLockAcquire(BufMappingLock, LW_EXCLUSIVE); LWLockAcquire(BufMappingLock, LW_EXCLUSIVE);
/* Re-lock the buffer header (NoHoldoff since we have an LWLock) */ /* Re-lock the buffer header */
LockBufHdr_NoHoldoff(buf); LockBufHdr(buf);
/* If it's changed while we were waiting for lock, do nothing */ /* If it's changed while we were waiting for lock, do nothing */
if (!BUFFERTAGS_EQUAL(buf->tag, oldTag)) if (!BUFFERTAGS_EQUAL(buf->tag, oldTag))
{ {
UnlockBufHdr_NoHoldoff(buf); UnlockBufHdr(buf);
LWLockRelease(BufMappingLock); LWLockRelease(BufMappingLock);
return; return;
} }
...@@ -551,7 +551,7 @@ retry: ...@@ -551,7 +551,7 @@ retry:
*/ */
if (buf->refcount != 0) if (buf->refcount != 0)
{ {
UnlockBufHdr_NoHoldoff(buf); UnlockBufHdr(buf);
LWLockRelease(BufMappingLock); LWLockRelease(BufMappingLock);
/* safety check: should definitely not be our *own* pin */ /* safety check: should definitely not be our *own* pin */
if (PrivateRefCount[buf->buf_id] != 0) if (PrivateRefCount[buf->buf_id] != 0)
...@@ -569,7 +569,7 @@ retry: ...@@ -569,7 +569,7 @@ retry:
buf->flags = 0; buf->flags = 0;
buf->usage_count = 0; buf->usage_count = 0;
UnlockBufHdr_NoHoldoff(buf); UnlockBufHdr(buf);
/* /*
* Remove the buffer from the lookup hashtable, if it was in there. * Remove the buffer from the lookup hashtable, if it was in there.
...@@ -729,15 +729,10 @@ PinBuffer(volatile BufferDesc *buf) ...@@ -729,15 +729,10 @@ PinBuffer(volatile BufferDesc *buf)
if (PrivateRefCount[b] == 0) if (PrivateRefCount[b] == 0)
{ {
/* LockBufHdr(buf);
* Use NoHoldoff here because we don't want the unlock to be a
* potential place to honor a QueryCancel request. (The caller should
* be holding off interrupts anyway.)
*/
LockBufHdr_NoHoldoff(buf);
buf->refcount++; buf->refcount++;
result = (buf->flags & BM_VALID) != 0; result = (buf->flags & BM_VALID) != 0;
UnlockBufHdr_NoHoldoff(buf); UnlockBufHdr(buf);
} }
else else
{ {
...@@ -766,14 +761,11 @@ PinBuffer_Locked(volatile BufferDesc *buf) ...@@ -766,14 +761,11 @@ PinBuffer_Locked(volatile BufferDesc *buf)
if (PrivateRefCount[b] == 0) if (PrivateRefCount[b] == 0)
buf->refcount++; buf->refcount++;
/* NoHoldoff since we mustn't accept cancel interrupt here */ UnlockBufHdr(buf);
UnlockBufHdr_NoHoldoff(buf);
PrivateRefCount[b]++; PrivateRefCount[b]++;
Assert(PrivateRefCount[b] > 0); Assert(PrivateRefCount[b] > 0);
ResourceOwnerRememberBuffer(CurrentResourceOwner, ResourceOwnerRememberBuffer(CurrentResourceOwner,
BufferDescriptorGetBuffer(buf)); BufferDescriptorGetBuffer(buf));
/* Now we can accept cancel */
RESUME_INTERRUPTS();
} }
/* /*
...@@ -811,8 +803,7 @@ UnpinBuffer(volatile BufferDesc *buf, bool fixOwner, bool normalAccess) ...@@ -811,8 +803,7 @@ UnpinBuffer(volatile BufferDesc *buf, bool fixOwner, bool normalAccess)
Assert(!LWLockHeldByMe(buf->content_lock)); Assert(!LWLockHeldByMe(buf->content_lock));
Assert(!LWLockHeldByMe(buf->io_in_progress_lock)); Assert(!LWLockHeldByMe(buf->io_in_progress_lock));
/* NoHoldoff ensures we don't lose control before sending signal */ LockBufHdr(buf);
LockBufHdr_NoHoldoff(buf);
/* Decrement the shared reference count */ /* Decrement the shared reference count */
Assert(buf->refcount > 0); Assert(buf->refcount > 0);
...@@ -841,11 +832,11 @@ UnpinBuffer(volatile BufferDesc *buf, bool fixOwner, bool normalAccess) ...@@ -841,11 +832,11 @@ UnpinBuffer(volatile BufferDesc *buf, bool fixOwner, bool normalAccess)
int wait_backend_pid = buf->wait_backend_pid; int wait_backend_pid = buf->wait_backend_pid;
buf->flags &= ~BM_PIN_COUNT_WAITER; buf->flags &= ~BM_PIN_COUNT_WAITER;
UnlockBufHdr_NoHoldoff(buf); UnlockBufHdr(buf);
ProcSendSignal(wait_backend_pid); ProcSendSignal(wait_backend_pid);
} }
else else
UnlockBufHdr_NoHoldoff(buf); UnlockBufHdr(buf);
/* /*
* If VACUUM is releasing an otherwise-unused buffer, send it to the * If VACUUM is releasing an otherwise-unused buffer, send it to the
...@@ -1300,9 +1291,9 @@ FlushBuffer(volatile BufferDesc *buf, SMgrRelation reln) ...@@ -1300,9 +1291,9 @@ FlushBuffer(volatile BufferDesc *buf, SMgrRelation reln)
*/ */
/* To check if block content changes while flushing. - vadim 01/17/97 */ /* To check if block content changes while flushing. - vadim 01/17/97 */
LockBufHdr_NoHoldoff(buf); LockBufHdr(buf);
buf->flags &= ~BM_JUST_DIRTIED; buf->flags &= ~BM_JUST_DIRTIED;
UnlockBufHdr_NoHoldoff(buf); UnlockBufHdr(buf);
smgrwrite(reln, smgrwrite(reln,
buf->tag.blockNum, buf->tag.blockNum,
...@@ -1693,7 +1684,7 @@ UnlockBuffers(void) ...@@ -1693,7 +1684,7 @@ UnlockBuffers(void)
{ {
HOLD_INTERRUPTS(); /* don't want to die() partway through... */ HOLD_INTERRUPTS(); /* don't want to die() partway through... */
LockBufHdr_NoHoldoff(buf); LockBufHdr(buf);
/* /*
* Don't complain if flag bit not set; it could have been reset but we * Don't complain if flag bit not set; it could have been reset but we
...@@ -1703,7 +1694,7 @@ UnlockBuffers(void) ...@@ -1703,7 +1694,7 @@ UnlockBuffers(void)
buf->wait_backend_pid == MyProcPid) buf->wait_backend_pid == MyProcPid)
buf->flags &= ~BM_PIN_COUNT_WAITER; buf->flags &= ~BM_PIN_COUNT_WAITER;
UnlockBufHdr_NoHoldoff(buf); UnlockBufHdr(buf);
ProcCancelWaitForSignal(); ProcCancelWaitForSignal();
...@@ -1741,9 +1732,9 @@ LockBuffer(Buffer buffer, int mode) ...@@ -1741,9 +1732,9 @@ LockBuffer(Buffer buffer, int mode)
* that it's critical to set dirty bit *before* logging changes with * that it's critical to set dirty bit *before* logging changes with
* XLogInsert() - see comments in SyncOneBuffer(). * XLogInsert() - see comments in SyncOneBuffer().
*/ */
LockBufHdr_NoHoldoff(buf); LockBufHdr(buf);
buf->flags |= (BM_DIRTY | BM_JUST_DIRTIED); buf->flags |= (BM_DIRTY | BM_JUST_DIRTIED);
UnlockBufHdr_NoHoldoff(buf); UnlockBufHdr(buf);
} }
else else
elog(ERROR, "unrecognized buffer lock mode: %d", mode); elog(ERROR, "unrecognized buffer lock mode: %d", mode);
...@@ -1773,9 +1764,9 @@ ConditionalLockBuffer(Buffer buffer) ...@@ -1773,9 +1764,9 @@ ConditionalLockBuffer(Buffer buffer)
* that it's critical to set dirty bit *before* logging changes with * that it's critical to set dirty bit *before* logging changes with
* XLogInsert() - see comments in SyncOneBuffer(). * XLogInsert() - see comments in SyncOneBuffer().
*/ */
LockBufHdr_NoHoldoff(buf); LockBufHdr(buf);
buf->flags |= (BM_DIRTY | BM_JUST_DIRTIED); buf->flags |= (BM_DIRTY | BM_JUST_DIRTIED);
UnlockBufHdr_NoHoldoff(buf); UnlockBufHdr(buf);
return true; return true;
} }
...@@ -1827,25 +1818,25 @@ LockBufferForCleanup(Buffer buffer) ...@@ -1827,25 +1818,25 @@ LockBufferForCleanup(Buffer buffer)
{ {
/* Try to acquire lock */ /* Try to acquire lock */
LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE); LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
LockBufHdr_NoHoldoff(bufHdr); LockBufHdr(bufHdr);
Assert(bufHdr->refcount > 0); Assert(bufHdr->refcount > 0);
if (bufHdr->refcount == 1) if (bufHdr->refcount == 1)
{ {
/* Successfully acquired exclusive lock with pincount 1 */ /* Successfully acquired exclusive lock with pincount 1 */
UnlockBufHdr_NoHoldoff(bufHdr); UnlockBufHdr(bufHdr);
return; return;
} }
/* Failed, so mark myself as waiting for pincount 1 */ /* Failed, so mark myself as waiting for pincount 1 */
if (bufHdr->flags & BM_PIN_COUNT_WAITER) if (bufHdr->flags & BM_PIN_COUNT_WAITER)
{ {
UnlockBufHdr_NoHoldoff(bufHdr); UnlockBufHdr(bufHdr);
LockBuffer(buffer, BUFFER_LOCK_UNLOCK); LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
elog(ERROR, "multiple backends attempting to wait for pincount 1"); elog(ERROR, "multiple backends attempting to wait for pincount 1");
} }
bufHdr->wait_backend_pid = MyProcPid; bufHdr->wait_backend_pid = MyProcPid;
bufHdr->flags |= BM_PIN_COUNT_WAITER; bufHdr->flags |= BM_PIN_COUNT_WAITER;
PinCountWaitBuf = bufHdr; PinCountWaitBuf = bufHdr;
UnlockBufHdr_NoHoldoff(bufHdr); UnlockBufHdr(bufHdr);
LockBuffer(buffer, BUFFER_LOCK_UNLOCK); LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
/* Wait to be signaled by UnpinBuffer() */ /* Wait to be signaled by UnpinBuffer() */
ProcWaitForSignal(); ProcWaitForSignal();
...@@ -1926,8 +1917,7 @@ StartBufferIO(volatile BufferDesc *buf, bool forInput) ...@@ -1926,8 +1917,7 @@ StartBufferIO(volatile BufferDesc *buf, bool forInput)
*/ */
LWLockAcquire(buf->io_in_progress_lock, LW_EXCLUSIVE); LWLockAcquire(buf->io_in_progress_lock, LW_EXCLUSIVE);
/* NoHoldoff is OK since we now have an LWLock */ LockBufHdr(buf);
LockBufHdr_NoHoldoff(buf);
if (!(buf->flags & BM_IO_IN_PROGRESS)) if (!(buf->flags & BM_IO_IN_PROGRESS))
break; break;
...@@ -1938,7 +1928,7 @@ StartBufferIO(volatile BufferDesc *buf, bool forInput) ...@@ -1938,7 +1928,7 @@ StartBufferIO(volatile BufferDesc *buf, bool forInput)
* an error (see AbortBufferIO). If that's the case, we must wait for * an error (see AbortBufferIO). If that's the case, we must wait for
* him to get unwedged. * him to get unwedged.
*/ */
UnlockBufHdr_NoHoldoff(buf); UnlockBufHdr(buf);
LWLockRelease(buf->io_in_progress_lock); LWLockRelease(buf->io_in_progress_lock);
WaitIO(buf); WaitIO(buf);
} }
...@@ -1948,14 +1938,14 @@ StartBufferIO(volatile BufferDesc *buf, bool forInput) ...@@ -1948,14 +1938,14 @@ StartBufferIO(volatile BufferDesc *buf, bool forInput)
if (forInput ? (buf->flags & BM_VALID) : !(buf->flags & BM_DIRTY)) if (forInput ? (buf->flags & BM_VALID) : !(buf->flags & BM_DIRTY))
{ {
/* someone else already did the I/O */ /* someone else already did the I/O */
UnlockBufHdr_NoHoldoff(buf); UnlockBufHdr(buf);
LWLockRelease(buf->io_in_progress_lock); LWLockRelease(buf->io_in_progress_lock);
return false; return false;
} }
buf->flags |= BM_IO_IN_PROGRESS; buf->flags |= BM_IO_IN_PROGRESS;
UnlockBufHdr_NoHoldoff(buf); UnlockBufHdr(buf);
InProgressBuf = buf; InProgressBuf = buf;
IsForInput = forInput; IsForInput = forInput;
...@@ -1986,8 +1976,7 @@ TerminateBufferIO(volatile BufferDesc *buf, bool clear_dirty, ...@@ -1986,8 +1976,7 @@ TerminateBufferIO(volatile BufferDesc *buf, bool clear_dirty,
{ {
Assert(buf == InProgressBuf); Assert(buf == InProgressBuf);
/* NoHoldoff is OK since we must have an LWLock */ LockBufHdr(buf);
LockBufHdr_NoHoldoff(buf);
Assert(buf->flags & BM_IO_IN_PROGRESS); Assert(buf->flags & BM_IO_IN_PROGRESS);
buf->flags &= ~(BM_IO_IN_PROGRESS | BM_IO_ERROR); buf->flags &= ~(BM_IO_IN_PROGRESS | BM_IO_ERROR);
...@@ -1995,7 +1984,7 @@ TerminateBufferIO(volatile BufferDesc *buf, bool clear_dirty, ...@@ -1995,7 +1984,7 @@ TerminateBufferIO(volatile BufferDesc *buf, bool clear_dirty,
buf->flags &= ~BM_DIRTY; buf->flags &= ~BM_DIRTY;
buf->flags |= set_flag_bits; buf->flags |= set_flag_bits;
UnlockBufHdr_NoHoldoff(buf); UnlockBufHdr(buf);
InProgressBuf = NULL; InProgressBuf = NULL;
...@@ -2026,15 +2015,14 @@ AbortBufferIO(void) ...@@ -2026,15 +2015,14 @@ AbortBufferIO(void)
*/ */
LWLockAcquire(buf->io_in_progress_lock, LW_EXCLUSIVE); LWLockAcquire(buf->io_in_progress_lock, LW_EXCLUSIVE);
/* NoHoldoff is OK since we now have an LWLock */ LockBufHdr(buf);
LockBufHdr_NoHoldoff(buf);
Assert(buf->flags & BM_IO_IN_PROGRESS); Assert(buf->flags & BM_IO_IN_PROGRESS);
if (IsForInput) if (IsForInput)
{ {
Assert(!(buf->flags & BM_DIRTY)); Assert(!(buf->flags & BM_DIRTY));
/* We'd better not think buffer is valid yet */ /* We'd better not think buffer is valid yet */
Assert(!(buf->flags & BM_VALID)); Assert(!(buf->flags & BM_VALID));
UnlockBufHdr_NoHoldoff(buf); UnlockBufHdr(buf);
} }
else else
{ {
...@@ -2042,7 +2030,7 @@ AbortBufferIO(void) ...@@ -2042,7 +2030,7 @@ AbortBufferIO(void)
sv_flags = buf->flags; sv_flags = buf->flags;
Assert(sv_flags & BM_DIRTY); Assert(sv_flags & BM_DIRTY);
UnlockBufHdr_NoHoldoff(buf); UnlockBufHdr(buf);
/* Issue notice if this is not the first failure... */ /* Issue notice if this is not the first failure... */
if (sv_flags & BM_IO_ERROR) if (sv_flags & BM_IO_ERROR)
{ {
......
...@@ -8,7 +8,7 @@ ...@@ -8,7 +8,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.88 2005/11/22 18:17:20 momjian Exp $ * $PostgreSQL: pgsql/src/backend/storage/ipc/shmem.c,v 1.89 2005/12/29 18:08:05 tgl Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
...@@ -58,6 +58,7 @@ ...@@ -58,6 +58,7 @@
#include "postgres.h" #include "postgres.h"
#include "access/transam.h" #include "access/transam.h"
#include "miscadmin.h"
#include "storage/pg_shmem.h" #include "storage/pg_shmem.h"
#include "storage/spin.h" #include "storage/spin.h"
#include "utils/tqual.h" #include "utils/tqual.h"
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
* Portions Copyright (c) 1994, Regents of the University of California * Portions Copyright (c) 1994, Regents of the University of California
* *
* IDENTIFICATION * IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.36 2005/12/11 21:02:18 tgl Exp $ * $PostgreSQL: pgsql/src/backend/storage/lmgr/lwlock.c,v 1.37 2005/12/29 18:08:05 tgl Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include "access/clog.h" #include "access/clog.h"
#include "access/multixact.h" #include "access/multixact.h"
#include "access/subtrans.h" #include "access/subtrans.h"
#include "miscadmin.h"
#include "storage/lwlock.h" #include "storage/lwlock.h"
#include "storage/proc.h" #include "storage/proc.h"
#include "storage/spin.h" #include "storage/spin.h"
...@@ -301,7 +302,7 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode) ...@@ -301,7 +302,7 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
bool mustwait; bool mustwait;
/* Acquire mutex. Time spent holding mutex should be short! */ /* Acquire mutex. Time spent holding mutex should be short! */
SpinLockAcquire_NoHoldoff(&lock->mutex); SpinLockAcquire(&lock->mutex);
/* If retrying, allow LWLockRelease to release waiters again */ /* If retrying, allow LWLockRelease to release waiters again */
if (retry) if (retry)
...@@ -340,7 +341,7 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode) ...@@ -340,7 +341,7 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
* memory initialization. * memory initialization.
*/ */
if (proc == NULL) if (proc == NULL)
elog(FATAL, "cannot wait without a PGPROC structure"); elog(PANIC, "cannot wait without a PGPROC structure");
proc->lwWaiting = true; proc->lwWaiting = true;
proc->lwExclusive = (mode == LW_EXCLUSIVE); proc->lwExclusive = (mode == LW_EXCLUSIVE);
...@@ -352,7 +353,7 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode) ...@@ -352,7 +353,7 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
lock->tail = proc; lock->tail = proc;
/* Can release the mutex now */ /* Can release the mutex now */
SpinLockRelease_NoHoldoff(&lock->mutex); SpinLockRelease(&lock->mutex);
/* /*
* Wait until awakened. * Wait until awakened.
...@@ -384,7 +385,7 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode) ...@@ -384,7 +385,7 @@ LWLockAcquire(LWLockId lockid, LWLockMode mode)
} }
/* We are done updating shared state of the lock itself. */ /* We are done updating shared state of the lock itself. */
SpinLockRelease_NoHoldoff(&lock->mutex); SpinLockRelease(&lock->mutex);
/* Add lock to list of locks held by this backend */ /* Add lock to list of locks held by this backend */
held_lwlocks[num_held_lwlocks++] = lockid; held_lwlocks[num_held_lwlocks++] = lockid;
...@@ -423,7 +424,7 @@ LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode) ...@@ -423,7 +424,7 @@ LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode)
HOLD_INTERRUPTS(); HOLD_INTERRUPTS();
/* Acquire mutex. Time spent holding mutex should be short! */ /* Acquire mutex. Time spent holding mutex should be short! */
SpinLockAcquire_NoHoldoff(&lock->mutex); SpinLockAcquire(&lock->mutex);
/* If I can get the lock, do so quickly. */ /* If I can get the lock, do so quickly. */
if (mode == LW_EXCLUSIVE) if (mode == LW_EXCLUSIVE)
...@@ -448,7 +449,7 @@ LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode) ...@@ -448,7 +449,7 @@ LWLockConditionalAcquire(LWLockId lockid, LWLockMode mode)
} }
/* We are done updating shared state of the lock itself. */ /* We are done updating shared state of the lock itself. */
SpinLockRelease_NoHoldoff(&lock->mutex); SpinLockRelease(&lock->mutex);
if (mustwait) if (mustwait)
{ {
...@@ -494,7 +495,7 @@ LWLockRelease(LWLockId lockid) ...@@ -494,7 +495,7 @@ LWLockRelease(LWLockId lockid)
held_lwlocks[i] = held_lwlocks[i + 1]; held_lwlocks[i] = held_lwlocks[i + 1];
/* Acquire mutex. Time spent holding mutex should be short! */ /* Acquire mutex. Time spent holding mutex should be short! */
SpinLockAcquire_NoHoldoff(&lock->mutex); SpinLockAcquire(&lock->mutex);
/* Release my hold on lock */ /* Release my hold on lock */
if (lock->exclusive > 0) if (lock->exclusive > 0)
...@@ -542,7 +543,7 @@ LWLockRelease(LWLockId lockid) ...@@ -542,7 +543,7 @@ LWLockRelease(LWLockId lockid)
} }
/* We are done updating shared state of the lock itself. */ /* We are done updating shared state of the lock itself. */
SpinLockRelease_NoHoldoff(&lock->mutex); SpinLockRelease(&lock->mutex);
/* /*
* Awaken any waiters I removed from the queue. * Awaken any waiters I removed from the queue.
......
...@@ -8,7 +8,7 @@ ...@@ -8,7 +8,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California * Portions Copyright (c) 1994, Regents of the University of California
* *
* $PostgreSQL: pgsql/src/include/storage/buf_internals.h,v 1.83 2005/11/22 18:17:31 momjian Exp $ * $PostgreSQL: pgsql/src/include/storage/buf_internals.h,v 1.84 2005/12/29 18:08:05 tgl Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
...@@ -138,24 +138,16 @@ typedef struct sbufdesc ...@@ -138,24 +138,16 @@ typedef struct sbufdesc
#define FREENEXT_NOT_IN_LIST (-2) #define FREENEXT_NOT_IN_LIST (-2)
/* /*
* Macros for acquiring/releasing a buffer header's spinlock. The * Macros for acquiring/releasing a shared buffer header's spinlock.
* NoHoldoff cases may be used when we know that we hold some LWLock * Do not apply these to local buffers!
* and therefore interrupts are already held off. Do not apply these
* to local buffers!
* *
* Note: as a general coding rule, if you are using these then you probably * Note: as a general coding rule, if you are using these then you probably
* want to be using a volatile-qualified pointer to the buffer header, to * need to be using a volatile-qualified pointer to the buffer header, to
* ensure that the compiler doesn't rearrange accesses to the header to * ensure that the compiler doesn't rearrange accesses to the header to
* occur before or after the spinlock is acquired/released. * occur before or after the spinlock is acquired/released.
*/ */
#define LockBufHdr(bufHdr) \ #define LockBufHdr(bufHdr) SpinLockAcquire(&(bufHdr)->buf_hdr_lock)
SpinLockAcquire(&(bufHdr)->buf_hdr_lock) #define UnlockBufHdr(bufHdr) SpinLockRelease(&(bufHdr)->buf_hdr_lock)
#define UnlockBufHdr(bufHdr) \
SpinLockRelease(&(bufHdr)->buf_hdr_lock)
#define LockBufHdr_NoHoldoff(bufHdr) \
SpinLockAcquire_NoHoldoff(&(bufHdr)->buf_hdr_lock)
#define UnlockBufHdr_NoHoldoff(bufHdr) \
SpinLockRelease_NoHoldoff(&(bufHdr)->buf_hdr_lock)
/* in buf_init.c */ /* in buf_init.c */
......
...@@ -14,17 +14,9 @@ ...@@ -14,17 +14,9 @@
* Acquire a spinlock, waiting if necessary. * Acquire a spinlock, waiting if necessary.
* Time out and abort() if unable to acquire the lock in a * Time out and abort() if unable to acquire the lock in a
* "reasonable" amount of time --- typically ~ 1 minute. * "reasonable" amount of time --- typically ~ 1 minute.
* Cancel/die interrupts are held off until the lock is released.
* *
* void SpinLockRelease(volatile slock_t *lock) * void SpinLockRelease(volatile slock_t *lock)
* Unlock a previously acquired lock. * Unlock a previously acquired lock.
* Release the cancel/die interrupt holdoff.
*
* void SpinLockAcquire_NoHoldoff(volatile slock_t *lock)
* void SpinLockRelease_NoHoldoff(volatile slock_t *lock)
* Same as above, except no interrupt holdoff processing is done.
* This pair of macros may be used when there is a surrounding
* interrupt holdoff.
* *
* bool SpinLockFree(slock_t *lock) * bool SpinLockFree(slock_t *lock)
* Tests if the lock is free. Returns TRUE if free, FALSE if locked. * Tests if the lock is free. Returns TRUE if free, FALSE if locked.
...@@ -43,14 +35,21 @@ ...@@ -43,14 +35,21 @@
* protects shared data with a spinlock MUST reference that shared * protects shared data with a spinlock MUST reference that shared
* data through a volatile pointer. * data through a volatile pointer.
* *
* Keep in mind the coding rule that spinlocks must not be held for more
* than a few instructions. In particular, we assume it is not possible
* for a CHECK_FOR_INTERRUPTS() to occur while holding a spinlock, and so
* it is not necessary to do HOLD/RESUME_INTERRUPTS() in these macros.
*
* These macros are implemented in terms of hardware-dependent macros * These macros are implemented in terms of hardware-dependent macros
* supplied by s_lock.h. * supplied by s_lock.h. There is not currently any extra functionality
* added by this header, but there has been in the past and may someday
* be again.
* *
* *
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group * Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California * Portions Copyright (c) 1994, Regents of the University of California
* *
* $PostgreSQL: pgsql/src/include/storage/spin.h,v 1.26 2005/10/13 06:17:34 neilc Exp $ * $PostgreSQL: pgsql/src/include/storage/spin.h,v 1.27 2005/12/29 18:08:05 tgl Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
...@@ -58,26 +57,13 @@ ...@@ -58,26 +57,13 @@
#define SPIN_H #define SPIN_H
#include "storage/s_lock.h" #include "storage/s_lock.h"
#include "miscadmin.h"
#define SpinLockInit(lock) S_INIT_LOCK(lock) #define SpinLockInit(lock) S_INIT_LOCK(lock)
#define SpinLockAcquire(lock) \ #define SpinLockAcquire(lock) S_LOCK(lock)
do { \
HOLD_INTERRUPTS(); \
S_LOCK(lock); \
} while (0)
#define SpinLockAcquire_NoHoldoff(lock) S_LOCK(lock)
#define SpinLockRelease(lock) \
do { \
S_UNLOCK(lock); \
RESUME_INTERRUPTS(); \
} while (0)
#define SpinLockRelease_NoHoldoff(lock) S_UNLOCK(lock) #define SpinLockRelease(lock) S_UNLOCK(lock)
#define SpinLockFree(lock) S_LOCK_FREE(lock) #define SpinLockFree(lock) S_LOCK_FREE(lock)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment