Commit 6ba4ecbf authored by Andres Freund

Remove most volatile qualifiers from xlog.c

For the reason outlined in df4077cd, also remove volatile qualifiers
from xlog.c. Some of these uses of volatile were added after problems
were noticed back when spinlocks did not imply compiler barriers. They
therefore make a good test: removing the volatiles does indeed break
things if the compiler barriers in the spinlocks are not present.

Several uses of volatile remain where they explicitly access shared
memory without holding a lock. These places can tolerate slightly
out-of-date data, but removing the volatile might lead to the variables
never being reread from memory. Those uses could also be replaced by
barriers, but that is a separate change of doubtful value.
parent df4077cd
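For illustration only, here is a minimal sketch of the two access patterns the message above distinguishes. It is not code from xlog.c: the struct, field, and function names are hypothetical stand-ins, and the lock is built from GCC/Clang __sync builtins on the assumption that, like PostgreSQL's SpinLockAcquire()/SpinLockRelease() since df4077cd, acquire and release also act as compiler barriers. With such barriers a plain pointer is safe inside the critical section; an unlocked polling read still needs a volatile access (or an explicit barrier) so the compiler cannot cache the value. All hunks below are in src/backend/access/transam/xlog.c.

/*
 * Minimal sketch (not from xlog.c): a spinlock that doubles as a compiler
 * barrier makes "volatile" unnecessary for locked accesses, while an
 * unlocked polling read still needs it.  All names below are hypothetical
 * stand-ins for the real XLogCtlData/SpinLockAcquire/SpinLockRelease.
 */
#include <stdbool.h>

typedef struct SharedCtl
{
    int     lck;             /* stand-in spinlock word */
    bool    recoveryPause;   /* written by another process */
    long    logWriteRqst;    /* protected by lck */
} SharedCtl;

static SharedCtl *Ctl;       /* would point into shared memory */

/*
 * Assumed GCC/Clang __sync builtins: their acquire/release semantics also
 * keep the compiler from moving or caching loads and stores across them,
 * mirroring what PostgreSQL's spinlocks guarantee since df4077cd.
 */
static void
spin_acquire(int *lck)
{
    while (__sync_lock_test_and_set(lck, 1))
        ;                    /* spin until we flip 0 -> 1 */
}

static void
spin_release(int *lck)
{
    __sync_lock_release(lck);   /* store 0 with release semantics */
}

/*
 * Locked access: no volatile needed.  The barriers in spin_acquire/
 * spin_release prevent the compiler from caching Ctl->logWriteRqst
 * across the critical section.
 */
static void
advance_write_request(long endPos)
{
    spin_acquire(&Ctl->lck);
    if (Ctl->logWriteRqst < endPos)
        Ctl->logWriteRqst = endPos;
    spin_release(&Ctl->lck);
}

/*
 * Unlocked access: volatile retained.  Without the cast (or an explicit
 * barrier) the compiler could hoist the read out of the loop and never
 * reread the flag from shared memory.
 */
static void
wait_until_unpaused(void)
{
    while (((volatile SharedCtl *) Ctl)->recoveryPause)
        ;                    /* real code would sleep and check for interrupts */
}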
@@ -1220,16 +1220,13 @@ begin:;
      */
     if (StartPos / XLOG_BLCKSZ != EndPos / XLOG_BLCKSZ)
     {
-        /* use volatile pointer to prevent code rearrangement */
-        volatile XLogCtlData *xlogctl = XLogCtl;
-
-        SpinLockAcquire(&xlogctl->info_lck);
+        SpinLockAcquire(&XLogCtl->info_lck);
         /* advance global request to include new block(s) */
-        if (xlogctl->LogwrtRqst.Write < EndPos)
-            xlogctl->LogwrtRqst.Write = EndPos;
+        if (XLogCtl->LogwrtRqst.Write < EndPos)
+            XLogCtl->LogwrtRqst.Write = EndPos;
         /* update local result copy while I have the chance */
-        LogwrtResult = xlogctl->LogwrtResult;
-        SpinLockRelease(&xlogctl->info_lck);
+        LogwrtResult = XLogCtl->LogwrtResult;
+        SpinLockRelease(&XLogCtl->info_lck);
     }

     /*
@@ -1324,7 +1321,7 @@ static void
 ReserveXLogInsertLocation(int size, XLogRecPtr *StartPos, XLogRecPtr *EndPos,
                           XLogRecPtr *PrevPtr)
 {
-    volatile XLogCtlInsert *Insert = &XLogCtl->Insert;
+    XLogCtlInsert *Insert = &XLogCtl->Insert;
     uint64      startbytepos;
     uint64      endbytepos;
     uint64      prevbytepos;
@@ -1379,7 +1376,7 @@ ReserveXLogInsertLocation(int size, XLogRecPtr *StartPos, XLogRecPtr *EndPos,
 static bool
 ReserveXLogSwitch(XLogRecPtr *StartPos, XLogRecPtr *EndPos, XLogRecPtr *PrevPtr)
 {
-    volatile XLogCtlInsert *Insert = &XLogCtl->Insert;
+    XLogCtlInsert *Insert = &XLogCtl->Insert;
     uint64      startbytepos;
     uint64      endbytepos;
     uint64      prevbytepos;
@@ -1697,7 +1694,7 @@ WaitXLogInsertionsToFinish(XLogRecPtr upto)
     uint64      bytepos;
     XLogRecPtr  reservedUpto;
     XLogRecPtr  finishedUpto;
-    volatile XLogCtlInsert *Insert = &XLogCtl->Insert;
+    XLogCtlInsert *Insert = &XLogCtl->Insert;
     int         i;

     if (MyProc == NULL)
@@ -2132,16 +2129,11 @@ AdvanceXLInsertBuffer(XLogRecPtr upto, bool opportunistic)
             break;

         /* Before waiting, get info_lck and update LogwrtResult */
-        {
-            /* use volatile pointer to prevent code rearrangement */
-            volatile XLogCtlData *xlogctl = XLogCtl;
-
-            SpinLockAcquire(&xlogctl->info_lck);
-            if (xlogctl->LogwrtRqst.Write < OldPageRqstPtr)
-                xlogctl->LogwrtRqst.Write = OldPageRqstPtr;
-            LogwrtResult = xlogctl->LogwrtResult;
-            SpinLockRelease(&xlogctl->info_lck);
-        }
+        SpinLockAcquire(&XLogCtl->info_lck);
+        if (XLogCtl->LogwrtRqst.Write < OldPageRqstPtr)
+            XLogCtl->LogwrtRqst.Write = OldPageRqstPtr;
+        LogwrtResult = XLogCtl->LogwrtResult;
+        SpinLockRelease(&XLogCtl->info_lck);

         /*
          * Now that we have an up-to-date LogwrtResult value, see if we
@@ -2549,16 +2541,13 @@ XLogWrite(XLogwrtRqst WriteRqst, bool flexible)
      * code in a couple of places.
      */
     {
-        /* use volatile pointer to prevent code rearrangement */
-        volatile XLogCtlData *xlogctl = XLogCtl;
-
-        SpinLockAcquire(&xlogctl->info_lck);
-        xlogctl->LogwrtResult = LogwrtResult;
-        if (xlogctl->LogwrtRqst.Write < LogwrtResult.Write)
-            xlogctl->LogwrtRqst.Write = LogwrtResult.Write;
-        if (xlogctl->LogwrtRqst.Flush < LogwrtResult.Flush)
-            xlogctl->LogwrtRqst.Flush = LogwrtResult.Flush;
-        SpinLockRelease(&xlogctl->info_lck);
+        SpinLockAcquire(&XLogCtl->info_lck);
+        XLogCtl->LogwrtResult = LogwrtResult;
+        if (XLogCtl->LogwrtRqst.Write < LogwrtResult.Write)
+            XLogCtl->LogwrtRqst.Write = LogwrtResult.Write;
+        if (XLogCtl->LogwrtRqst.Flush < LogwrtResult.Flush)
+            XLogCtl->LogwrtRqst.Flush = LogwrtResult.Flush;
+        SpinLockRelease(&XLogCtl->info_lck);
     }
 }
@@ -2573,15 +2562,12 @@ XLogSetAsyncXactLSN(XLogRecPtr asyncXactLSN)
     XLogRecPtr  WriteRqstPtr = asyncXactLSN;
     bool        sleeping;

-    /* use volatile pointer to prevent code rearrangement */
-    volatile XLogCtlData *xlogctl = XLogCtl;
-
-    SpinLockAcquire(&xlogctl->info_lck);
-    LogwrtResult = xlogctl->LogwrtResult;
-    sleeping = xlogctl->WalWriterSleeping;
-    if (xlogctl->asyncXactLSN < asyncXactLSN)
-        xlogctl->asyncXactLSN = asyncXactLSN;
-    SpinLockRelease(&xlogctl->info_lck);
+    SpinLockAcquire(&XLogCtl->info_lck);
+    LogwrtResult = XLogCtl->LogwrtResult;
+    sleeping = XLogCtl->WalWriterSleeping;
+    if (XLogCtl->asyncXactLSN < asyncXactLSN)
+        XLogCtl->asyncXactLSN = asyncXactLSN;
+    SpinLockRelease(&XLogCtl->info_lck);

     /*
      * If the WALWriter is sleeping, we should kick it to make it come out of
@@ -2614,12 +2600,9 @@ XLogSetAsyncXactLSN(XLogRecPtr asyncXactLSN)
 void
 XLogSetReplicationSlotMinimumLSN(XLogRecPtr lsn)
 {
-    /* use volatile pointer to prevent code rearrangement */
-    volatile XLogCtlData *xlogctl = XLogCtl;
-
-    SpinLockAcquire(&xlogctl->info_lck);
-    xlogctl->replicationSlotMinLSN = lsn;
-    SpinLockRelease(&xlogctl->info_lck);
+    SpinLockAcquire(&XLogCtl->info_lck);
+    XLogCtl->replicationSlotMinLSN = lsn;
+    SpinLockRelease(&XLogCtl->info_lck);
 }
@@ -2630,13 +2613,11 @@ XLogSetReplicationSlotMinimumLSN(XLogRecPtr lsn)
 static XLogRecPtr
 XLogGetReplicationSlotMinimumLSN(void)
 {
-    /* use volatile pointer to prevent code rearrangement */
-    volatile XLogCtlData *xlogctl = XLogCtl;
     XLogRecPtr  retval;

-    SpinLockAcquire(&xlogctl->info_lck);
-    retval = xlogctl->replicationSlotMinLSN;
-    SpinLockRelease(&xlogctl->info_lck);
+    SpinLockAcquire(&XLogCtl->info_lck);
+    retval = XLogCtl->replicationSlotMinLSN;
+    SpinLockRelease(&XLogCtl->info_lck);

     return retval;
 }
@@ -2672,8 +2653,6 @@ UpdateMinRecoveryPoint(XLogRecPtr lsn, bool force)
         updateMinRecoveryPoint = false;
     else if (force || minRecoveryPoint < lsn)
     {
-        /* use volatile pointer to prevent code rearrangement */
-        volatile XLogCtlData *xlogctl = XLogCtl;
         XLogRecPtr  newMinRecoveryPoint;
         TimeLineID  newMinRecoveryPointTLI;
@@ -2690,10 +2669,10 @@ UpdateMinRecoveryPoint(XLogRecPtr lsn, bool force)
          * all. Instead, we just log a warning and continue with recovery.
          * (See also the comments about corrupt LSNs in XLogFlush.)
          */
-        SpinLockAcquire(&xlogctl->info_lck);
-        newMinRecoveryPoint = xlogctl->replayEndRecPtr;
-        newMinRecoveryPointTLI = xlogctl->replayEndTLI;
-        SpinLockRelease(&xlogctl->info_lck);
+        SpinLockAcquire(&XLogCtl->info_lck);
+        newMinRecoveryPoint = XLogCtl->replayEndRecPtr;
+        newMinRecoveryPointTLI = XLogCtl->replayEndTLI;
+        SpinLockRelease(&XLogCtl->info_lck);

         if (!force && newMinRecoveryPoint < lsn)
             elog(WARNING,
@@ -2777,16 +2756,14 @@ XLogFlush(XLogRecPtr record)
      */
     for (;;)
     {
-        /* use volatile pointer to prevent code rearrangement */
-        volatile XLogCtlData *xlogctl = XLogCtl;
         XLogRecPtr  insertpos;

         /* read LogwrtResult and update local state */
-        SpinLockAcquire(&xlogctl->info_lck);
-        if (WriteRqstPtr < xlogctl->LogwrtRqst.Write)
-            WriteRqstPtr = xlogctl->LogwrtRqst.Write;
-        LogwrtResult = xlogctl->LogwrtResult;
-        SpinLockRelease(&xlogctl->info_lck);
+        SpinLockAcquire(&XLogCtl->info_lck);
+        if (WriteRqstPtr < XLogCtl->LogwrtRqst.Write)
+            WriteRqstPtr = XLogCtl->LogwrtRqst.Write;
+        LogwrtResult = XLogCtl->LogwrtResult;
+        SpinLockRelease(&XLogCtl->info_lck);

         /* done already? */
         if (record <= LogwrtResult.Flush)
@@ -2923,15 +2900,10 @@ XLogBackgroundFlush(void)
         return false;

     /* read LogwrtResult and update local state */
-    {
-        /* use volatile pointer to prevent code rearrangement */
-        volatile XLogCtlData *xlogctl = XLogCtl;
-
-        SpinLockAcquire(&xlogctl->info_lck);
-        LogwrtResult = xlogctl->LogwrtResult;
-        WriteRqstPtr = xlogctl->LogwrtRqst.Write;
-        SpinLockRelease(&xlogctl->info_lck);
-    }
+    SpinLockAcquire(&XLogCtl->info_lck);
+    LogwrtResult = XLogCtl->LogwrtResult;
+    WriteRqstPtr = XLogCtl->LogwrtRqst.Write;
+    SpinLockRelease(&XLogCtl->info_lck);

     /* back off to last completed page boundary */
     WriteRqstPtr -= WriteRqstPtr % XLOG_BLCKSZ;
@@ -2939,12 +2911,9 @@ XLogBackgroundFlush(void)
     /* if we have already flushed that far, consider async commit records */
     if (WriteRqstPtr <= LogwrtResult.Flush)
     {
-        /* use volatile pointer to prevent code rearrangement */
-        volatile XLogCtlData *xlogctl = XLogCtl;
-
-        SpinLockAcquire(&xlogctl->info_lck);
-        WriteRqstPtr = xlogctl->asyncXactLSN;
-        SpinLockRelease(&xlogctl->info_lck);
+        SpinLockAcquire(&XLogCtl->info_lck);
+        WriteRqstPtr = XLogCtl->asyncXactLSN;
+        SpinLockRelease(&XLogCtl->info_lck);
         flexible = false;       /* ensure it all gets written */
     }
@@ -3055,14 +3024,9 @@ XLogNeedsFlush(XLogRecPtr record)
         return false;

     /* read LogwrtResult and update local state */
-    {
-        /* use volatile pointer to prevent code rearrangement */
-        volatile XLogCtlData *xlogctl = XLogCtl;
-
-        SpinLockAcquire(&xlogctl->info_lck);
-        LogwrtResult = xlogctl->LogwrtResult;
-        SpinLockRelease(&xlogctl->info_lck);
-    }
+    SpinLockAcquire(&XLogCtl->info_lck);
+    LogwrtResult = XLogCtl->LogwrtResult;
+    SpinLockRelease(&XLogCtl->info_lck);

     /* check again */
     if (record <= LogwrtResult.Flush)
@@ -3684,13 +3648,11 @@ PreallocXlogFiles(XLogRecPtr endptr)
 void
 CheckXLogRemoved(XLogSegNo segno, TimeLineID tli)
 {
-    /* use volatile pointer to prevent code rearrangement */
-    volatile XLogCtlData *xlogctl = XLogCtl;
     XLogSegNo   lastRemovedSegNo;

-    SpinLockAcquire(&xlogctl->info_lck);
-    lastRemovedSegNo = xlogctl->lastRemovedSegNo;
-    SpinLockRelease(&xlogctl->info_lck);
+    SpinLockAcquire(&XLogCtl->info_lck);
+    lastRemovedSegNo = XLogCtl->lastRemovedSegNo;
+    SpinLockRelease(&XLogCtl->info_lck);

     if (segno <= lastRemovedSegNo)
     {
@@ -3714,13 +3676,11 @@ CheckXLogRemoved(XLogSegNo segno, TimeLineID tli)
 XLogSegNo
 XLogGetLastRemovedSegno(void)
 {
-    /* use volatile pointer to prevent code rearrangement */
-    volatile XLogCtlData *xlogctl = XLogCtl;
     XLogSegNo   lastRemovedSegNo;

-    SpinLockAcquire(&xlogctl->info_lck);
-    lastRemovedSegNo = xlogctl->lastRemovedSegNo;
-    SpinLockRelease(&xlogctl->info_lck);
+    SpinLockAcquire(&XLogCtl->info_lck);
+    lastRemovedSegNo = XLogCtl->lastRemovedSegNo;
+    SpinLockRelease(&XLogCtl->info_lck);

     return lastRemovedSegNo;
 }
@@ -3732,17 +3692,15 @@ XLogGetLastRemovedSegno(void)
 static void
 UpdateLastRemovedPtr(char *filename)
 {
-    /* use volatile pointer to prevent code rearrangement */
-    volatile XLogCtlData *xlogctl = XLogCtl;
     uint32      tli;
     XLogSegNo   segno;

     XLogFromFileName(filename, &tli, &segno);

-    SpinLockAcquire(&xlogctl->info_lck);
-    if (segno > xlogctl->lastRemovedSegNo)
-        xlogctl->lastRemovedSegNo = segno;
-    SpinLockRelease(&xlogctl->info_lck);
+    SpinLockAcquire(&XLogCtl->info_lck);
+    if (segno > XLogCtl->lastRemovedSegNo)
+        XLogCtl->lastRemovedSegNo = segno;
+    SpinLockRelease(&XLogCtl->info_lck);
 }

 /*
@@ -4700,13 +4658,10 @@ GetFakeLSNForUnloggedRel(void)
 {
     XLogRecPtr  nextUnloggedLSN;

-    /* use volatile pointer to prevent code rearrangement */
-    volatile XLogCtlData *xlogctl = XLogCtl;
-
     /* increment the unloggedLSN counter, need SpinLock */
-    SpinLockAcquire(&xlogctl->ulsn_lck);
-    nextUnloggedLSN = xlogctl->unloggedLSN++;
-    SpinLockRelease(&xlogctl->ulsn_lck);
+    SpinLockAcquire(&XLogCtl->ulsn_lck);
+    nextUnloggedLSN = XLogCtl->unloggedLSN++;
+    SpinLockRelease(&XLogCtl->ulsn_lck);

     return nextUnloggedLSN;
 }
@@ -5738,13 +5693,11 @@ recoveryPausesHere(void)
 bool
 RecoveryIsPaused(void)
 {
-    /* use volatile pointer to prevent code rearrangement */
-    volatile XLogCtlData *xlogctl = XLogCtl;
     bool        recoveryPause;

-    SpinLockAcquire(&xlogctl->info_lck);
-    recoveryPause = xlogctl->recoveryPause;
-    SpinLockRelease(&xlogctl->info_lck);
+    SpinLockAcquire(&XLogCtl->info_lck);
+    recoveryPause = XLogCtl->recoveryPause;
+    SpinLockRelease(&XLogCtl->info_lck);

     return recoveryPause;
 }
@@ -5752,12 +5705,9 @@ RecoveryIsPaused(void)
 void
 SetRecoveryPause(bool recoveryPause)
 {
-    /* use volatile pointer to prevent code rearrangement */
-    volatile XLogCtlData *xlogctl = XLogCtl;
-
-    SpinLockAcquire(&xlogctl->info_lck);
-    xlogctl->recoveryPause = recoveryPause;
-    SpinLockRelease(&xlogctl->info_lck);
+    SpinLockAcquire(&XLogCtl->info_lck);
+    XLogCtl->recoveryPause = recoveryPause;
+    SpinLockRelease(&XLogCtl->info_lck);
 }

 /*
@@ -5855,12 +5805,9 @@ recoveryApplyDelay(XLogRecord *record)
 static void
 SetLatestXTime(TimestampTz xtime)
 {
-    /* use volatile pointer to prevent code rearrangement */
-    volatile XLogCtlData *xlogctl = XLogCtl;
-
-    SpinLockAcquire(&xlogctl->info_lck);
-    xlogctl->recoveryLastXTime = xtime;
-    SpinLockRelease(&xlogctl->info_lck);
+    SpinLockAcquire(&XLogCtl->info_lck);
+    XLogCtl->recoveryLastXTime = xtime;
+    SpinLockRelease(&XLogCtl->info_lck);
 }

 /*
@@ -5869,13 +5816,11 @@ SetLatestXTime(TimestampTz xtime)
 TimestampTz
 GetLatestXTime(void)
 {
-    /* use volatile pointer to prevent code rearrangement */
-    volatile XLogCtlData *xlogctl = XLogCtl;
     TimestampTz xtime;

-    SpinLockAcquire(&xlogctl->info_lck);
-    xtime = xlogctl->recoveryLastXTime;
-    SpinLockRelease(&xlogctl->info_lck);
+    SpinLockAcquire(&XLogCtl->info_lck);
+    xtime = XLogCtl->recoveryLastXTime;
+    SpinLockRelease(&XLogCtl->info_lck);

     return xtime;
 }
@@ -5889,12 +5834,9 @@ GetLatestXTime(void)
 static void
 SetCurrentChunkStartTime(TimestampTz xtime)
 {
-    /* use volatile pointer to prevent code rearrangement */
-    volatile XLogCtlData *xlogctl = XLogCtl;
-
-    SpinLockAcquire(&xlogctl->info_lck);
-    xlogctl->currentChunkStartTime = xtime;
-    SpinLockRelease(&xlogctl->info_lck);
+    SpinLockAcquire(&XLogCtl->info_lck);
+    XLogCtl->currentChunkStartTime = xtime;
+    SpinLockRelease(&XLogCtl->info_lck);
 }

 /*
@@ -5904,13 +5846,11 @@ SetCurrentChunkStartTime(TimestampTz xtime)
 TimestampTz
 GetCurrentChunkReplayStartTime(void)
 {
-    /* use volatile pointer to prevent code rearrangement */
-    volatile XLogCtlData *xlogctl = XLogCtl;
     TimestampTz xtime;

-    SpinLockAcquire(&xlogctl->info_lck);
-    xtime = xlogctl->currentChunkStartTime;
-    SpinLockRelease(&xlogctl->info_lck);
+    SpinLockAcquire(&XLogCtl->info_lck);
+    xtime = XLogCtl->currentChunkStartTime;
+    SpinLockRelease(&XLogCtl->info_lck);

     return xtime;
 }
@@ -6434,9 +6374,6 @@ StartupXLOG(void)
     {
         int         rmid;

-        /* use volatile pointer to prevent code rearrangement */
-        volatile XLogCtlData *xlogctl = XLogCtl;
-
         /*
          * Update pg_control to show that we are recovering and to show the
          * selected checkpoint as the place we are starting from. We also mark
@@ -6623,18 +6560,18 @@ StartupXLOG(void)
         * if we had just replayed the record before the REDO location (or the
         * checkpoint record itself, if it's a shutdown checkpoint).
         */
-        SpinLockAcquire(&xlogctl->info_lck);
+        SpinLockAcquire(&XLogCtl->info_lck);
         if (checkPoint.redo < RecPtr)
-            xlogctl->replayEndRecPtr = checkPoint.redo;
+            XLogCtl->replayEndRecPtr = checkPoint.redo;
         else
-            xlogctl->replayEndRecPtr = EndRecPtr;
-        xlogctl->replayEndTLI = ThisTimeLineID;
-        xlogctl->lastReplayedEndRecPtr = xlogctl->replayEndRecPtr;
-        xlogctl->lastReplayedTLI = xlogctl->replayEndTLI;
-        xlogctl->recoveryLastXTime = 0;
-        xlogctl->currentChunkStartTime = 0;
-        xlogctl->recoveryPause = false;
-        SpinLockRelease(&xlogctl->info_lck);
+            XLogCtl->replayEndRecPtr = EndRecPtr;
+        XLogCtl->replayEndTLI = ThisTimeLineID;
+        XLogCtl->lastReplayedEndRecPtr = XLogCtl->replayEndRecPtr;
+        XLogCtl->lastReplayedTLI = XLogCtl->replayEndTLI;
+        XLogCtl->recoveryLastXTime = 0;
+        XLogCtl->currentChunkStartTime = 0;
+        XLogCtl->recoveryPause = false;
+        SpinLockRelease(&XLogCtl->info_lck);

         /* Also ensure XLogReceiptTime has a sane value */
         XLogReceiptTime = GetCurrentTimestamp();
@@ -6733,7 +6670,7 @@ StartupXLOG(void)
                  * otherwise would is a minor issue, so it doesn't seem worth
                  * adding another spinlock cycle to prevent that.
                  */
-                if (xlogctl->recoveryPause)
+                if (((volatile XLogCtlData *) XLogCtl)->recoveryPause)
                     recoveryPausesHere();

                 /*
@@ -6758,7 +6695,7 @@ StartupXLOG(void)
                      * here otherwise pausing during the delay-wait wouldn't
                      * work.
                      */
-                    if (xlogctl->recoveryPause)
+                    if (((volatile XLogCtlData *) XLogCtl)->recoveryPause)
                         recoveryPausesHere();
                 }
@@ -6831,10 +6768,10 @@ StartupXLOG(void)
             * Update shared replayEndRecPtr before replaying this record,
             * so that XLogFlush will update minRecoveryPoint correctly.
             */
-            SpinLockAcquire(&xlogctl->info_lck);
-            xlogctl->replayEndRecPtr = EndRecPtr;
-            xlogctl->replayEndTLI = ThisTimeLineID;
-            SpinLockRelease(&xlogctl->info_lck);
+            SpinLockAcquire(&XLogCtl->info_lck);
+            XLogCtl->replayEndRecPtr = EndRecPtr;
+            XLogCtl->replayEndTLI = ThisTimeLineID;
+            SpinLockRelease(&XLogCtl->info_lck);

             /*
              * If we are attempting to enter Hot Standby mode, process
@@ -6854,10 +6791,10 @@ StartupXLOG(void)
             * Update lastReplayedEndRecPtr after this record has been
             * successfully replayed.
             */
-            SpinLockAcquire(&xlogctl->info_lck);
-            xlogctl->lastReplayedEndRecPtr = EndRecPtr;
-            xlogctl->lastReplayedTLI = ThisTimeLineID;
-            SpinLockRelease(&xlogctl->info_lck);
+            SpinLockAcquire(&XLogCtl->info_lck);
+            XLogCtl->lastReplayedEndRecPtr = EndRecPtr;
+            XLogCtl->lastReplayedTLI = ThisTimeLineID;
+            SpinLockRelease(&XLogCtl->info_lck);

             /* Remember this record as the last-applied one */
             LastRec = ReadRecPtr;
@@ -7267,14 +7204,9 @@ StartupXLOG(void)
      * there are no race conditions concerning visibility of other recent
      * updates to shared memory.)
      */
-    {
-        /* use volatile pointer to prevent code rearrangement */
-        volatile XLogCtlData *xlogctl = XLogCtl;
-
-        SpinLockAcquire(&xlogctl->info_lck);
-        xlogctl->SharedRecoveryInProgress = false;
-        SpinLockRelease(&xlogctl->info_lck);
-    }
+    SpinLockAcquire(&XLogCtl->info_lck);
+    XLogCtl->SharedRecoveryInProgress = false;
+    SpinLockRelease(&XLogCtl->info_lck);

     /*
      * If there were cascading standby servers connected to us, nudge any wal
@@ -7377,12 +7309,9 @@ CheckRecoveryConsistency(void)
         reachedConsistency &&
         IsUnderPostmaster)
     {
-        /* use volatile pointer to prevent code rearrangement */
-        volatile XLogCtlData *xlogctl = XLogCtl;
-
-        SpinLockAcquire(&xlogctl->info_lck);
-        xlogctl->SharedHotStandbyActive = true;
-        SpinLockRelease(&xlogctl->info_lck);
+        SpinLockAcquire(&XLogCtl->info_lck);
+        XLogCtl->SharedHotStandbyActive = true;
+        SpinLockRelease(&XLogCtl->info_lck);

         LocalHotStandbyActive = true;
@@ -7467,13 +7396,10 @@ HotStandbyActive(void)
         return true;
     else
     {
-        /* use volatile pointer to prevent code rearrangement */
-        volatile XLogCtlData *xlogctl = XLogCtl;
-
         /* spinlock is essential on machines with weak memory ordering! */
-        SpinLockAcquire(&xlogctl->info_lck);
-        LocalHotStandbyActive = xlogctl->SharedHotStandbyActive;
-        SpinLockRelease(&xlogctl->info_lck);
+        SpinLockAcquire(&XLogCtl->info_lck);
+        LocalHotStandbyActive = XLogCtl->SharedHotStandbyActive;
+        SpinLockRelease(&XLogCtl->info_lck);

         return LocalHotStandbyActive;
     }
@@ -7688,8 +7614,6 @@ InitXLOGAccess(void)
 XLogRecPtr
 GetRedoRecPtr(void)
 {
-    /* use volatile pointer to prevent code rearrangement */
-    volatile XLogCtlData *xlogctl = XLogCtl;
     XLogRecPtr  ptr;

     /*
@@ -7697,9 +7621,9 @@ GetRedoRecPtr(void)
      * grabbed a WAL insertion lock to read the master copy, someone might
      * update it just after we've released the lock.
      */
-    SpinLockAcquire(&xlogctl->info_lck);
-    ptr = xlogctl->RedoRecPtr;
-    SpinLockRelease(&xlogctl->info_lck);
+    SpinLockAcquire(&XLogCtl->info_lck);
+    ptr = XLogCtl->RedoRecPtr;
+    SpinLockRelease(&XLogCtl->info_lck);

     if (RedoRecPtr < ptr)
         RedoRecPtr = ptr;
@@ -7718,13 +7642,11 @@ GetRedoRecPtr(void)
 XLogRecPtr
 GetInsertRecPtr(void)
 {
-    /* use volatile pointer to prevent code rearrangement */
-    volatile XLogCtlData *xlogctl = XLogCtl;
     XLogRecPtr  recptr;

-    SpinLockAcquire(&xlogctl->info_lck);
-    recptr = xlogctl->LogwrtRqst.Write;
-    SpinLockRelease(&xlogctl->info_lck);
+    SpinLockAcquire(&XLogCtl->info_lck);
+    recptr = XLogCtl->LogwrtRqst.Write;
+    SpinLockRelease(&XLogCtl->info_lck);

     return recptr;
 }
@@ -7736,13 +7658,11 @@ GetInsertRecPtr(void)
 XLogRecPtr
 GetFlushRecPtr(void)
 {
-    /* use volatile pointer to prevent code rearrangement */
-    volatile XLogCtlData *xlogctl = XLogCtl;
     XLogRecPtr  recptr;

-    SpinLockAcquire(&xlogctl->info_lck);
-    recptr = xlogctl->LogwrtResult.Flush;
-    SpinLockRelease(&xlogctl->info_lck);
+    SpinLockAcquire(&XLogCtl->info_lck);
+    recptr = XLogCtl->LogwrtResult.Flush;
+    SpinLockRelease(&XLogCtl->info_lck);

     return recptr;
 }
@@ -7779,15 +7699,10 @@ GetNextXidAndEpoch(TransactionId *xid, uint32 *epoch)
     TransactionId nextXid;

     /* Must read checkpoint info first, else have race condition */
-    {
-        /* use volatile pointer to prevent code rearrangement */
-        volatile XLogCtlData *xlogctl = XLogCtl;
-
-        SpinLockAcquire(&xlogctl->info_lck);
-        ckptXidEpoch = xlogctl->ckptXidEpoch;
-        ckptXid = xlogctl->ckptXid;
-        SpinLockRelease(&xlogctl->info_lck);
-    }
+    SpinLockAcquire(&XLogCtl->info_lck);
+    ckptXidEpoch = XLogCtl->ckptXidEpoch;
+    ckptXid = XLogCtl->ckptXid;
+    SpinLockRelease(&XLogCtl->info_lck);

     /* Now fetch current nextXid */
     nextXid = ReadNewTransactionId();
@@ -7990,8 +7905,6 @@ LogCheckpointEnd(bool restartpoint)
 void
 CreateCheckPoint(int flags)
 {
-    /* use volatile pointer to prevent code rearrangement */
-    volatile XLogCtlData *xlogctl = XLogCtl;
     bool        shutdown;
     CheckPoint  checkPoint;
     XLogRecPtr  recptr;
@@ -8151,7 +8064,7 @@ CreateCheckPoint(int flags)
      * XLogInserts that happen while we are dumping buffers must assume that
      * their buffer changes are not included in the checkpoint.
      */
-    RedoRecPtr = xlogctl->Insert.RedoRecPtr = checkPoint.redo;
+    RedoRecPtr = XLogCtl->Insert.RedoRecPtr = checkPoint.redo;

     /*
      * Now we can release the WAL insertion locks, allowing other xacts to
@@ -8160,9 +8073,9 @@ CreateCheckPoint(int flags)
     WALInsertLockRelease();

     /* Update the info_lck-protected copy of RedoRecPtr as well */
-    SpinLockAcquire(&xlogctl->info_lck);
-    xlogctl->RedoRecPtr = checkPoint.redo;
-    SpinLockRelease(&xlogctl->info_lck);
+    SpinLockAcquire(&XLogCtl->info_lck);
+    XLogCtl->RedoRecPtr = checkPoint.redo;
+    SpinLockRelease(&XLogCtl->info_lck);

     /*
      * If enabled, log checkpoint start. We postpone this until now so as not
@@ -8334,15 +8247,10 @@ CreateCheckPoint(int flags)
     LWLockRelease(ControlFileLock);

     /* Update shared-memory copy of checkpoint XID/epoch */
-    {
-        /* use volatile pointer to prevent code rearrangement */
-        volatile XLogCtlData *xlogctl = XLogCtl;
-
-        SpinLockAcquire(&xlogctl->info_lck);
-        xlogctl->ckptXidEpoch = checkPoint.nextXidEpoch;
-        xlogctl->ckptXid = checkPoint.nextXid;
-        SpinLockRelease(&xlogctl->info_lck);
-    }
+    SpinLockAcquire(&XLogCtl->info_lck);
+    XLogCtl->ckptXidEpoch = checkPoint.nextXidEpoch;
+    XLogCtl->ckptXid = checkPoint.nextXid;
+    SpinLockRelease(&XLogCtl->info_lck);

     /*
      * We are now done with critical updates; no need for system panic if we
@@ -8497,9 +8405,6 @@ CheckPointGuts(XLogRecPtr checkPointRedo, int flags)
 static void
 RecoveryRestartPoint(const CheckPoint *checkPoint)
 {
-    /* use volatile pointer to prevent code rearrangement */
-    volatile XLogCtlData *xlogctl = XLogCtl;
-
     /*
      * Also refrain from creating a restartpoint if we have seen any
      * references to non-existent pages. Restarting recovery from the
@@ -8521,10 +8426,10 @@ RecoveryRestartPoint(const CheckPoint *checkPoint)
      * Copy the checkpoint record to shared memory, so that checkpointer can
      * work out the next time it wants to perform a restartpoint.
      */
-    SpinLockAcquire(&xlogctl->info_lck);
-    xlogctl->lastCheckPointRecPtr = ReadRecPtr;
-    xlogctl->lastCheckPoint = *checkPoint;
-    SpinLockRelease(&xlogctl->info_lck);
+    SpinLockAcquire(&XLogCtl->info_lck);
+    XLogCtl->lastCheckPointRecPtr = ReadRecPtr;
+    XLogCtl->lastCheckPoint = *checkPoint;
+    SpinLockRelease(&XLogCtl->info_lck);
 }

 /*
@@ -8546,9 +8451,6 @@ CreateRestartPoint(int flags)
     XLogSegNo   _logSegNo;
     TimestampTz xtime;

-    /* use volatile pointer to prevent code rearrangement */
-    volatile XLogCtlData *xlogctl = XLogCtl;
-
     /*
      * Acquire CheckpointLock to ensure only one restartpoint or checkpoint
      * happens at a time.
@@ -8556,10 +8458,10 @@ CreateRestartPoint(int flags)
     LWLockAcquire(CheckpointLock, LW_EXCLUSIVE);

     /* Get a local copy of the last safe checkpoint record. */
-    SpinLockAcquire(&xlogctl->info_lck);
-    lastCheckPointRecPtr = xlogctl->lastCheckPointRecPtr;
-    lastCheckPoint = xlogctl->lastCheckPoint;
-    SpinLockRelease(&xlogctl->info_lck);
+    SpinLockAcquire(&XLogCtl->info_lck);
+    lastCheckPointRecPtr = XLogCtl->lastCheckPointRecPtr;
+    lastCheckPoint = XLogCtl->lastCheckPoint;
+    SpinLockRelease(&XLogCtl->info_lck);

     /*
      * Check that we're still in recovery mode. It's ok if we exit recovery
@@ -8618,13 +8520,13 @@ CreateRestartPoint(int flags)
      * happening.
      */
     WALInsertLockAcquireExclusive();
-    xlogctl->Insert.RedoRecPtr = lastCheckPoint.redo;
+    XLogCtl->Insert.RedoRecPtr = lastCheckPoint.redo;
     WALInsertLockRelease();

     /* Also update the info_lck-protected copy */
-    SpinLockAcquire(&xlogctl->info_lck);
-    xlogctl->RedoRecPtr = lastCheckPoint.redo;
-    SpinLockRelease(&xlogctl->info_lck);
+    SpinLockAcquire(&XLogCtl->info_lck);
+    XLogCtl->RedoRecPtr = lastCheckPoint.redo;
+    SpinLockRelease(&XLogCtl->info_lck);

     /*
      * Prepare to accumulate statistics.
@@ -9384,15 +9286,10 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
         ControlFile->checkPointCopy.nextXid = checkPoint.nextXid;

         /* Update shared-memory copy of checkpoint XID/epoch */
-        {
-            /* use volatile pointer to prevent code rearrangement */
-            volatile XLogCtlData *xlogctl = XLogCtl;
-
-            SpinLockAcquire(&xlogctl->info_lck);
-            xlogctl->ckptXidEpoch = checkPoint.nextXidEpoch;
-            xlogctl->ckptXid = checkPoint.nextXid;
-            SpinLockRelease(&xlogctl->info_lck);
-        }
+        SpinLockAcquire(&XLogCtl->info_lck);
+        XLogCtl->ckptXidEpoch = checkPoint.nextXidEpoch;
+        XLogCtl->ckptXid = checkPoint.nextXid;
+        SpinLockRelease(&XLogCtl->info_lck);

         /*
          * We should've already switched to the new TLI before replaying this
@@ -9436,15 +9333,10 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
         ControlFile->checkPointCopy.nextXid = checkPoint.nextXid;

         /* Update shared-memory copy of checkpoint XID/epoch */
-        {
-            /* use volatile pointer to prevent code rearrangement */
-            volatile XLogCtlData *xlogctl = XLogCtl;
-
-            SpinLockAcquire(&xlogctl->info_lck);
-            xlogctl->ckptXidEpoch = checkPoint.nextXidEpoch;
-            xlogctl->ckptXid = checkPoint.nextXid;
-            SpinLockRelease(&xlogctl->info_lck);
-        }
+        SpinLockAcquire(&XLogCtl->info_lck);
+        XLogCtl->ckptXidEpoch = checkPoint.nextXidEpoch;
+        XLogCtl->ckptXid = checkPoint.nextXid;
+        SpinLockRelease(&XLogCtl->info_lck);

         /* TLI should not change in an on-line checkpoint */
         if (checkPoint.ThisTimeLineID != ThisTimeLineID)
@@ -9581,8 +9473,6 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
     }
     else if (info == XLOG_FPW_CHANGE)
     {
-        /* use volatile pointer to prevent code rearrangement */
-        volatile XLogCtlData *xlogctl = XLogCtl;
         bool        fpw;

         memcpy(&fpw, XLogRecGetData(record), sizeof(bool));
@@ -9594,10 +9484,10 @@ xlog_redo(XLogRecPtr lsn, XLogRecord *record)
          */
         if (!fpw)
         {
-            SpinLockAcquire(&xlogctl->info_lck);
-            if (xlogctl->lastFpwDisableRecPtr < ReadRecPtr)
-                xlogctl->lastFpwDisableRecPtr = ReadRecPtr;
-            SpinLockRelease(&xlogctl->info_lck);
+            SpinLockAcquire(&XLogCtl->info_lck);
+            if (XLogCtl->lastFpwDisableRecPtr < ReadRecPtr)
+                XLogCtl->lastFpwDisableRecPtr = ReadRecPtr;
+            SpinLockRelease(&XLogCtl->info_lck);
         }

         /* Keep track of full_page_writes */
@@ -9972,8 +9862,6 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
         if (backup_started_in_recovery)
         {
-            /* use volatile pointer to prevent code rearrangement */
-            volatile XLogCtlData *xlogctl = XLogCtl;
             XLogRecPtr  recptr;

             /*
@@ -9981,9 +9869,9 @@ do_pg_start_backup(const char *backupidstr, bool fast, TimeLineID *starttli_p,
              * (i.e., since last restartpoint used as backup starting
              * checkpoint) contain full-page writes.
              */
-            SpinLockAcquire(&xlogctl->info_lck);
-            recptr = xlogctl->lastFpwDisableRecPtr;
-            SpinLockRelease(&xlogctl->info_lck);
+            SpinLockAcquire(&XLogCtl->info_lck);
+            recptr = XLogCtl->lastFpwDisableRecPtr;
+            SpinLockRelease(&XLogCtl->info_lck);

             if (!checkpointfpw || startpoint <= recptr)
                 ereport(ERROR,
@@ -10326,17 +10214,15 @@ do_pg_stop_backup(char *labelfile, bool waitforarchive, TimeLineID *stoptli_p)
      */
     if (backup_started_in_recovery)
     {
-        /* use volatile pointer to prevent code rearrangement */
-        volatile XLogCtlData *xlogctl = XLogCtl;
         XLogRecPtr  recptr;

         /*
          * Check to see if all WAL replayed during online backup contain
          * full-page writes.
          */
-        SpinLockAcquire(&xlogctl->info_lck);
-        recptr = xlogctl->lastFpwDisableRecPtr;
-        SpinLockRelease(&xlogctl->info_lck);
+        SpinLockAcquire(&XLogCtl->info_lck);
+        recptr = XLogCtl->lastFpwDisableRecPtr;
+        SpinLockRelease(&XLogCtl->info_lck);

         if (startpoint <= recptr)
             ereport(ERROR,
@@ -10523,15 +10409,13 @@ do_pg_abort_backup(void)
 XLogRecPtr
 GetXLogReplayRecPtr(TimeLineID *replayTLI)
 {
-    /* use volatile pointer to prevent code rearrangement */
-    volatile XLogCtlData *xlogctl = XLogCtl;
     XLogRecPtr  recptr;
     TimeLineID  tli;

-    SpinLockAcquire(&xlogctl->info_lck);
-    recptr = xlogctl->lastReplayedEndRecPtr;
-    tli = xlogctl->lastReplayedTLI;
-    SpinLockRelease(&xlogctl->info_lck);
+    SpinLockAcquire(&XLogCtl->info_lck);
+    recptr = XLogCtl->lastReplayedEndRecPtr;
+    tli = XLogCtl->lastReplayedTLI;
+    SpinLockRelease(&XLogCtl->info_lck);

     if (replayTLI)
         *replayTLI = tli;
@@ -10544,7 +10428,7 @@ GetXLogReplayRecPtr(TimeLineID *replayTLI)
 XLogRecPtr
 GetXLogInsertRecPtr(void)
 {
-    volatile XLogCtlInsert *Insert = &XLogCtl->Insert;
+    XLogCtlInsert *Insert = &XLogCtl->Insert;
     uint64      current_bytepos;

     SpinLockAcquire(&Insert->insertpos_lck);
@@ -10560,14 +10444,9 @@ GetXLogInsertRecPtr(void)
 XLogRecPtr
 GetXLogWriteRecPtr(void)
 {
-    {
-        /* use volatile pointer to prevent code rearrangement */
-        volatile XLogCtlData *xlogctl = XLogCtl;
-
-        SpinLockAcquire(&xlogctl->info_lck);
-        LogwrtResult = xlogctl->LogwrtResult;
-        SpinLockRelease(&xlogctl->info_lck);
-    }
+    SpinLockAcquire(&XLogCtl->info_lck);
+    LogwrtResult = XLogCtl->LogwrtResult;
+    SpinLockRelease(&XLogCtl->info_lck);

     return LogwrtResult.Write;
 }
@@ -11393,10 +11272,7 @@ WakeupRecovery(void)
 void
 SetWalWriterSleeping(bool sleeping)
 {
-    /* use volatile pointer to prevent code rearrangement */
-    volatile XLogCtlData *xlogctl = XLogCtl;
-
-    SpinLockAcquire(&xlogctl->info_lck);
-    xlogctl->WalWriterSleeping = sleeping;
-    SpinLockRelease(&xlogctl->info_lck);
+    SpinLockAcquire(&XLogCtl->info_lck);
+    XLogCtl->WalWriterSleeping = sleeping;
+    SpinLockRelease(&XLogCtl->info_lck);
 }