Commit 5ab3af46 authored by Alvaro Herrera

Remove obsolete XLogRecPtr macros

This gets rid of XLByteLT, XLByteLE, XLByteEQ and XLByteAdvance.
These were useful for brevity when XLogRecPtrs were split into
xlogid/xrecoff fields; but now that they are simple uint64s, they are
just clutter.  The only downside to making this change is that it makes
backporting patches somewhat harder, but that has been negated by other
substantive changes to the involved code anyway.  The clarity of simpler
expressions makes the change worthwhile.
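
For reference, the removed macros were thin wrappers around ordinary
comparisons (see the xlogdefs.h hunk at the end of this diff).  Below is a
minimal standalone sketch of the mapping; it is not part of the commit, it
uses uint64_t in place of PostgreSQL's uint64 typedef, and the LSN values
are arbitrary examples.

/*
 * Standalone sketch (not PostgreSQL code): the new spellings, with the
 * macro each one replaces noted alongside.
 */
#include <assert.h>
#include <stdint.h>

typedef uint64_t XLogRecPtr;	/* stand-in for PostgreSQL's uint64 */

int
main(void)
{
	XLogRecPtr	a = UINT64_C(0x100001C8);
	XLogRecPtr	b = UINT64_C(0x10000230);

	assert(a < b);				/* was XLByteLT(a, b)  */
	assert(a <= b);				/* was XLByteLE(a, b)  */
	assert(!(a == b));			/* was !XLByteEQ(a, b) */

	a += 8;						/* was XLByteAdvance(a, 8) */
	assert(a == UINT64_C(0x100001D0));

	return 0;
}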

Most of the changes are mechanical, but in a couple of places, the patch
author chose to invert the operator sense, making the code flow more
logical (and more in line with preceding comments).
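
Concretely, the inverted forms rely on the usual identities !(a <= b) == (a > b)
and !(a < b) == (a >= b), which is why, for example, !XLByteLE(lsn, PageGetLSN(page))
becomes lsn > PageGetLSN(page) in the redo routines below.  A small standalone
check of those identities over a few sample values (again a sketch, not part of
the commit):

#include <assert.h>
#include <stdint.h>

typedef uint64_t XLogRecPtr;	/* stand-in for PostgreSQL's uint64 */

int
main(void)
{
	XLogRecPtr	samples[] = {0, 1, UINT64_C(0x10000000), UINT64_MAX};
	int			i,
				j;

	for (i = 0; i < 4; i++)
		for (j = 0; j < 4; j++)
		{
			XLogRecPtr	lsn = samples[i];
			XLogRecPtr	pagelsn = samples[j];

			/* old: !XLByteLE(lsn, pagelsn)   new: lsn > pagelsn  */
			assert(!(lsn <= pagelsn) == (lsn > pagelsn));
			/* old: !XLByteLT(lsn, pagelsn)   new: lsn >= pagelsn */
			assert(!(lsn < pagelsn) == (lsn >= pagelsn));
		}

	return 0;
}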

Author: Andres Freund
Eyeballed by Dimitri Fontaine and Alvaro Herrera
parent 24eca797
......@@ -177,7 +177,7 @@ ginRedoInsert(XLogRecPtr lsn, XLogRecord *record)
return; /* page was deleted, nothing to do */
page = (Page) BufferGetPage(buffer);
if (!XLByteLE(lsn, PageGetLSN(page)))
if (lsn > PageGetLSN(page))
{
if (data->isData)
{
......@@ -393,7 +393,7 @@ ginRedoVacuumPage(XLogRecPtr lsn, XLogRecord *record)
return;
page = (Page) BufferGetPage(buffer);
if (!XLByteLE(lsn, PageGetLSN(page)))
if (lsn > PageGetLSN(page))
{
if (GinPageIsData(page))
{
......@@ -448,7 +448,7 @@ ginRedoDeletePage(XLogRecPtr lsn, XLogRecord *record)
if (BufferIsValid(dbuffer))
{
page = BufferGetPage(dbuffer);
if (!XLByteLE(lsn, PageGetLSN(page)))
if (lsn > PageGetLSN(page))
{
Assert(GinPageIsData(page));
GinPageGetOpaque(page)->flags = GIN_DELETED;
......@@ -467,7 +467,7 @@ ginRedoDeletePage(XLogRecPtr lsn, XLogRecord *record)
if (BufferIsValid(pbuffer))
{
page = BufferGetPage(pbuffer);
if (!XLByteLE(lsn, PageGetLSN(page)))
if (lsn > PageGetLSN(page))
{
Assert(GinPageIsData(page));
Assert(!GinPageIsLeaf(page));
......@@ -487,7 +487,7 @@ ginRedoDeletePage(XLogRecPtr lsn, XLogRecord *record)
if (BufferIsValid(lbuffer))
{
page = BufferGetPage(lbuffer);
if (!XLByteLE(lsn, PageGetLSN(page)))
if (lsn > PageGetLSN(page))
{
Assert(GinPageIsData(page));
GinPageGetOpaque(page)->rightlink = data->rightLink;
......@@ -518,7 +518,7 @@ ginRedoUpdateMetapage(XLogRecPtr lsn, XLogRecord *record)
return; /* assume index was deleted, nothing to do */
metapage = BufferGetPage(metabuffer);
if (!XLByteLE(lsn, PageGetLSN(metapage)))
if (lsn > PageGetLSN(metapage))
{
memcpy(GinPageGetMeta(metapage), &data->metadata, sizeof(GinMetaPageData));
PageSetLSN(metapage, lsn);
......@@ -540,7 +540,7 @@ ginRedoUpdateMetapage(XLogRecPtr lsn, XLogRecord *record)
{
Page page = BufferGetPage(buffer);
if (!XLByteLE(lsn, PageGetLSN(page)))
if (lsn > PageGetLSN(page))
{
OffsetNumber l,
off = (PageIsEmpty(page)) ? FirstOffsetNumber :
......@@ -590,7 +590,7 @@ ginRedoUpdateMetapage(XLogRecPtr lsn, XLogRecord *record)
{
Page page = BufferGetPage(buffer);
if (!XLByteLE(lsn, PageGetLSN(page)))
if (lsn > PageGetLSN(page))
{
GinPageGetOpaque(page)->rightlink = data->newRightlink;
......@@ -677,7 +677,7 @@ ginRedoDeleteListPages(XLogRecPtr lsn, XLogRecord *record)
return; /* assume index was deleted, nothing to do */
metapage = BufferGetPage(metabuffer);
if (!XLByteLE(lsn, PageGetLSN(metapage)))
if (lsn > PageGetLSN(metapage))
{
memcpy(GinPageGetMeta(metapage), &data->metadata, sizeof(GinMetaPageData));
PageSetLSN(metapage, lsn);
......@@ -703,7 +703,7 @@ ginRedoDeleteListPages(XLogRecPtr lsn, XLogRecord *record)
{
Page page = BufferGetPage(buffer);
if (!XLByteLE(lsn, PageGetLSN(page)))
if (lsn > PageGetLSN(page))
{
GinPageGetOpaque(page)->flags = GIN_DELETED;
......
......@@ -561,8 +561,7 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate)
}
if (stack->blkno != GIST_ROOT_BLKNO &&
XLByteLT(stack->parent->lsn,
GistPageGetOpaque(stack->page)->nsn))
stack->parent->lsn < GistPageGetOpaque(stack->page)->nsn)
{
/*
* Concurrent split detected. There's no guarantee that the
......@@ -620,7 +619,7 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate)
xlocked = true;
stack->page = (Page) BufferGetPage(stack->buffer);
if (!XLByteEQ(PageGetLSN(stack->page), stack->lsn))
if (PageGetLSN(stack->page) != stack->lsn)
{
/* the page was changed while we unlocked it, retry */
continue;
......@@ -708,8 +707,8 @@ gistdoinsert(Relation r, IndexTuple itup, Size freespace, GISTSTATE *giststate)
*/
}
else if (GistFollowRight(stack->page) ||
XLByteLT(stack->parent->lsn,
GistPageGetOpaque(stack->page)->nsn))
stack->parent->lsn <
GistPageGetOpaque(stack->page)->nsn)
{
/*
* The page was split while we momentarily unlocked the
......@@ -794,7 +793,7 @@ gistFindPath(Relation r, BlockNumber child, OffsetNumber *downlinkoffnum)
if (GistFollowRight(page))
elog(ERROR, "concurrent GiST page split was incomplete");
if (top->parent && XLByteLT(top->parent->lsn, GistPageGetOpaque(page)->nsn) &&
if (top->parent && top->parent->lsn < GistPageGetOpaque(page)->nsn &&
GistPageGetOpaque(page)->rightlink != InvalidBlockNumber /* sanity check */ )
{
/*
......@@ -864,7 +863,8 @@ gistFindCorrectParent(Relation r, GISTInsertStack *child)
parent->page = (Page) BufferGetPage(parent->buffer);
/* here we don't need to distinguish between split and page update */
if (child->downlinkoffnum == InvalidOffsetNumber || !XLByteEQ(parent->lsn, PageGetLSN(parent->page)))
if (child->downlinkoffnum == InvalidOffsetNumber ||
parent->lsn != PageGetLSN(parent->page))
{
/* parent is changed, look child in right links until found */
OffsetNumber i,
......
......@@ -263,7 +263,7 @@ gistScanPage(IndexScanDesc scan, GISTSearchItem *pageItem, double *myDistances,
*/
if (!XLogRecPtrIsInvalid(pageItem->data.parentlsn) &&
(GistFollowRight(page) ||
XLByteLT(pageItem->data.parentlsn, opaque->nsn)) &&
pageItem->data.parentlsn < opaque->nsn) &&
opaque->rightlink != InvalidBlockNumber /* sanity check */ )
{
/* There was a page split, follow right link to add pages */
......
......@@ -114,7 +114,7 @@ pushStackIfSplited(Page page, GistBDItem *stack)
GISTPageOpaque opaque = GistPageGetOpaque(page);
if (stack->blkno != GIST_ROOT_BLKNO && !XLogRecPtrIsInvalid(stack->parentlsn) &&
(GistFollowRight(page) || XLByteLT(stack->parentlsn, opaque->nsn)) &&
(GistFollowRight(page) || stack->parentlsn < opaque->nsn) &&
opaque->rightlink != InvalidBlockNumber /* sanity check */ )
{
/* split page detected, install right link to the stack */
......
......@@ -64,7 +64,7 @@ gistRedoClearFollowRight(XLogRecPtr lsn, XLogRecord *record, int block_index,
* of this record, because the updated NSN is not included in the full
* page image.
*/
if (!XLByteLT(lsn, PageGetLSN(page)))
if (lsn >= PageGetLSN(page))
{
GistPageGetOpaque(page)->nsn = lsn;
GistClearFollowRight(page);
......@@ -119,7 +119,7 @@ gistRedoPageUpdateRecord(XLogRecPtr lsn, XLogRecord *record)
page = (Page) BufferGetPage(buffer);
/* nothing more to do if change already applied */
if (XLByteLE(lsn, PageGetLSN(page)))
if (lsn <= PageGetLSN(page))
{
UnlockReleaseBuffer(buffer);
return;
......
......@@ -4700,7 +4700,7 @@ heap_xlog_clean(XLogRecPtr lsn, XLogRecord *record)
LockBufferForCleanup(buffer);
page = (Page) BufferGetPage(buffer);
if (XLByteLE(lsn, PageGetLSN(page)))
if (lsn <= PageGetLSN(page))
{
UnlockReleaseBuffer(buffer);
return;
......@@ -4770,7 +4770,7 @@ heap_xlog_freeze(XLogRecPtr lsn, XLogRecord *record)
return;
page = (Page) BufferGetPage(buffer);
if (XLByteLE(lsn, PageGetLSN(page)))
if (lsn <= PageGetLSN(page))
{
UnlockReleaseBuffer(buffer);
return;
......@@ -4854,7 +4854,7 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
* XLOG record's LSN, we mustn't mark the page all-visible, because
* the subsequent update won't be replayed to clear the flag.
*/
if (!XLByteLE(lsn, PageGetLSN(page)))
if (lsn > PageGetLSN(page))
{
PageSetAllVisible(page);
MarkBufferDirty(buffer);
......@@ -4891,7 +4891,7 @@ heap_xlog_visible(XLogRecPtr lsn, XLogRecord *record)
* we did for the heap page. If this results in a dropped bit, no
* real harm is done; and the next VACUUM will fix it.
*/
if (!XLByteLE(lsn, PageGetLSN(BufferGetPage(vmbuffer))))
if (lsn > PageGetLSN(BufferGetPage(vmbuffer)))
visibilitymap_set(reln, xlrec->block, lsn, vmbuffer,
xlrec->cutoff_xid);
......@@ -4977,7 +4977,7 @@ heap_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
return;
page = (Page) BufferGetPage(buffer);
if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
if (lsn <= PageGetLSN(page)) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;
......@@ -5072,7 +5072,7 @@ heap_xlog_insert(XLogRecPtr lsn, XLogRecord *record)
return;
page = (Page) BufferGetPage(buffer);
if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
if (lsn <= PageGetLSN(page)) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;
......@@ -5207,7 +5207,7 @@ heap_xlog_multi_insert(XLogRecPtr lsn, XLogRecord *record)
return;
page = (Page) BufferGetPage(buffer);
if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
if (lsn <= PageGetLSN(page)) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;
......@@ -5349,7 +5349,7 @@ heap_xlog_update(XLogRecPtr lsn, XLogRecord *record, bool hot_update)
goto newt;
page = (Page) BufferGetPage(obuffer);
if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
if (lsn <= PageGetLSN(page)) /* changes are applied */
{
if (samepage)
{
......@@ -5449,7 +5449,7 @@ newt:;
return;
page = (Page) BufferGetPage(nbuffer);
if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
if (lsn <= PageGetLSN(page)) /* changes are applied */
{
UnlockReleaseBuffer(nbuffer);
if (BufferIsValid(obuffer))
......@@ -5549,7 +5549,7 @@ heap_xlog_lock(XLogRecPtr lsn, XLogRecord *record)
return;
page = (Page) BufferGetPage(buffer);
if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
if (lsn <= PageGetLSN(page)) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;
......@@ -5612,7 +5612,7 @@ heap_xlog_inplace(XLogRecPtr lsn, XLogRecord *record)
return;
page = (Page) BufferGetPage(buffer);
if (XLByteLE(lsn, PageGetLSN(page))) /* changes are applied */
if (lsn <= PageGetLSN(page)) /* changes are applied */
{
UnlockReleaseBuffer(buffer);
return;
......
......@@ -229,7 +229,7 @@ btree_xlog_insert(bool isleaf, bool ismeta,
{
page = (Page) BufferGetPage(buffer);
if (XLByteLE(lsn, PageGetLSN(page)))
if (lsn <= PageGetLSN(page))
{
UnlockReleaseBuffer(buffer);
}
......@@ -381,7 +381,7 @@ btree_xlog_split(bool onleft, bool isroot,
Page lpage = (Page) BufferGetPage(lbuf);
BTPageOpaque lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage);
if (!XLByteLE(lsn, PageGetLSN(lpage)))
if (lsn > PageGetLSN(lpage))
{
OffsetNumber off;
OffsetNumber maxoff = PageGetMaxOffsetNumber(lpage);
......@@ -459,7 +459,7 @@ btree_xlog_split(bool onleft, bool isroot,
{
Page page = (Page) BufferGetPage(buffer);
if (!XLByteLE(lsn, PageGetLSN(page)))
if (lsn > PageGetLSN(page))
{
BTPageOpaque pageop = (BTPageOpaque) PageGetSpecialPointer(page);
......@@ -537,7 +537,7 @@ btree_xlog_vacuum(XLogRecPtr lsn, XLogRecord *record)
LockBufferForCleanup(buffer);
page = (Page) BufferGetPage(buffer);
if (XLByteLE(lsn, PageGetLSN(page)))
if (lsn <= PageGetLSN(page))
{
UnlockReleaseBuffer(buffer);
return;
......@@ -757,7 +757,7 @@ btree_xlog_delete(XLogRecPtr lsn, XLogRecord *record)
return;
page = (Page) BufferGetPage(buffer);
if (XLByteLE(lsn, PageGetLSN(page)))
if (lsn <= PageGetLSN(page))
{
UnlockReleaseBuffer(buffer);
return;
......@@ -820,7 +820,7 @@ btree_xlog_delete_page(uint8 info, XLogRecPtr lsn, XLogRecord *record)
{
page = (Page) BufferGetPage(buffer);
pageop = (BTPageOpaque) PageGetSpecialPointer(page);
if (XLByteLE(lsn, PageGetLSN(page)))
if (lsn <= PageGetLSN(page))
{
UnlockReleaseBuffer(buffer);
}
......@@ -867,7 +867,7 @@ btree_xlog_delete_page(uint8 info, XLogRecPtr lsn, XLogRecord *record)
if (BufferIsValid(buffer))
{
page = (Page) BufferGetPage(buffer);
if (XLByteLE(lsn, PageGetLSN(page)))
if (lsn <= PageGetLSN(page))
{
UnlockReleaseBuffer(buffer);
}
......@@ -895,7 +895,7 @@ btree_xlog_delete_page(uint8 info, XLogRecPtr lsn, XLogRecord *record)
if (BufferIsValid(buffer))
{
page = (Page) BufferGetPage(buffer);
if (XLByteLE(lsn, PageGetLSN(page)))
if (lsn <= PageGetLSN(page))
{
UnlockReleaseBuffer(buffer);
}
......
......@@ -139,7 +139,7 @@ spgRedoAddLeaf(XLogRecPtr lsn, XLogRecord *record)
SpGistInitBuffer(buffer,
SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
if (!XLByteLE(lsn, PageGetLSN(page)))
if (lsn > PageGetLSN(page))
{
/* insert new tuple */
if (xldata->offnumLeaf != xldata->offnumHeadLeaf)
......@@ -187,7 +187,7 @@ spgRedoAddLeaf(XLogRecPtr lsn, XLogRecord *record)
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
if (!XLByteLE(lsn, PageGetLSN(page)))
if (lsn > PageGetLSN(page))
{
SpGistInnerTuple tuple;
......@@ -251,7 +251,7 @@ spgRedoMoveLeafs(XLogRecPtr lsn, XLogRecord *record)
SpGistInitBuffer(buffer,
SPGIST_LEAF | (xldata->storesNulls ? SPGIST_NULLS : 0));
if (!XLByteLE(lsn, PageGetLSN(page)))
if (lsn > PageGetLSN(page))
{
int i;
......@@ -280,7 +280,7 @@ spgRedoMoveLeafs(XLogRecPtr lsn, XLogRecord *record)
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
if (!XLByteLE(lsn, PageGetLSN(page)))
if (lsn > PageGetLSN(page))
{
spgPageIndexMultiDelete(&state, page, toDelete, xldata->nMoves,
state.isBuild ? SPGIST_PLACEHOLDER : SPGIST_REDIRECT,
......@@ -305,7 +305,7 @@ spgRedoMoveLeafs(XLogRecPtr lsn, XLogRecord *record)
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
if (!XLByteLE(lsn, PageGetLSN(page)))
if (lsn > PageGetLSN(page))
{
SpGistInnerTuple tuple;
......@@ -353,7 +353,7 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record)
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
if (!XLByteLE(lsn, PageGetLSN(page)))
if (lsn > PageGetLSN(page))
{
PageIndexTupleDelete(page, xldata->offnum);
if (PageAddItem(page, (Item) innerTuple, innerTuple->size,
......@@ -399,7 +399,7 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record)
if (xldata->newPage)
SpGistInitBuffer(buffer, 0);
if (!XLByteLE(lsn, PageGetLSN(page)))
if (lsn > PageGetLSN(page))
{
addOrReplaceTuple(page, (Item) innerTuple,
innerTuple->size, xldata->offnumNew);
......@@ -430,7 +430,7 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record)
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
if (!XLByteLE(lsn, PageGetLSN(page)))
if (lsn > PageGetLSN(page))
{
SpGistDeadTuple dt;
......@@ -495,7 +495,7 @@ spgRedoAddNode(XLogRecPtr lsn, XLogRecord *record)
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
if (!XLByteLE(lsn, PageGetLSN(page)))
if (lsn > PageGetLSN(page))
{
SpGistInnerTuple innerTuple;
......@@ -552,7 +552,7 @@ spgRedoSplitTuple(XLogRecPtr lsn, XLogRecord *record)
if (xldata->newPage)
SpGistInitBuffer(buffer, 0);
if (!XLByteLE(lsn, PageGetLSN(page)))
if (lsn > PageGetLSN(page))
{
addOrReplaceTuple(page, (Item) postfixTuple,
postfixTuple->size, xldata->offnumPostfix);
......@@ -574,7 +574,7 @@ spgRedoSplitTuple(XLogRecPtr lsn, XLogRecord *record)
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
if (!XLByteLE(lsn, PageGetLSN(page)))
if (lsn > PageGetLSN(page))
{
PageIndexTupleDelete(page, xldata->offnumPrefix);
if (PageAddItem(page, (Item) prefixTuple, prefixTuple->size,
......@@ -670,7 +670,7 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
if (BufferIsValid(srcBuffer))
{
srcPage = BufferGetPage(srcBuffer);
if (!XLByteLE(lsn, PageGetLSN(srcPage)))
if (lsn > PageGetLSN(srcPage))
{
/*
* We have it a bit easier here than in doPickSplit(),
......@@ -737,7 +737,7 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
if (BufferIsValid(destBuffer))
{
destPage = (Page) BufferGetPage(destBuffer);
if (XLByteLE(lsn, PageGetLSN(destPage)))
if (lsn <= PageGetLSN(destPage))
destPage = NULL; /* don't do any page updates */
}
else
......@@ -790,7 +790,7 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
SpGistInitBuffer(buffer,
(xldata->storesNulls ? SPGIST_NULLS : 0));
if (!XLByteLE(lsn, PageGetLSN(page)))
if (lsn > PageGetLSN(page))
{
addOrReplaceTuple(page, (Item) innerTuple, innerTuple->size,
xldata->offnumInner);
......@@ -842,7 +842,7 @@ spgRedoPickSplit(XLogRecPtr lsn, XLogRecord *record)
{
page = BufferGetPage(buffer);
if (!XLByteLE(lsn, PageGetLSN(page)))
if (lsn > PageGetLSN(page))
{
SpGistInnerTuple parent;
......@@ -900,7 +900,7 @@ spgRedoVacuumLeaf(XLogRecPtr lsn, XLogRecord *record)
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
if (!XLByteLE(lsn, PageGetLSN(page)))
if (lsn > PageGetLSN(page))
{
spgPageIndexMultiDelete(&state, page,
toDead, xldata->nDead,
......@@ -971,7 +971,7 @@ spgRedoVacuumRoot(XLogRecPtr lsn, XLogRecord *record)
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
if (!XLByteLE(lsn, PageGetLSN(page)))
if (lsn > PageGetLSN(page))
{
/* The tuple numbers are in order */
PageIndexMultiDelete(page, toDelete, xldata->nDelete);
......@@ -1017,7 +1017,7 @@ spgRedoVacuumRedirect(XLogRecPtr lsn, XLogRecord *record)
if (BufferIsValid(buffer))
{
page = BufferGetPage(buffer);
if (!XLByteLE(lsn, PageGetLSN(page)))
if (lsn > PageGetLSN(page))
{
SpGistPageOpaque opaque = SpGistPageGetOpaque(page);
int i;
......
......@@ -365,7 +365,7 @@ TransactionIdSetStatusBit(TransactionId xid, XidStatus status, XLogRecPtr lsn, i
{
int lsnindex = GetLSNIndex(slotno, xid);
if (XLByteLT(ClogCtl->shared->group_lsn[lsnindex], lsn))
if (ClogCtl->shared->group_lsn[lsnindex] < lsn)
ClogCtl->shared->group_lsn[lsnindex] = lsn;
}
}
......
......@@ -685,7 +685,7 @@ SlruPhysicalWritePage(SlruCtl ctl, int pageno, int slotno, SlruFlush fdata)
{
XLogRecPtr this_lsn = shared->group_lsn[lsnindex++];
if (XLByteLT(max_lsn, this_lsn))
if (max_lsn < this_lsn)
max_lsn = this_lsn;
}
......
......@@ -522,8 +522,8 @@ tliOfPointInHistory(XLogRecPtr ptr, List *history)
foreach(cell, history)
{
TimeLineHistoryEntry *tle = (TimeLineHistoryEntry *) lfirst(cell);
if ((XLogRecPtrIsInvalid(tle->begin) || XLByteLE(tle->begin, ptr)) &&
(XLogRecPtrIsInvalid(tle->end) || XLByteLT(ptr, tle->end)))
if ((XLogRecPtrIsInvalid(tle->begin) || tle->begin <= ptr) &&
(XLogRecPtrIsInvalid(tle->end) || ptr < tle->end))
{
/* found it */
return tle->tli;
......
......@@ -1559,7 +1559,7 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
PGXACT *pgxact = &ProcGlobal->allPgXact[gxact->pgprocno];
if (gxact->valid &&
XLByteLE(gxact->prepare_lsn, redo_horizon))
gxact->prepare_lsn <= redo_horizon)
xids[nxids++] = pgxact->xid;
}
......
(one file's diff is collapsed here and not shown)
......@@ -607,7 +607,7 @@ nextval_internal(Oid relid)
{
XLogRecPtr redoptr = GetRedoRecPtr();
if (XLByteLE(PageGetLSN(page), redoptr))
if (PageGetLSN(page) <= redoptr)
{
/* last update of seq was before checkpoint */
fetch = log = fetch + SEQ_LOG_VALS;
......
......@@ -120,7 +120,7 @@ SyncRepWaitForLSN(XLogRecPtr XactCommitLSN)
* be a low cost check.
*/
if (!WalSndCtl->sync_standbys_defined ||
XLByteLE(XactCommitLSN, WalSndCtl->lsn[mode]))
XactCommitLSN <= WalSndCtl->lsn[mode])
{
LWLockRelease(SyncRepLock);
return;
......@@ -287,7 +287,7 @@ SyncRepQueueInsert(int mode)
* Stop at the queue element that we should after to ensure the queue
* is ordered by LSN.
*/
if (XLByteLT(proc->waitLSN, MyProc->waitLSN))
if (proc->waitLSN < MyProc->waitLSN)
break;
proc = (PGPROC *) SHMQueuePrev(&(WalSndCtl->SyncRepQueue[mode]),
......@@ -428,12 +428,12 @@ SyncRepReleaseWaiters(void)
* Set the lsn first so that when we wake backends they will release up to
* this location.
*/
if (XLByteLT(walsndctl->lsn[SYNC_REP_WAIT_WRITE], MyWalSnd->write))
if (walsndctl->lsn[SYNC_REP_WAIT_WRITE] < MyWalSnd->write)
{
walsndctl->lsn[SYNC_REP_WAIT_WRITE] = MyWalSnd->write;
numwrite = SyncRepWakeQueue(false, SYNC_REP_WAIT_WRITE);
}
if (XLByteLT(walsndctl->lsn[SYNC_REP_WAIT_FLUSH], MyWalSnd->flush))
if (walsndctl->lsn[SYNC_REP_WAIT_FLUSH] < MyWalSnd->flush)
{
walsndctl->lsn[SYNC_REP_WAIT_FLUSH] = MyWalSnd->flush;
numflush = SyncRepWakeQueue(false, SYNC_REP_WAIT_FLUSH);
......@@ -543,7 +543,7 @@ SyncRepWakeQueue(bool all, int mode)
/*
* Assume the queue is ordered by LSN
*/
if (!all && XLByteLT(walsndctl->lsn[mode], proc->waitLSN))
if (!all && walsndctl->lsn[mode] < proc->waitLSN)
return numprocs;
/*
......@@ -640,7 +640,7 @@ SyncRepQueueIsOrderedByLSN(int mode)
* Check the queue is ordered by LSN and that multiple procs don't
* have matching LSNs
*/
if (XLByteLE(proc->waitLSN, lastLSN))
if (proc->waitLSN <= lastLSN)
return false;
lastLSN = proc->waitLSN;
......
......@@ -914,7 +914,7 @@ XLogWalRcvWrite(char *buf, Size nbytes, XLogRecPtr recptr)
}
/* Update state for write */
XLByteAdvance(recptr, byteswritten);
recptr += byteswritten;
recvOff += byteswritten;
nbytes -= byteswritten;
......@@ -933,7 +933,7 @@ XLogWalRcvWrite(char *buf, Size nbytes, XLogRecPtr recptr)
static void
XLogWalRcvFlush(bool dying)
{
if (XLByteLT(LogstreamResult.Flush, LogstreamResult.Write))
if (LogstreamResult.Flush < LogstreamResult.Write)
{
/* use volatile pointer to prevent code rearrangement */
volatile WalRcvData *walrcv = WalRcv;
......@@ -944,7 +944,7 @@ XLogWalRcvFlush(bool dying)
/* Update shared-memory status */
SpinLockAcquire(&walrcv->mutex);
if (XLByteLT(walrcv->receivedUpto, LogstreamResult.Flush))
if (walrcv->receivedUpto < LogstreamResult.Flush)
{
walrcv->latestChunkStart = walrcv->receivedUpto;
walrcv->receivedUpto = LogstreamResult.Flush;
......@@ -1016,8 +1016,8 @@ XLogWalRcvSendReply(bool force, bool requestReply)
* probably OK.
*/
if (!force
&& XLByteEQ(writePtr, LogstreamResult.Write)
&& XLByteEQ(flushPtr, LogstreamResult.Flush)
&& writePtr == LogstreamResult.Write
&& flushPtr == LogstreamResult.Flush
&& !TimestampDifferenceExceeds(sendTime, now,
wal_receiver_status_interval * 1000))
return;
......@@ -1126,7 +1126,7 @@ ProcessWalSndrMessage(XLogRecPtr walEnd, TimestampTz sendTime)
/* Update shared-memory status */
SpinLockAcquire(&walrcv->mutex);
if (XLByteLT(walrcv->latestWalEnd, walEnd))
if (walrcv->latestWalEnd < walEnd)
walrcv->latestWalEndTime = sendTime;
walrcv->latestWalEnd = walEnd;
walrcv->lastMsgSendTime = sendTime;
......
......@@ -326,7 +326,7 @@ GetReplicationApplyDelay(void)
replayPtr = GetXLogReplayRecPtr(NULL);
if (XLByteEQ(receivePtr, replayPtr))
if (receivePtr == replayPtr)
return 0;
TimestampDifference(GetCurrentChunkReplayStartTime(),
......
......@@ -471,7 +471,7 @@ StartReplication(StartReplicationCmd *cmd)
* WAL segment.
*/
if (!XLogRecPtrIsInvalid(switchpoint) &&
XLByteLT(switchpoint, cmd->startpoint))
switchpoint < cmd->startpoint)
{
ereport(ERROR,
(errmsg("requested starting point %X/%X on timeline %u is not in this server's history",
......@@ -497,7 +497,7 @@ StartReplication(StartReplicationCmd *cmd)
/* If there is nothing to stream, don't even enter COPY mode */
if (!sendTimeLineIsHistoric ||
XLByteLT(cmd->startpoint, sendTimeLineValidUpto))
cmd->startpoint < sendTimeLineValidUpto)
{
/*
* When we first start replication the standby will be behind the primary.
......@@ -520,7 +520,7 @@ StartReplication(StartReplicationCmd *cmd)
* Don't allow a request to stream from a future point in WAL that
* hasn't been flushed to disk in this server yet.
*/
if (XLByteLT(FlushPtr, cmd->startpoint))
if (FlushPtr < cmd->startpoint)
{
ereport(ERROR,
(errmsg("requested starting point %X/%X is ahead of the WAL flush position of this server %X/%X",
......@@ -1249,7 +1249,7 @@ retry:
}
/* Update state for read */
XLByteAdvance(recptr, readbytes);
recptr += readbytes;
sendOff += readbytes;
nbytes -= readbytes;
......@@ -1384,11 +1384,11 @@ XLogSend(bool *caughtup)
history = readTimeLineHistory(ThisTimeLineID);
sendTimeLineValidUpto = tliSwitchPoint(sendTimeLine, history);
Assert(XLByteLE(sentPtr, sendTimeLineValidUpto));
Assert(sentPtr <= sendTimeLineValidUpto);
list_free_deep(history);
/* the switchpoint should be >= current send pointer */
if (!XLByteLE(sentPtr, sendTimeLineValidUpto))
/* the current send pointer should be <= the switchpoint */
if (!(sentPtr <= sendTimeLineValidUpto))
elog(ERROR, "server switched off timeline %u at %X/%X, but walsender already streamed up to %X/%X",
sendTimeLine,
(uint32) (sendTimeLineValidUpto >> 32),
......@@ -1420,7 +1420,7 @@ XLogSend(bool *caughtup)
* If this is a historic timeline and we've reached the point where we
* forked to the next timeline, stop streaming.
*/
if (sendTimeLineIsHistoric && XLByteLE(sendTimeLineValidUpto, sentPtr))
if (sendTimeLineIsHistoric && sendTimeLineValidUpto <= sentPtr)
{
/* close the current file. */
if (sendFile >= 0)
......@@ -1436,8 +1436,8 @@ XLogSend(bool *caughtup)
}
/* Do we have any work to do? */
Assert(XLByteLE(sentPtr, SendRqstPtr));
if (XLByteLE(SendRqstPtr, sentPtr))
Assert(sentPtr <= SendRqstPtr);
if (SendRqstPtr <= sentPtr)
{
*caughtup = true;
return;
......@@ -1456,10 +1456,10 @@ XLogSend(bool *caughtup)
*/
startptr = sentPtr;
endptr = startptr;
XLByteAdvance(endptr, MAX_SEND_SIZE);
endptr += MAX_SEND_SIZE;
/* if we went beyond SendRqstPtr, back off */
if (XLByteLE(SendRqstPtr, endptr))
if (SendRqstPtr <= endptr)
{
endptr = SendRqstPtr;
if (sendTimeLineIsHistoric)
......@@ -1968,7 +1968,7 @@ GetOldestWALSendPointer(void)
if (recptr.xlogid == 0 && recptr.xrecoff == 0)
continue;
if (!found || XLByteLT(recptr, oldest))
if (!found || recptr < oldest)
oldest = recptr;
found = true;
}
......
......@@ -636,7 +636,7 @@ ReceiveXlogStream(PGconn *conn, XLogRecPtr startpos, uint32 timeline,
/* Write was successful, advance our position */
bytes_written += bytes_to_write;
bytes_left -= bytes_to_write;
XLByteAdvance(blockpos, bytes_to_write);
blockpos += bytes_to_write;
xlogoff += bytes_to_write;
/* Did we reach the end of a WAL segment? */
......
......@@ -28,20 +28,6 @@ typedef uint64 XLogRecPtr;
#define InvalidXLogRecPtr 0
#define XLogRecPtrIsInvalid(r) ((r) == InvalidXLogRecPtr)
/*
* Macros for comparing XLogRecPtrs
*/
#define XLByteLT(a, b) ((a) < (b))
#define XLByteLE(a, b) ((a) <= (b))
#define XLByteEQ(a, b) ((a) == (b))
/*
* Macro for advancing a record pointer by the specified number of bytes.
*/
#define XLByteAdvance(recptr, nbytes) \
(recptr) += nbytes \
/*
* XLogSegNo - physical log file sequence number.
*/
......