Commit a892234f authored by Robert Haas

Change the format of the VM fork to add a second bit per page.

The new bit indicates whether every tuple on the page is already frozen.
It is cleared only when the all-visible bit is cleared, and it can be
set only when we vacuum a page and find that every tuple on that page is
both visible to every transaction and in no need of any future
vacuuming.
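
For reference, a minimal standalone sketch (not part of the patch) of the
invariant this paragraph describes, using the flag names the patch introduces
in src/include/access/visibilitymap.h:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Flag values as defined by this patch in visibilitymap.h */
#define VISIBILITYMAP_ALL_VISIBLE  0x01
#define VISIBILITYMAP_ALL_FROZEN   0x02

/*
 * Invariant: a page can be all-frozen only while it is also all-visible,
 * because the all-frozen bit is cleared whenever the all-visible bit is.
 */
static bool
vm_flags_valid(uint8_t flags)
{
    return (flags & VISIBILITYMAP_ALL_FROZEN) == 0 ||
           (flags & VISIBILITYMAP_ALL_VISIBLE) != 0;
}

int
main(void)
{
    assert(vm_flags_valid(0x00));                       /* neither bit set */
    assert(vm_flags_valid(VISIBILITYMAP_ALL_VISIBLE));  /* all-visible only */
    assert(vm_flags_valid(0x03));                       /* visible and frozen */
    assert(!vm_flags_valid(VISIBILITYMAP_ALL_FROZEN));  /* frozen alone: never valid */
    return 0;
}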

A future commit will use this new bit to optimize away full-table scans
that would otherwise be triggered by XID wraparound considerations.  A
page which is merely all-visible must still be scanned in that case, but
a page which is all-frozen need not be.  This commit does not attempt
that optimization, although that optimization is the goal here.  It
seems better to get the basic infrastructure in place first.

Per discussion, it's very desirable for pg_upgrade to automatically
migrate existing VM forks from the old format to the new format.  That,
too, will be handled in a follow-on patch.

Masahiko Sawada, reviewed by Kyotaro Horiguchi, Fujii Masao, Amit
Kapila, Simon Riggs, Andres Freund, and others, and substantially
revised by me.
parent 68c521eb
......@@ -87,7 +87,7 @@ statapprox_heap(Relation rel, output_type *stat)
* If the page has only visible tuples, then we can find out the free
* space from the FSM and move on.
*/
if (visibilitymap_test(rel, blkno, &vmbuffer))
if (VM_ALL_VISIBLE(rel, blkno, &vmbuffer))
{
freespace = GetRecordedFreeSpace(rel, blkno);
stat->tuple_len += BLCKSZ - freespace;
......
......@@ -623,7 +623,8 @@ can be used to examine the information stored in free space maps.
<para>
Each heap relation has a Visibility Map
(VM) to keep track of which pages contain only tuples that are known to be
visible to all active transactions. It's stored
visible to all active transactions; it also keeps track of which pages contain
only frozen tuples. It's stored
alongside the main relation data in a separate relation fork, named after the
filenode number of the relation, plus a <literal>_vm</> suffix. For example,
if the filenode of a relation is 12345, the VM is stored in a file called
......@@ -632,11 +633,12 @@ Note that indexes do not have VMs.
</para>
<para>
The visibility map simply stores one bit per heap page. A set bit means
that all tuples on the page are known to be visible to all transactions.
This means that the page does not contain any tuples that need to be vacuumed.
The visibility map stores two bits per heap page. The first bit, if set,
indicates that the page is all-visible, or in other words that the page does
not contain any tuples that need to be vacuumed.
This information can also be used by <firstterm>index-only scans</> to answer
queries using only the index tuple.
The second bit, if set, means that all tuples on the page have been frozen.
</para>
<para>
......
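
To make the two-bit layout concrete, here is a hypothetical standalone sketch
(not from the patch) of how a heap block number maps to its pair of bits,
following the BITS_PER_HEAPBLOCK and HEAPBLOCKS_PER_BYTE definitions added to
visibilitymap.h further down; the real code additionally accounts for the VM
page header and for maps spanning multiple VM pages:

#include <stdint.h>

#define BITS_PER_BYTE        8
#define BITS_PER_HEAPBLOCK   2   /* doubled by this patch */
#define HEAPBLOCKS_PER_BYTE  (BITS_PER_BYTE / BITS_PER_HEAPBLOCK)

/* Extract the two VM bits for a given heap block from a flat bit array. */
static uint8_t
vm_status_of_block(const uint8_t *map, uint32_t heap_blk)
{
    uint32_t byte_no = heap_blk / HEAPBLOCKS_PER_BYTE;
    uint32_t shift   = (heap_blk % HEAPBLOCKS_PER_BYTE) * BITS_PER_HEAPBLOCK;

    return (uint8_t) ((map[byte_no] >> shift) & 0x03);  /* both flag bits */
}

int
main(void)
{
    uint8_t map[1] = { 0x07 };  /* block 0: visible+frozen; block 1: visible */

    return vm_status_of_block(map, 1) == 0x01 ? 0 : 1;
}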
......@@ -6951,6 +6951,55 @@ ConditionalMultiXactIdWait(MultiXactId multi, MultiXactStatus status,
rel, NULL, XLTW_None, remaining);
}
/*
* heap_tuple_needs_eventual_freeze
*
* Check to see whether any of the XID fields of a tuple (xmin, xmax, xvac)
* will eventually require freezing. Similar to heap_tuple_needs_freeze,
* but there's no cutoff, since we're trying to figure out whether freezing
* will ever be needed, not whether it's needed now.
*/
bool
heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple)
{
    TransactionId xid;

    /*
     * If xmin is a normal transaction ID, this tuple is definitely not
     * frozen.
     */
    xid = HeapTupleHeaderGetXmin(tuple);
    if (TransactionIdIsNormal(xid))
        return true;

    /*
     * If xmax is a valid xact or multixact, this tuple is also not frozen.
     */
    if (tuple->t_infomask & HEAP_XMAX_IS_MULTI)
    {
        MultiXactId multi;

        multi = HeapTupleHeaderGetRawXmax(tuple);
        if (MultiXactIdIsValid(multi))
            return true;
    }
    else
    {
        xid = HeapTupleHeaderGetRawXmax(tuple);
        if (TransactionIdIsNormal(xid))
            return true;
    }

    if (tuple->t_infomask & HEAP_MOVED)
    {
        xid = HeapTupleHeaderGetXvac(tuple);
        if (TransactionIdIsNormal(xid))
            return true;
    }

    return false;
}
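
For readers outside the tree: the TransactionIdIsNormal() tests above hinge
on PostgreSQL's permanent XIDs.  A standalone restatement of the relevant
definitions (assumed from src/include/access/transam.h; not part of this
patch):

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t TransactionId;

#define InvalidTransactionId      ((TransactionId) 0)
#define BootstrapTransactionId    ((TransactionId) 1)
#define FrozenTransactionId       ((TransactionId) 2)
#define FirstNormalTransactionId  ((TransactionId) 3)

/*
 * Only "normal" XIDs advance with the XID counter and can wrap around;
 * a fully frozen tuple carries (or is hinted to carry) only permanent
 * XIDs, so it never needs freezing again.
 */
static bool
TransactionIdIsNormal(TransactionId xid)
{
    return xid >= FirstNormalTransactionId;
}

int
main(void)
{
    return (!TransactionIdIsNormal(FrozenTransactionId) &&
            TransactionIdIsNormal(FirstNormalTransactionId)) ? 0 : 1;
}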
/*
* heap_tuple_needs_freeze
*
......@@ -7205,7 +7254,7 @@ log_heap_freeze(Relation reln, Buffer buffer, TransactionId cutoff_xid,
*/
XLogRecPtr
log_heap_visible(RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer,
TransactionId cutoff_xid)
TransactionId cutoff_xid, uint8 vmflags)
{
xl_heap_visible xlrec;
XLogRecPtr recptr;
......@@ -7215,6 +7264,7 @@ log_heap_visible(RelFileNode rnode, Buffer heap_buffer, Buffer vm_buffer,
Assert(BufferIsValid(vm_buffer));
xlrec.cutoff_xid = cutoff_xid;
xlrec.flags = vmflags;
XLogBeginInsert();
XLogRegisterData((char *) &xlrec, SizeOfHeapVisible);
......@@ -7804,7 +7854,12 @@ heap_xlog_visible(XLogReaderState *record)
* the subsequent update won't be replayed to clear the flag.
*/
page = BufferGetPage(buffer);
if (xlrec->flags & VISIBILITYMAP_ALL_VISIBLE)
PageSetAllVisible(page);
if (xlrec->flags & VISIBILITYMAP_ALL_FROZEN)
PageSetAllFrozen(page);
MarkBufferDirty(buffer);
}
else if (action == BLK_RESTORED)
......@@ -7856,7 +7911,7 @@ heap_xlog_visible(XLogReaderState *record)
*/
if (lsn > PageGetLSN(vmpage))
visibilitymap_set(reln, blkno, InvalidBuffer, lsn, vmbuffer,
xlrec->cutoff_xid);
xlrec->cutoff_xid, xlrec->flags);
ReleaseBuffer(vmbuffer);
FreeFakeRelcacheEntry(reln);
......
......@@ -1920,7 +1920,7 @@ index_update_stats(Relation rel,
BlockNumber relallvisible;
if (rd_rel->relkind != RELKIND_INDEX)
relallvisible = visibilitymap_count(rel);
visibilitymap_count(rel, &relallvisible, NULL);
else /* don't bother for indexes */
relallvisible = 0;
......
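
Since visibilitymap_count() now returns its statistics through output
parameters, every caller changes shape as above.  A standalone mock (the body
is a stub, not the real VM-scanning implementation) of the new calling
convention, in which either output pointer may be NULL:

#include <stdint.h>
#include <stdio.h>

typedef uint32_t BlockNumber;

/* Mocked visibilitymap_count(): a NULL output pointer means the caller
 * does not need that count (index_update_stats above passes NULL for
 * the all-frozen count). */
static void
visibilitymap_count_mock(BlockNumber *all_visible, BlockNumber *all_frozen)
{
    if (all_visible)
        *all_visible = 0;   /* real code counts set bits in the VM fork */
    if (all_frozen)
        *all_frozen = 0;
}

int
main(void)
{
    BlockNumber relallvisible;

    visibilitymap_count_mock(&relallvisible, NULL);  /* all-visible only */
    printf("relallvisible = %u\n", relallvisible);
    return 0;
}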
......@@ -569,14 +569,20 @@ do_analyze_rel(Relation onerel, int options, VacuumParams *params,
* inherited stats.
*/
if (!inh)
{
BlockNumber relallvisible;
visibilitymap_count(onerel, &relallvisible, NULL);
vac_update_relstats(onerel,
relpages,
totalrows,
visibilitymap_count(onerel),
relallvisible,
hasindex,
InvalidTransactionId,
InvalidMultiXactId,
in_outer_xact);
}
/*
* Same for indexes. Vacuum always scans all indexes, so if we're part of
......
......@@ -157,7 +157,7 @@ static void lazy_record_dead_tuple(LVRelStats *vacrelstats,
static bool lazy_tid_reaped(ItemPointer itemptr, void *state);
static int vac_cmp_itemptr(const void *left, const void *right);
static bool heap_page_is_all_visible(Relation rel, Buffer buf,
TransactionId *visibility_cutoff_xid);
TransactionId *visibility_cutoff_xid, bool *all_frozen);
/*
......@@ -295,7 +295,7 @@ lazy_vacuum_rel(Relation onerel, int options, VacuumParams *params,
new_rel_tuples = vacrelstats->old_rel_tuples;
}
new_rel_allvisible = visibilitymap_count(onerel);
visibilitymap_count(onerel, &new_rel_allvisible, NULL);
if (new_rel_allvisible > new_rel_pages)
new_rel_allvisible = new_rel_pages;
......@@ -496,7 +496,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
* maintain next_not_all_visible_block anyway, so as to set up the
* all_visible_according_to_vm flag correctly for each page.
*
* Note: The value returned by visibilitymap_test could be slightly
* Note: The value returned by visibilitymap_get_status could be slightly
* out-of-date, since we make this test before reading the corresponding
* heap page or locking the buffer. This is OK. If we mistakenly think
* that the page is all-visible when in fact the flag's just been cleared,
......@@ -518,7 +518,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
next_not_all_visible_block < nblocks;
next_not_all_visible_block++)
{
if (!visibilitymap_test(onerel, next_not_all_visible_block, &vmbuffer))
if (!VM_ALL_VISIBLE(onerel, next_not_all_visible_block, &vmbuffer))
break;
vacuum_delay_point();
}
......@@ -540,6 +540,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
Size freespace;
bool all_visible_according_to_vm;
bool all_visible;
bool all_frozen = true; /* provided all_visible is also true */
bool has_dead_tuples;
TransactionId visibility_cutoff_xid = InvalidTransactionId;
......@@ -554,8 +555,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
next_not_all_visible_block < nblocks;
next_not_all_visible_block++)
{
if (!visibilitymap_test(onerel, next_not_all_visible_block,
&vmbuffer))
if (!VM_ALL_VISIBLE(onerel, next_not_all_visible_block, &vmbuffer))
break;
vacuum_delay_point();
}
......@@ -743,7 +743,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
empty_pages++;
freespace = PageGetHeapFreeSpace(page);
/* empty pages are always all-visible */
/* empty pages are always all-visible and all-frozen */
if (!PageIsAllVisible(page))
{
START_CRIT_SECTION();
......@@ -766,8 +766,10 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
log_newpage_buffer(buf, true);
PageSetAllVisible(page);
PageSetAllFrozen(page);
visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
vmbuffer, InvalidTransactionId);
vmbuffer, InvalidTransactionId,
VISIBILITYMAP_ALL_VISIBLE | VISIBILITYMAP_ALL_FROZEN);
END_CRIT_SECTION();
}
......@@ -954,6 +956,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
if (heap_prepare_freeze_tuple(tuple.t_data, FreezeLimit,
MultiXactCutoff, &frozen[nfrozen]))
frozen[nfrozen++].offset = offnum;
else if (heap_tuple_needs_eventual_freeze(tuple.t_data))
all_frozen = false;
}
} /* scan along page */
......@@ -1018,6 +1022,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
/* mark page all-visible, if appropriate */
if (all_visible && !all_visible_according_to_vm)
{
uint8 flags = VISIBILITYMAP_ALL_VISIBLE;
/*
* It should never be the case that the visibility map page is set
* while the page-level bit is clear, but the reverse is allowed
......@@ -1032,9 +1038,14 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
* rare cases after a crash, it is not worth optimizing.
*/
PageSetAllVisible(page);
if (all_frozen)
{
PageSetAllFrozen(page);
flags |= VISIBILITYMAP_ALL_FROZEN;
}
MarkBufferDirty(buf);
visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
vmbuffer, visibility_cutoff_xid);
vmbuffer, visibility_cutoff_xid, flags);
}
/*
......@@ -1045,7 +1056,7 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
* that something bad has happened.
*/
else if (all_visible_according_to_vm && !PageIsAllVisible(page)
&& visibilitymap_test(onerel, blkno, &vmbuffer))
&& VM_ALL_VISIBLE(onerel, blkno, &vmbuffer))
{
elog(WARNING, "page is not marked all-visible but visibility map bit is set in relation \"%s\" page %u",
relname, blkno);
......@@ -1074,6 +1085,28 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
visibilitymap_clear(onerel, blkno, vmbuffer);
}
/*
 * If the page is marked all-visible in the VM but not yet all-frozen,
 * and we have just determined that it is in fact all-frozen, set the
 * all-frozen bit now.  Note that all_frozen is only valid if all_visible
 * is true, so we must check both.
 */
else if (all_visible_according_to_vm && all_visible && all_frozen &&
!VM_ALL_FROZEN(onerel, blkno, &vmbuffer))
{
/* Page is marked all-visible but should be all-frozen */
PageSetAllFrozen(page);
MarkBufferDirty(buf);
/*
* We can pass InvalidTransactionId as the cutoff XID here,
* because setting the all-frozen bit doesn't cause recovery
* conflicts.
*/
visibilitymap_set(onerel, blkno, buf, InvalidXLogRecPtr,
vmbuffer, InvalidTransactionId,
VISIBILITYMAP_ALL_FROZEN);
}
UnlockReleaseBuffer(buf);
/* Remember the location of the last page with nonremovable tuples */
......@@ -1257,6 +1290,7 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
OffsetNumber unused[MaxOffsetNumber];
int uncnt = 0;
TransactionId visibility_cutoff_xid;
bool all_frozen;
START_CRIT_SECTION();
......@@ -1308,19 +1342,34 @@ lazy_vacuum_page(Relation onerel, BlockNumber blkno, Buffer buffer,
* dirty, exclusively locked, and, if needed, a full page image has been
* emitted in the log_heap_clean() above.
*/
if (heap_page_is_all_visible(onerel, buffer, &visibility_cutoff_xid))
if (heap_page_is_all_visible(onerel, buffer, &visibility_cutoff_xid,
&all_frozen))
{
PageSetAllVisible(page);
if (all_frozen)
PageSetAllFrozen(page);
}
/*
* All the changes to the heap page have been done. If the all-visible
* flag is now set, also set the VM bit.
* flag is now set, also set the VM all-visible bit (and, if possible,
* the all-frozen bit) unless this has already been done previously.
*/
if (PageIsAllVisible(page) &&
!visibilitymap_test(onerel, blkno, vmbuffer))
if (PageIsAllVisible(page))
{
uint8 vm_status = visibilitymap_get_status(onerel, blkno, vmbuffer);
uint8 flags = 0;
/* Compute which VM bits should be set but are not yet */
if ((vm_status & VISIBILITYMAP_ALL_VISIBLE) == 0)
flags |= VISIBILITYMAP_ALL_VISIBLE;
if ((vm_status & VISIBILITYMAP_ALL_FROZEN) == 0 && all_frozen)
flags |= VISIBILITYMAP_ALL_FROZEN;
Assert(BufferIsValid(*vmbuffer));
visibilitymap_set(onerel, blkno, buffer, InvalidXLogRecPtr, *vmbuffer,
visibility_cutoff_xid);
if (flags != 0)
visibilitymap_set(onerel, blkno, buffer, InvalidXLogRecPtr,
*vmbuffer, visibility_cutoff_xid, flags);
}
return tupindex;
......@@ -1842,10 +1891,13 @@ vac_cmp_itemptr(const void *left, const void *right)
/*
* Check if every tuple in the given page is visible to all current and future
* transactions. Also return the visibility_cutoff_xid which is the highest
* xmin amongst the visible tuples.
* xmin amongst the visible tuples. Set *all_frozen to true if every tuple
* on this page is frozen.
*/
static bool
heap_page_is_all_visible(Relation rel, Buffer buf, TransactionId *visibility_cutoff_xid)
heap_page_is_all_visible(Relation rel, Buffer buf,
TransactionId *visibility_cutoff_xid,
bool *all_frozen)
{
Page page = BufferGetPage(buf);
BlockNumber blockno = BufferGetBlockNumber(buf);
......@@ -1854,6 +1906,7 @@ heap_page_is_all_visible(Relation rel, Buffer buf, TransactionId *visibility_cut
bool all_visible = true;
*visibility_cutoff_xid = InvalidTransactionId;
*all_frozen = true;
/*
* This is a stripped down version of the line pointer scan in
......@@ -1918,6 +1971,11 @@ heap_page_is_all_visible(Relation rel, Buffer buf, TransactionId *visibility_cut
/* Track newest xmin on page. */
if (TransactionIdFollows(xmin, *visibility_cutoff_xid))
*visibility_cutoff_xid = xmin;
/* Check whether this tuple is already frozen or not */
if (all_visible && *all_frozen &&
heap_tuple_needs_eventual_freeze(tuple.t_data))
*all_frozen = false;
}
break;
......@@ -1934,5 +1992,14 @@ heap_page_is_all_visible(Relation rel, Buffer buf, TransactionId *visibility_cut
}
} /* scan along page */
/*
* We don't bother clearing *all_frozen when the page is discovered not
* to be all-visible, so do that now if necessary. The page might fail
* to be all-frozen for other reasons anyway, but if it's not all-visible,
* then it definitely isn't all-frozen.
*/
if (!all_visible)
*all_frozen = false;
return all_visible;
}
......@@ -85,9 +85,9 @@ IndexOnlyNext(IndexOnlyScanState *node)
* which all tuples are known visible to everybody. In any case,
* we'll use the index tuple not the heap tuple as the data source.
*
* Note on Memory Ordering Effects: visibilitymap_test does not lock
* the visibility map buffer, and therefore the result we read here
* could be slightly stale. However, it can't be stale enough to
* Note on Memory Ordering Effects: visibilitymap_get_status does not
* lock the visibility map buffer, and therefore the result we read
* here could be slightly stale. However, it can't be stale enough to
* matter.
*
* We need to detect clearing a VM bit due to an insert right away,
......@@ -114,7 +114,7 @@ IndexOnlyNext(IndexOnlyScanState *node)
* It's worth going through this complexity to avoid needing to lock
* the VM buffer, which could cause significant contention.
*/
if (!visibilitymap_test(scandesc->heapRelation,
if (!VM_ALL_VISIBLE(scandesc->heapRelation,
ItemPointerGetBlockNumber(tid),
&node->ioss_VMBuffer))
{
......
......@@ -270,8 +270,10 @@ ResolveRecoveryConflictWithSnapshot(TransactionId latestRemovedXid, RelFileNode
* If we get passed InvalidTransactionId then we are a little surprised,
* but it is theoretically possible in normal running. It also happens
* when replaying already applied WAL records after a standby crash or
* restart. If latestRemovedXid is invalid then there is no conflict. That
* rule applies across all record types that suffer from this conflict.
* restart, or when replaying an XLOG_HEAP2_VISIBLE record that marks as
* frozen a page which was already all-visible. If latestRemovedXid is
* invalid then there is no conflict. That rule applies across all record
* types that suffer from this conflict.
*/
if (!TransactionIdIsValid(latestRemovedXid))
return;
......
......@@ -170,6 +170,7 @@ extern bool heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
TransactionId cutoff_multi);
extern bool heap_tuple_needs_freeze(HeapTupleHeader tuple, TransactionId cutoff_xid,
MultiXactId cutoff_multi, Buffer buf);
extern bool heap_tuple_needs_eventual_freeze(HeapTupleHeader tuple);
extern Oid simple_heap_insert(Relation relation, HeapTuple tup);
extern void simple_heap_delete(Relation relation, ItemPointer tid);
......
......@@ -320,9 +320,10 @@ typedef struct xl_heap_freeze_page
typedef struct xl_heap_visible
{
TransactionId cutoff_xid;
uint8 flags;
} xl_heap_visible;
#define SizeOfHeapVisible (offsetof(xl_heap_visible, cutoff_xid) + sizeof(TransactionId))
#define SizeOfHeapVisible (offsetof(xl_heap_visible, flags) + sizeof(uint8))
typedef struct xl_heap_new_cid
{
......@@ -389,6 +390,6 @@ extern bool heap_prepare_freeze_tuple(HeapTupleHeader tuple,
extern void heap_execute_freeze_tuple(HeapTupleHeader tuple,
xl_heap_freeze_tuple *xlrec_tp);
extern XLogRecPtr log_heap_visible(RelFileNode rnode, Buffer heap_buffer,
Buffer vm_buffer, TransactionId cutoff_xid);
Buffer vm_buffer, TransactionId cutoff_xid, uint8 flags);
#endif /* HEAPAM_XLOG_H */
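
The revised SizeOfHeapVisible above illustrates a general WAL convention:
record sizes are computed with offsetof through the last member, so trailing
struct padding is never written.  A standalone illustration with mocked types
(not the real headers):

#include <stddef.h>
#include <stdint.h>

typedef uint32_t TransactionId;   /* stand-in for the real typedef */

typedef struct xl_heap_visible
{
    TransactionId cutoff_xid;
    uint8_t       flags;          /* new in this patch */
} xl_heap_visible;

/* Count only the bytes up to and including "flags"; sizeof() would
 * typically report 8 here because of trailing padding. */
#define SizeOfHeapVisible (offsetof(xl_heap_visible, flags) + sizeof(uint8_t))

_Static_assert(SizeOfHeapVisible == 5, "4-byte cutoff_xid + 1-byte flags");

int
main(void)
{
    return 0;
}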
......@@ -19,15 +19,30 @@
#include "storage/buf.h"
#include "utils/relcache.h"
#define BITS_PER_HEAPBLOCK 2
#define HEAPBLOCKS_PER_BYTE (BITS_PER_BYTE / BITS_PER_HEAPBLOCK)
/* Flags for bit map */
#define VISIBILITYMAP_ALL_VISIBLE 0x01
#define VISIBILITYMAP_ALL_FROZEN 0x02
#define VISIBILITYMAP_VALID_BITS 0x03 /* OR of all valid visibilitymap flags bits */
/* Macros for visibilitymap test */
#define VM_ALL_VISIBLE(r, b, v) \
((visibilitymap_get_status((r), (b), (v)) & VISIBILITYMAP_ALL_VISIBLE) != 0)
#define VM_ALL_FROZEN(r, b, v) \
((visibilitymap_get_status((r), (b), (v)) & VISIBILITYMAP_ALL_FROZEN) != 0)
extern void visibilitymap_clear(Relation rel, BlockNumber heapBlk,
Buffer vmbuf);
extern void visibilitymap_pin(Relation rel, BlockNumber heapBlk,
Buffer *vmbuf);
extern bool visibilitymap_pin_ok(BlockNumber heapBlk, Buffer vmbuf);
extern void visibilitymap_set(Relation rel, BlockNumber heapBlk, Buffer heapBuf,
XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid);
extern bool visibilitymap_test(Relation rel, BlockNumber heapBlk, Buffer *vmbuf);
extern BlockNumber visibilitymap_count(Relation rel);
XLogRecPtr recptr, Buffer vmBuf, TransactionId cutoff_xid,
uint8 flags);
extern uint8 visibilitymap_get_status(Relation rel, BlockNumber heapBlk, Buffer *vmbuf);
extern void visibilitymap_count(Relation rel, BlockNumber *all_visible, BlockNumber *all_frozen);
extern void visibilitymap_truncate(Relation rel, BlockNumber nheapblocks);
#endif /* VISIBILITYMAP_H */
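
Where the old visibilitymap_test returned a single boolean, the new
visibilitymap_get_status returns the raw two-bit status, so one read of the
map yields both properties.  A standalone mock (not the real buffer-reading
implementation) of what the two macros above compute from that status byte:

#include <assert.h>
#include <stdint.h>

#define VISIBILITYMAP_ALL_VISIBLE  0x01
#define VISIBILITYMAP_ALL_FROZEN   0x02

/* Stand-in for visibilitymap_get_status(); the real function reads the
 * status bits from the VM buffer for the given heap block. */
static uint8_t
get_status_mock(uint8_t stored_status)
{
    return stored_status;
}

/* The VM_ALL_VISIBLE / VM_ALL_FROZEN macros reduce to bit tests: */
#define VM_ALL_VISIBLE_MOCK(s) ((get_status_mock(s) & VISIBILITYMAP_ALL_VISIBLE) != 0)
#define VM_ALL_FROZEN_MOCK(s)  ((get_status_mock(s) & VISIBILITYMAP_ALL_FROZEN) != 0)

int
main(void)
{
    assert(VM_ALL_VISIBLE_MOCK(0x03) && VM_ALL_FROZEN_MOCK(0x03));
    assert(VM_ALL_VISIBLE_MOCK(0x01) && !VM_ALL_FROZEN_MOCK(0x01));
    return 0;
}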
......@@ -53,6 +53,6 @@
*/
/* yyyymmddN */
#define CATALOG_VERSION_NO 201602221
#define CATALOG_VERSION_NO 201603011
#endif
......@@ -178,8 +178,10 @@ typedef PageHeaderData *PageHeader;
* tuple? */
#define PD_ALL_VISIBLE 0x0004 /* all tuples on page are visible to
* everyone */
#define PD_ALL_FROZEN 0x0008 /* all tuples on page are completely
frozen */
#define PD_VALID_FLAG_BITS 0x0007 /* OR of all valid pd_flags bits */
#define PD_VALID_FLAG_BITS 0x000F /* OR of all valid pd_flags bits */
/*
* Page layout version number 0 is for pre-7.3 Postgres releases.
......@@ -367,7 +369,12 @@ typedef PageHeaderData *PageHeader;
#define PageSetAllVisible(page) \
(((PageHeader) (page))->pd_flags |= PD_ALL_VISIBLE)
#define PageClearAllVisible(page) \
(((PageHeader) (page))->pd_flags &= ~PD_ALL_VISIBLE)
(((PageHeader) (page))->pd_flags &= ~(PD_ALL_VISIBLE | PD_ALL_FROZEN))
#define PageIsAllFrozen(page) \
(((PageHeader) (page))->pd_flags & PD_ALL_FROZEN)
#define PageSetAllFrozen(page) \
(((PageHeader) (page))->pd_flags |= PD_ALL_FROZEN)
#define PageIsPrunable(page, oldestxmin) \
( \
......
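
Note that PageClearAllVisible now clears PD_ALL_FROZEN as well, mirroring the
rule that the VM's all-frozen bit is cleared whenever its all-visible bit is.
A standalone sketch with a mocked flags word (not the real PageHeader):

#include <assert.h>
#include <stdint.h>

#define PD_ALL_VISIBLE 0x0004
#define PD_ALL_FROZEN  0x0008

int
main(void)
{
    uint16_t pd_flags = PD_ALL_VISIBLE | PD_ALL_FROZEN;

    /* Equivalent of the revised PageClearAllVisible(): one clear
     * operation drops both bits, so an all-frozen page can never
     * outlive its all-visible status. */
    pd_flags &= (uint16_t) ~(PD_ALL_VISIBLE | PD_ALL_FROZEN);

    assert((pd_flags & PD_ALL_FROZEN) == 0);
    assert((pd_flags & PD_ALL_VISIBLE) == 0);
    return 0;
}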