Commit 38e9f90a authored by Robert Haas

Fix lazy_scan_heap so that it won't mark pages all-frozen too soon.

Commit a892234f added a new bit per
page to the visibility map fork indicating whether the page is
all-frozen, but incorrectly assumed that if lazy_scan_heap chose to
freeze a tuple then that tuple would not need to later be frozen
again. This turns out to be false, because xmin and xmax (and
conceivably xvac, if dealing with tuples from very old releases) could
be frozen at separate times.

Thanks to Andres Freund for help in uncovering and tracking down this
issue.
parent c7a25c24
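
Before the diff, it may help to see the failure mode concretely. Below is a minimal, self-contained sketch (invented names, not PostgreSQL code; real XID comparisons use TransactionIdPrecedes() because of wraparound, not plain "<"): a tuple whose xmin already precedes the freeze cutoff but whose xmax does not. One freeze pass does change the tuple, yet the tuple is not totally frozen, so its page must not be marked all-frozen in the visibility map.

    /*
     * toy_freeze.c -- minimal illustration of the bug fixed by this commit.
     * All names are invented stand-ins for the PostgreSQL originals.
     */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t TransactionId;
    #define FirstNormalTransactionId ((TransactionId) 3)

    typedef struct
    {
        TransactionId xmin;     /* inserting XID */
        TransactionId xmax;     /* deleting/locking XID, 0 if none */
    } ToyTuple;

    static bool
    xid_is_normal(TransactionId xid)
    {
        return xid >= FirstNormalTransactionId;
    }

    /*
     * Freeze whatever is freezable now; additionally report, via
     * *totally_frozen, whether anything could still need freezing later.
     * This mirrors the two independent outputs heap_prepare_freeze_tuple
     * has after this commit.
     */
    static bool
    toy_prepare_freeze(ToyTuple *tup, TransactionId cutoff, bool *totally_frozen)
    {
        bool        changed = false;

        *totally_frozen = true;

        if (xid_is_normal(tup->xmin))
        {
            if (tup->xmin < cutoff)
            {
                tup->xmin = 0;  /* stand-in for setting HEAP_XMIN_FROZEN */
                changed = true;
            }
            else
                *totally_frozen = false;    /* xmin needs freezing later */
        }

        if (xid_is_normal(tup->xmax))
        {
            if (tup->xmax < cutoff)
            {
                tup->xmax = 0;
                changed = true;
            }
            else
                *totally_frozen = false;    /* xmax needs freezing later */
        }

        return changed;
    }

    int
    main(void)
    {
        ToyTuple    tup = {100, 900};
        bool        totally_frozen;

        /* xmin (100) precedes the cutoff (500), xmax (900) does not. */
        bool        changed = toy_prepare_freeze(&tup, 500, &totally_frozen);

        /* Prints "changed=1 totally_frozen=0": we froze something, but the
         * buggy inference "froze it, therefore all-frozen" would be wrong. */
        printf("changed=%d totally_frozen=%d\n", changed, totally_frozen);
        return 0;
    }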
src/backend/access/heap/heapam.c
@@ -6377,7 +6377,9 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
  * are older than the specified cutoff XID and cutoff MultiXactId. If so,
  * setup enough state (in the *frz output argument) to later execute and
  * WAL-log what we would need to do, and return TRUE. Return FALSE if nothing
- * is to be changed.
+ * is to be changed. In addition, set *totally_frozen_p to true if the tuple
+ * will be totally frozen after these operations are performed and false if
+ * more freezing will eventually be required.
  *
  * Caller is responsible for setting the offset field, if appropriate.
  *
@@ -6402,12 +6404,12 @@ FreezeMultiXactId(MultiXactId multi, uint16 t_infomask,
 bool
 heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
                           TransactionId cutoff_multi,
-                          xl_heap_freeze_tuple *frz)
+                          xl_heap_freeze_tuple *frz, bool *totally_frozen_p)
 {
     bool        changed = false;
     bool        freeze_xmax = false;
     TransactionId xid;
+    bool        totally_frozen = true;
 
     frz->frzflags = 0;
     frz->t_infomask2 = tuple->t_infomask2;
@@ -6416,12 +6418,16 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
     /* Process xmin */
     xid = HeapTupleHeaderGetXmin(tuple);
-    if (TransactionIdIsNormal(xid) &&
-        TransactionIdPrecedes(xid, cutoff_xid))
+    if (TransactionIdIsNormal(xid))
     {
-        frz->t_infomask |= HEAP_XMIN_FROZEN;
-        changed = true;
+        if (TransactionIdPrecedes(xid, cutoff_xid))
+        {
+            frz->t_infomask |= HEAP_XMIN_FROZEN;
+            changed = true;
+        }
+        else
+            totally_frozen = false;
     }
 
     /*
      * Process xmax. To thoroughly examine the current Xmax value we need to
@@ -6458,6 +6464,7 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
         if (flags & FRM_MARK_COMMITTED)
             frz->t_infomask |= HEAP_XMAX_COMMITTED;
         changed = true;
+        totally_frozen = false;
     }
     else if (flags & FRM_RETURN_IS_MULTI)
     {
@@ -6479,16 +6486,19 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
             frz->xmax = newxmax;
 
             changed = true;
+            totally_frozen = false;
         }
         else
         {
             Assert(flags & FRM_NOOP);
         }
     }
-    else if (TransactionIdIsNormal(xid) &&
-             TransactionIdPrecedes(xid, cutoff_xid))
-        freeze_xmax = true;
+    else if (TransactionIdIsNormal(xid))
+    {
+        if (TransactionIdPrecedes(xid, cutoff_xid))
+            freeze_xmax = true;
+        else
+            totally_frozen = false;
+    }
 
     if (freeze_xmax)
@@ -6514,8 +6524,15 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
     if (tuple->t_infomask & HEAP_MOVED)
     {
         xid = HeapTupleHeaderGetXvac(tuple);
-        if (TransactionIdIsNormal(xid) &&
-            TransactionIdPrecedes(xid, cutoff_xid))
+        /*
+         * For Xvac, we ignore the cutoff_xid and just always perform the
+         * freeze operation. The oldest release in which such a value can
+         * actually be set is PostgreSQL 8.4, because old-style VACUUM FULL
+         * was removed in PostgreSQL 9.0. Note that if we were to respect
+         * cutoff_xid here, we'd need to make sure to clear totally_frozen
+         * when we skipped freezing on that basis.
+         */
+        if (TransactionIdIsNormal(xid))
         {
             /*
              * If a MOVED_OFF tuple is not dead, the xvac transaction must
@@ -6537,6 +6554,7 @@ heap_prepare_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
         }
     }
 
+    *totally_frozen_p = totally_frozen;
     return changed;
 }
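
Under the revised contract, the function's two outputs vary independently: the return value answers "does anything need to change now?", while *totally_frozen_p answers "could anything still need freezing later?". Using the toy model above with cutoff = 500 (hypothetical values), all four combinations occur:

    /* xmin=100, xmax=0   -> changed=true,  totally_frozen=true   (frozen now, done forever) */
    /* xmin=100, xmax=900 -> changed=true,  totally_frozen=false  (the case the bug missed)  */
    /* xmin=0,   xmax=0   -> changed=false, totally_frozen=true   (nothing ever to freeze)   */
    /* xmin=900, xmax=0   -> changed=false, totally_frozen=false  (too new to freeze yet)    */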
@@ -6587,9 +6605,10 @@ heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid,
 {
     xl_heap_freeze_tuple frz;
     bool        do_freeze;
+    bool        tuple_totally_frozen;
 
     do_freeze = heap_prepare_freeze_tuple(tuple, cutoff_xid, cutoff_multi,
-                                          &frz);
+                                          &frz, &tuple_totally_frozen);
 
     /*
      * Note that because this is not a WAL-logged operation, we don't need to
src/backend/commands/vacuumlazy.c
@@ -1054,6 +1054,8 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
             }
             else
             {
+                bool        tuple_totally_frozen;
+
                 num_tuples += 1;
                 hastup = true;
@@ -1062,9 +1064,11 @@ lazy_scan_heap(Relation onerel, LVRelStats *vacrelstats,
                  * freezing. Note we already have exclusive buffer lock.
                  */
                 if (heap_prepare_freeze_tuple(tuple.t_data, FreezeLimit,
-                                              MultiXactCutoff, &frozen[nfrozen]))
+                                              MultiXactCutoff, &frozen[nfrozen],
+                                              &tuple_totally_frozen))
                     frozen[nfrozen++].offset = offnum;
-                else if (heap_tuple_needs_eventual_freeze(tuple.t_data))
+
+                if (!tuple_totally_frozen)
                     all_frozen = false;
             }
         }                       /* scan along page */
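
This hunk is the heart of the fix. Previously, heap_tuple_needs_eventual_freeze was consulted only when heap_prepare_freeze_tuple returned false, so a partially frozen tuple (return value true, but xmax or xvac still unfrozen) incorrectly left all_frozen set. Now the decision comes straight from the new output parameter. Downstream, all_frozen gates the all-frozen visibility-map bit roughly as follows (a condensed paraphrase of the surrounding lazy_scan_heap logic, not a verbatim excerpt):

    /* After the per-tuple loop: set VM bits if the page qualifies. */
    if (all_visible)
    {
        uint8       flags = VISIBILITYMAP_ALL_VISIBLE;

        if (all_frozen)
            flags |= VISIBILITYMAP_ALL_FROZEN;  /* safe only if no tuple can
                                                 * ever need freezing again */
        /* ... visibilitymap_set(onerel, blkno, buf, ..., flags) ... */
    }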
src/include/access/heapam.h
@@ -386,7 +386,8 @@ extern XLogRecPtr log_heap_freeze(Relation reln, Buffer buffer,
 extern bool heap_prepare_freeze_tuple(HeapTupleHeader tuple,
                           TransactionId cutoff_xid,
                           TransactionId cutoff_multi,
-                          xl_heap_freeze_tuple *frz);
+                          xl_heap_freeze_tuple *frz,
+                          bool *totally_frozen);
 extern void heap_execute_freeze_tuple(HeapTupleHeader tuple,
                           xl_heap_freeze_tuple *xlrec_tp);
 extern XLogRecPtr log_heap_visible(RelFileNode rnode, Buffer heap_buffer,