Commit a9ce839a authored by Peter Geoghegan's avatar Peter Geoghegan

Sanitize line pointers within contrib/amcheck.

Adopt a more defensive approach to accessing index tuples in
contrib/amcheck: verify that each line pointer looks sane before
accessing the associated tuple using pointer arithmetic based on the
line pointer's offset.  This avoids undefined behavior and assertion
failures in cases where line pointers are corrupt.

Issue spotted following a complaint about an assertion failure by
Grigory Smolkin, which involved a test harness that deliberately
corrupts indexes.

This is arguably a bugfix, but no backpatch given the lack of field
reports beyond Grigory's.

Discussion: https://postgr.es/m/CAH2-WzmkurhCqnyLHxk0VkOZqd49+ZZsp1xAJOg7j2x7dmp_XQ@mail.gmail.com
parent 05b38c7e
...@@ -155,11 +155,14 @@ static inline bool invariant_g_offset(BtreeCheckState *state, BTScanInsert key, ...@@ -155,11 +155,14 @@ static inline bool invariant_g_offset(BtreeCheckState *state, BTScanInsert key,
OffsetNumber lowerbound); OffsetNumber lowerbound);
static inline bool invariant_l_nontarget_offset(BtreeCheckState *state, static inline bool invariant_l_nontarget_offset(BtreeCheckState *state,
BTScanInsert key, BTScanInsert key,
BlockNumber nontargetblock,
Page nontarget, Page nontarget,
OffsetNumber upperbound); OffsetNumber upperbound);
static Page palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum); static Page palloc_btree_page(BtreeCheckState *state, BlockNumber blocknum);
static inline BTScanInsert bt_mkscankey_pivotsearch(Relation rel, static inline BTScanInsert bt_mkscankey_pivotsearch(Relation rel,
IndexTuple itup); IndexTuple itup);
static ItemId PageGetItemIdCareful(BtreeCheckState *state, BlockNumber block,
Page page, OffsetNumber offset);
static inline ItemPointer BTreeTupleGetHeapTIDCareful(BtreeCheckState *state, static inline ItemPointer BTreeTupleGetHeapTIDCareful(BtreeCheckState *state,
IndexTuple itup, bool nonpivot); IndexTuple itup, bool nonpivot);
...@@ -710,7 +713,9 @@ bt_check_level_from_leftmost(BtreeCheckState *state, BtreeLevel level) ...@@ -710,7 +713,9 @@ bt_check_level_from_leftmost(BtreeCheckState *state, BtreeLevel level)
ItemId itemid; ItemId itemid;
/* Internal page -- downlink gets leftmost on next level */ /* Internal page -- downlink gets leftmost on next level */
itemid = PageGetItemId(state->target, P_FIRSTDATAKEY(opaque)); itemid = PageGetItemIdCareful(state, state->targetblock,
state->target,
P_FIRSTDATAKEY(opaque));
itup = (IndexTuple) PageGetItem(state->target, itemid); itup = (IndexTuple) PageGetItem(state->target, itemid);
nextleveldown.leftmost = BTreeInnerTupleGetDownLink(itup); nextleveldown.leftmost = BTreeInnerTupleGetDownLink(itup);
nextleveldown.level = opaque->btpo.level - 1; nextleveldown.level = opaque->btpo.level - 1;
...@@ -837,16 +842,18 @@ bt_target_page_check(BtreeCheckState *state) ...@@ -837,16 +842,18 @@ bt_target_page_check(BtreeCheckState *state)
* Check the number of attributes in high key. Note, rightmost page * Check the number of attributes in high key. Note, rightmost page
* doesn't contain a high key, so nothing to check * doesn't contain a high key, so nothing to check
*/ */
if (!P_RIGHTMOST(topaque) && if (!P_RIGHTMOST(topaque))
!_bt_check_natts(state->rel, state->heapkeyspace, state->target,
P_HIKEY))
{ {
ItemId itemid; ItemId itemid;
IndexTuple itup; IndexTuple itup;
itemid = PageGetItemId(state->target, P_HIKEY); /* Verify line pointer before checking tuple */
itemid = PageGetItemIdCareful(state, state->targetblock,
state->target, P_HIKEY);
if (!_bt_check_natts(state->rel, state->heapkeyspace, state->target,
P_HIKEY))
{
itup = (IndexTuple) PageGetItem(state->target, itemid); itup = (IndexTuple) PageGetItem(state->target, itemid);
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED), (errcode(ERRCODE_INDEX_CORRUPTED),
errmsg("wrong number of high key index tuple attributes in index \"%s\"", errmsg("wrong number of high key index tuple attributes in index \"%s\"",
...@@ -858,6 +865,7 @@ bt_target_page_check(BtreeCheckState *state) ...@@ -858,6 +865,7 @@ bt_target_page_check(BtreeCheckState *state)
(uint32) (state->targetlsn >> 32), (uint32) (state->targetlsn >> 32),
(uint32) state->targetlsn))); (uint32) state->targetlsn)));
} }
}
/* /*
* Loop over page items, starting from first non-highkey item, not high * Loop over page items, starting from first non-highkey item, not high
...@@ -876,7 +884,8 @@ bt_target_page_check(BtreeCheckState *state) ...@@ -876,7 +884,8 @@ bt_target_page_check(BtreeCheckState *state)
CHECK_FOR_INTERRUPTS(); CHECK_FOR_INTERRUPTS();
itemid = PageGetItemId(state->target, offset); itemid = PageGetItemIdCareful(state, state->targetblock,
state->target, offset);
itup = (IndexTuple) PageGetItem(state->target, itemid); itup = (IndexTuple) PageGetItem(state->target, itemid);
tupsize = IndexTupleSize(itup); tupsize = IndexTupleSize(itup);
...@@ -1121,7 +1130,9 @@ bt_target_page_check(BtreeCheckState *state) ...@@ -1121,7 +1130,9 @@ bt_target_page_check(BtreeCheckState *state)
OffsetNumberNext(offset)); OffsetNumberNext(offset));
/* Reuse itup to get pointed-to heap location of second item */ /* Reuse itup to get pointed-to heap location of second item */
itemid = PageGetItemId(state->target, OffsetNumberNext(offset)); itemid = PageGetItemIdCareful(state, state->targetblock,
state->target,
OffsetNumberNext(offset));
itup = (IndexTuple) PageGetItem(state->target, itemid); itup = (IndexTuple) PageGetItem(state->target, itemid);
nhtid = psprintf("(%u,%u)", nhtid = psprintf("(%u,%u)",
ItemPointerGetBlockNumberNoCheck(&(itup->t_tid)), ItemPointerGetBlockNumberNoCheck(&(itup->t_tid)),
...@@ -1406,7 +1417,8 @@ bt_right_page_check_scankey(BtreeCheckState *state) ...@@ -1406,7 +1417,8 @@ bt_right_page_check_scankey(BtreeCheckState *state)
if (P_ISLEAF(opaque) && nline >= P_FIRSTDATAKEY(opaque)) if (P_ISLEAF(opaque) && nline >= P_FIRSTDATAKEY(opaque))
{ {
/* Return first data item (if any) */ /* Return first data item (if any) */
rightitem = PageGetItemId(rightpage, P_FIRSTDATAKEY(opaque)); rightitem = PageGetItemIdCareful(state, targetnext, rightpage,
P_FIRSTDATAKEY(opaque));
} }
else if (!P_ISLEAF(opaque) && else if (!P_ISLEAF(opaque) &&
nline >= OffsetNumberNext(P_FIRSTDATAKEY(opaque))) nline >= OffsetNumberNext(P_FIRSTDATAKEY(opaque)))
...@@ -1415,7 +1427,7 @@ bt_right_page_check_scankey(BtreeCheckState *state) ...@@ -1415,7 +1427,7 @@ bt_right_page_check_scankey(BtreeCheckState *state)
* Return first item after the internal page's "negative infinity" * Return first item after the internal page's "negative infinity"
* item * item
*/ */
rightitem = PageGetItemId(rightpage, rightitem = PageGetItemIdCareful(state, targetnext, rightpage,
OffsetNumberNext(P_FIRSTDATAKEY(opaque))); OffsetNumberNext(P_FIRSTDATAKEY(opaque)));
} }
else else
...@@ -1576,7 +1588,8 @@ bt_downlink_check(BtreeCheckState *state, BTScanInsert targetkey, ...@@ -1576,7 +1588,8 @@ bt_downlink_check(BtreeCheckState *state, BTScanInsert targetkey,
if (offset_is_negative_infinity(copaque, offset)) if (offset_is_negative_infinity(copaque, offset))
continue; continue;
if (!invariant_l_nontarget_offset(state, targetkey, child, offset)) if (!invariant_l_nontarget_offset(state, targetkey, childblock, child,
offset))
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED), (errcode(ERRCODE_INDEX_CORRUPTED),
errmsg("down-link lower bound invariant violated for index \"%s\"", errmsg("down-link lower bound invariant violated for index \"%s\"",
...@@ -1687,7 +1700,8 @@ bt_downlink_missing_check(BtreeCheckState *state) ...@@ -1687,7 +1700,8 @@ bt_downlink_missing_check(BtreeCheckState *state)
RelationGetRelationName(state->rel)); RelationGetRelationName(state->rel));
level = topaque->btpo.level; level = topaque->btpo.level;
itemid = PageGetItemId(state->target, P_FIRSTDATAKEY(topaque)); itemid = PageGetItemIdCareful(state, state->targetblock, state->target,
P_FIRSTDATAKEY(topaque));
itup = (IndexTuple) PageGetItem(state->target, itemid); itup = (IndexTuple) PageGetItem(state->target, itemid);
childblk = BTreeInnerTupleGetDownLink(itup); childblk = BTreeInnerTupleGetDownLink(itup);
for (;;) for (;;)
...@@ -1711,7 +1725,8 @@ bt_downlink_missing_check(BtreeCheckState *state) ...@@ -1711,7 +1725,8 @@ bt_downlink_missing_check(BtreeCheckState *state)
level - 1, copaque->btpo.level))); level - 1, copaque->btpo.level)));
level = copaque->btpo.level; level = copaque->btpo.level;
itemid = PageGetItemId(child, P_FIRSTDATAKEY(copaque)); itemid = PageGetItemIdCareful(state, childblk, child,
P_FIRSTDATAKEY(copaque));
itup = (IndexTuple) PageGetItem(child, itemid); itup = (IndexTuple) PageGetItem(child, itemid);
childblk = BTreeInnerTupleGetDownLink(itup); childblk = BTreeInnerTupleGetDownLink(itup);
/* Be slightly more pro-active in freeing this memory, just in case */ /* Be slightly more pro-active in freeing this memory, just in case */
...@@ -1760,7 +1775,7 @@ bt_downlink_missing_check(BtreeCheckState *state) ...@@ -1760,7 +1775,7 @@ bt_downlink_missing_check(BtreeCheckState *state)
*/ */
if (P_ISHALFDEAD(copaque) && !P_RIGHTMOST(copaque)) if (P_ISHALFDEAD(copaque) && !P_RIGHTMOST(copaque))
{ {
itemid = PageGetItemId(child, P_HIKEY); itemid = PageGetItemIdCareful(state, childblk, child, P_HIKEY);
itup = (IndexTuple) PageGetItem(child, itemid); itup = (IndexTuple) PageGetItem(child, itemid);
if (BTreeTupleGetTopParent(itup) == state->targetblock) if (BTreeTupleGetTopParent(itup) == state->targetblock)
return; return;
...@@ -2087,6 +2102,8 @@ offset_is_negative_infinity(BTPageOpaque opaque, OffsetNumber offset) ...@@ -2087,6 +2102,8 @@ offset_is_negative_infinity(BTPageOpaque opaque, OffsetNumber offset)
* Does the invariant hold that the key is strictly less than a given upper * Does the invariant hold that the key is strictly less than a given upper
* bound offset item? * bound offset item?
* *
* Verifies line pointer on behalf of caller.
*
* If this function returns false, convention is that caller throws error due * If this function returns false, convention is that caller throws error due
* to corruption. * to corruption.
*/ */
...@@ -2094,10 +2111,14 @@ static inline bool ...@@ -2094,10 +2111,14 @@ static inline bool
invariant_l_offset(BtreeCheckState *state, BTScanInsert key, invariant_l_offset(BtreeCheckState *state, BTScanInsert key,
OffsetNumber upperbound) OffsetNumber upperbound)
{ {
ItemId itemid;
int32 cmp; int32 cmp;
Assert(key->pivotsearch); Assert(key->pivotsearch);
/* Verify line pointer before checking tuple */
itemid = PageGetItemIdCareful(state, state->targetblock, state->target,
upperbound);
/* pg_upgrade'd indexes may legally have equal sibling tuples */ /* pg_upgrade'd indexes may legally have equal sibling tuples */
if (!key->heapkeyspace) if (!key->heapkeyspace)
return invariant_leq_offset(state, key, upperbound); return invariant_leq_offset(state, key, upperbound);
...@@ -2116,13 +2137,11 @@ invariant_l_offset(BtreeCheckState *state, BTScanInsert key, ...@@ -2116,13 +2137,11 @@ invariant_l_offset(BtreeCheckState *state, BTScanInsert key,
if (cmp == 0) if (cmp == 0)
{ {
BTPageOpaque topaque; BTPageOpaque topaque;
ItemId itemid;
IndexTuple ritup; IndexTuple ritup;
int uppnkeyatts; int uppnkeyatts;
ItemPointer rheaptid; ItemPointer rheaptid;
bool nonpivot; bool nonpivot;
itemid = PageGetItemId(state->target, upperbound);
ritup = (IndexTuple) PageGetItem(state->target, itemid); ritup = (IndexTuple) PageGetItem(state->target, itemid);
topaque = (BTPageOpaque) PageGetSpecialPointer(state->target); topaque = (BTPageOpaque) PageGetSpecialPointer(state->target);
nonpivot = P_ISLEAF(topaque) && upperbound >= P_FIRSTDATAKEY(topaque); nonpivot = P_ISLEAF(topaque) && upperbound >= P_FIRSTDATAKEY(topaque);
...@@ -2145,6 +2164,9 @@ invariant_l_offset(BtreeCheckState *state, BTScanInsert key, ...@@ -2145,6 +2164,9 @@ invariant_l_offset(BtreeCheckState *state, BTScanInsert key,
* Does the invariant hold that the key is less than or equal to a given upper * Does the invariant hold that the key is less than or equal to a given upper
* bound offset item? * bound offset item?
* *
* Caller should have verified that upperbound's item pointer is consistent
* using PageGetItemIdCareful() call.
*
* If this function returns false, convention is that caller throws error due * If this function returns false, convention is that caller throws error due
* to corruption. * to corruption.
*/ */
...@@ -2165,6 +2187,9 @@ invariant_leq_offset(BtreeCheckState *state, BTScanInsert key, ...@@ -2165,6 +2187,9 @@ invariant_leq_offset(BtreeCheckState *state, BTScanInsert key,
* Does the invariant hold that the key is strictly greater than a given lower * Does the invariant hold that the key is strictly greater than a given lower
* bound offset item? * bound offset item?
* *
* Caller should have verified that lowerbound's item pointer is consistent
* using PageGetItemIdCareful() call.
*
* If this function returns false, convention is that caller throws error due * If this function returns false, convention is that caller throws error due
* to corruption. * to corruption.
*/ */
...@@ -2199,19 +2224,24 @@ invariant_g_offset(BtreeCheckState *state, BTScanInsert key, ...@@ -2199,19 +2224,24 @@ invariant_g_offset(BtreeCheckState *state, BTScanInsert key,
* *
* Caller's non-target page is a child page of the target, checked as part of * Caller's non-target page is a child page of the target, checked as part of
* checking a property of the target page (i.e. the key comes from the * checking a property of the target page (i.e. the key comes from the
* target). * target). Verifies line pointer on behalf of caller.
* *
* If this function returns false, convention is that caller throws error due * If this function returns false, convention is that caller throws error due
* to corruption. * to corruption.
*/ */
static inline bool static inline bool
invariant_l_nontarget_offset(BtreeCheckState *state, BTScanInsert key, invariant_l_nontarget_offset(BtreeCheckState *state, BTScanInsert key,
Page nontarget, OffsetNumber upperbound) BlockNumber nontargetblock, Page nontarget,
OffsetNumber upperbound)
{ {
ItemId itemid;
int32 cmp; int32 cmp;
Assert(key->pivotsearch); Assert(key->pivotsearch);
/* Verify line pointer before checking tuple */
itemid = PageGetItemIdCareful(state, nontargetblock, nontarget,
upperbound);
cmp = _bt_compare(state->rel, key, nontarget, upperbound); cmp = _bt_compare(state->rel, key, nontarget, upperbound);
/* pg_upgrade'd indexes may legally have equal sibling tuples */ /* pg_upgrade'd indexes may legally have equal sibling tuples */
...@@ -2221,14 +2251,12 @@ invariant_l_nontarget_offset(BtreeCheckState *state, BTScanInsert key, ...@@ -2221,14 +2251,12 @@ invariant_l_nontarget_offset(BtreeCheckState *state, BTScanInsert key,
/* See invariant_l_offset() for an explanation of this extra step */ /* See invariant_l_offset() for an explanation of this extra step */
if (cmp == 0) if (cmp == 0)
{ {
ItemId itemid;
IndexTuple child; IndexTuple child;
int uppnkeyatts; int uppnkeyatts;
ItemPointer childheaptid; ItemPointer childheaptid;
BTPageOpaque copaque; BTPageOpaque copaque;
bool nonpivot; bool nonpivot;
itemid = PageGetItemId(nontarget, upperbound);
child = (IndexTuple) PageGetItem(nontarget, itemid); child = (IndexTuple) PageGetItem(nontarget, itemid);
copaque = (BTPageOpaque) PageGetSpecialPointer(nontarget); copaque = (BTPageOpaque) PageGetSpecialPointer(nontarget);
nonpivot = P_ISLEAF(copaque) && upperbound >= P_FIRSTDATAKEY(copaque); nonpivot = P_ISLEAF(copaque) && upperbound >= P_FIRSTDATAKEY(copaque);
...@@ -2426,6 +2454,55 @@ bt_mkscankey_pivotsearch(Relation rel, IndexTuple itup) ...@@ -2426,6 +2454,55 @@ bt_mkscankey_pivotsearch(Relation rel, IndexTuple itup)
return skey; return skey;
} }
/*
 * PageGetItemId() wrapper that validates the returned line pointer.
 *
 * Buffer page/page item access macros generally trust that line pointers are
 * not corrupt, which might cause problems for verification itself.  For
 * example, there is no bounds checking in PageGetItem().  Passing it a
 * corrupt line pointer can cause it to return a tuple/pointer that is unsafe
 * to dereference.
 *
 * Validating line pointers before tuples avoids undefined behavior and
 * assertion failures with corrupt indexes, making the verification process
 * more robust and predictable.
 *
 * Raises an ERRCODE_INDEX_CORRUPTED error when the line pointer at 'offset'
 * on 'page' is inconsistent; otherwise returns it.  'block' is the index
 * block the page was read from, used only for the error detail message.
 */
static ItemId
PageGetItemIdCareful(BtreeCheckState *state, BlockNumber block, Page page,
					 OffsetNumber offset)
{
	ItemId		itemid = PageGetItemId(page, offset);

	/*
	 * The tuple (lp_off .. lp_off + lp_len) must not extend into the page's
	 * special space, which holds the BTPageOpaqueData struct on every nbtree
	 * page.  A line pointer that crosses that boundary is unsafe to follow.
	 */
	if (ItemIdGetOffset(itemid) + ItemIdGetLength(itemid) >
		BLCKSZ - sizeof(BTPageOpaqueData))
		ereport(ERROR,
				(errcode(ERRCODE_INDEX_CORRUPTED),
				 errmsg("line pointer points past end of tuple space in index \"%s\"",
						RelationGetRelationName(state->rel)),
				 errdetail_internal("Index tid=(%u,%u) lp_off=%u, lp_len=%u lp_flags=%u.",
									block, offset, ItemIdGetOffset(itemid),
									ItemIdGetLength(itemid),
									ItemIdGetFlags(itemid))));

	/*
	 * Verify that line pointer isn't LP_REDIRECT or LP_UNUSED, since nbtree
	 * never uses either.  Verify that line pointer has storage, too, since
	 * even LP_DEAD items are expected to have storage within nbtree.
	 */
	if (ItemIdIsRedirected(itemid) || !ItemIdIsUsed(itemid) ||
		ItemIdGetLength(itemid) == 0)
		ereport(ERROR,
				(errcode(ERRCODE_INDEX_CORRUPTED),
				 errmsg("invalid line pointer storage in index \"%s\"",
						RelationGetRelationName(state->rel)),
				 errdetail_internal("Index tid=(%u,%u) lp_off=%u, lp_len=%u lp_flags=%u.",
									block, offset, ItemIdGetOffset(itemid),
									ItemIdGetLength(itemid),
									ItemIdGetFlags(itemid))));

	return itemid;
}
/* /*
* BTreeTupleGetHeapTID() wrapper that lets caller enforce that a heap TID must * BTreeTupleGetHeapTID() wrapper that lets caller enforce that a heap TID must
* be present in cases where that is mandatory. * be present in cases where that is mandatory.
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment