Commit 9da50e1f authored by Bruce Momjian

Back out unindented modification to file.

parent cdc84adb
@@ -13,7 +13,7 @@
  *
  *
  * IDENTIFICATION
- * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.293 2004/10/07 14:15:50 momjian Exp $
+ * $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.294 2004/10/07 14:19:58 momjian Exp $
  *
  *-------------------------------------------------------------------------
  */
@@ -240,27 +240,20 @@ vacuum(VacuumStmt *vacstmt)
 	if (vacstmt->verbose)
 		elevel = INFO;
 	else
-		/* bjm comment */
 		elevel = DEBUG2;
-	if (1 == 0)
-		func();
-	else
-		/* bjm comment */
-	{
-		elevel = DEBUG2;
-	}
 
 	/*
-	 * We cannot run VACUUM inside a user transaction block; if we were inside
-	 * a transaction, then our commit- and start-transaction-command calls
-	 * would not have the intended effect! Furthermore, the forced commit that
-	 * occurs before truncating the relation's file would have the effect of
-	 * committing the rest of the user's transaction too, which would
-	 * certainly not be the desired behavior. (This only applies to VACUUM
-	 * FULL, though. We could in theory run lazy VACUUM inside a transaction
-	 * block, but we choose to disallow that case because we'd rather commit
-	 * as soon as possible after finishing the vacuum. This is mainly so
-	 * that we can let go the AccessExclusiveLock that we may be holding.)
+	 * We cannot run VACUUM inside a user transaction block; if we were
+	 * inside a transaction, then our commit- and
+	 * start-transaction-command calls would not have the intended effect!
+	 * Furthermore, the forced commit that occurs before truncating the
+	 * relation's file would have the effect of committing the rest of the
+	 * user's transaction too, which would certainly not be the desired
+	 * behavior. (This only applies to VACUUM FULL, though. We could in
+	 * theory run lazy VACUUM inside a transaction block, but we choose to
+	 * disallow that case because we'd rather commit as soon as possible
+	 * after finishing the vacuum. This is mainly so that we can let go
+	 * the AccessExclusiveLock that we may be holding.)
 	 *
 	 * ANALYZE (without VACUUM) can run either way.
 	 */
@@ -272,15 +265,18 @@ vacuum(VacuumStmt *vacstmt)
 	else
 		in_outer_xact = IsInTransactionChain((void *) vacstmt);
 
-	/* Send info about dead objects to the statistics collector */
+	/*
+	 * Send info about dead objects to the statistics collector
+	 */
 	if (vacstmt->vacuum)
 		pgstat_vacuum_tabstat();
 
 	/*
 	 * Create special memory context for cross-transaction storage.
 	 *
-	 * Since it is a child of PortalContext, it will go away eventually even if
-	 * we suffer an error; there's no need for special abort cleanup logic.
+	 * Since it is a child of PortalContext, it will go away eventually even
+	 * if we suffer an error; there's no need for special abort cleanup
+	 * logic.
 	 */
 	vac_context = AllocSetContextCreate(PortalContext,
 										"Vacuum",
...@@ -299,21 +295,21 @@ vacuum(VacuumStmt *vacstmt) ...@@ -299,21 +295,21 @@ vacuum(VacuumStmt *vacstmt)
/* /*
* It's a database-wide VACUUM. * It's a database-wide VACUUM.
* *
* Compute the initially applicable OldestXmin and FreezeLimit XIDs, so * Compute the initially applicable OldestXmin and FreezeLimit XIDs,
* that we can record these values at the end of the VACUUM. Note that * so that we can record these values at the end of the VACUUM.
* individual tables may well be processed with newer values, but we * Note that individual tables may well be processed with newer
* can guarantee that no (non-shared) relations are processed with * values, but we can guarantee that no (non-shared) relations are
* older ones. * processed with older ones.
* *
* It is okay to record non-shared values in pg_database, even though we * It is okay to record non-shared values in pg_database, even though
* may vacuum shared relations with older cutoffs, because only the * we may vacuum shared relations with older cutoffs, because only
* minimum of the values present in pg_database matters. We can be * the minimum of the values present in pg_database matters. We
* sure that shared relations have at some time been vacuumed with * can be sure that shared relations have at some time been
* cutoffs no worse than the global minimum; for, if there is a * vacuumed with cutoffs no worse than the global minimum; for, if
* backend in some other DB with xmin = OLDXMIN that's determining the * there is a backend in some other DB with xmin = OLDXMIN that's
* cutoff with which we vacuum shared relations, it is not possible * determining the cutoff with which we vacuum shared relations,
* for that database to have a cutoff newer than OLDXMIN recorded in * it is not possible for that database to have a cutoff newer
* pg_database. * than OLDXMIN recorded in pg_database.
*/ */
vacuum_set_xid_limits(vacstmt, false, vacuum_set_xid_limits(vacstmt, false,
&initialOldestXmin, &initialOldestXmin,
...@@ -323,15 +319,16 @@ vacuum(VacuumStmt *vacstmt) ...@@ -323,15 +319,16 @@ vacuum(VacuumStmt *vacstmt)
/* /*
* Decide whether we need to start/commit our own transactions. * Decide whether we need to start/commit our own transactions.
* *
* For VACUUM (with or without ANALYZE): always do so, so that we can release * For VACUUM (with or without ANALYZE): always do so, so that we can
* locks as soon as possible. (We could possibly use the outer * release locks as soon as possible. (We could possibly use the
* transaction for a one-table VACUUM, but handling TOAST tables would be * outer transaction for a one-table VACUUM, but handling TOAST tables
* problematic.) * would be problematic.)
* *
* For ANALYZE (no VACUUM): if inside a transaction block, we cannot * For ANALYZE (no VACUUM): if inside a transaction block, we cannot
* start/commit our own transactions. Also, there's no need to do so if * start/commit our own transactions. Also, there's no need to do so
* only processing one relation. For multiple relations when not within a * if only processing one relation. For multiple relations when not
* transaction block, use own transactions so we can release locks sooner. * within a transaction block, use own transactions so we can release
* locks sooner.
*/ */
if (vacstmt->vacuum) if (vacstmt->vacuum)
use_own_xacts = true; use_own_xacts = true;
...@@ -347,8 +344,8 @@ vacuum(VacuumStmt *vacstmt) ...@@ -347,8 +344,8 @@ vacuum(VacuumStmt *vacstmt)
} }
/* /*
* If we are running ANALYZE without per-table transactions, we'll need a * If we are running ANALYZE without per-table transactions, we'll
* memory context with table lifetime. * need a memory context with table lifetime.
*/ */
if (!use_own_xacts) if (!use_own_xacts)
anl_context = AllocSetContextCreate(PortalContext, anl_context = AllocSetContextCreate(PortalContext,
...@@ -358,12 +355,12 @@ vacuum(VacuumStmt *vacstmt) ...@@ -358,12 +355,12 @@ vacuum(VacuumStmt *vacstmt)
ALLOCSET_DEFAULT_MAXSIZE); ALLOCSET_DEFAULT_MAXSIZE);
/* /*
* vacuum_rel expects to be entered with no transaction active; it will * vacuum_rel expects to be entered with no transaction active; it
* start and commit its own transaction. But we are called by an SQL * will start and commit its own transaction. But we are called by an
* command, and so we are executing inside a transaction already. We * SQL command, and so we are executing inside a transaction already.
* commit the transaction started in PostgresMain() here, and start * We commit the transaction started in PostgresMain() here, and start
* another one before exiting to match the commit waiting for us back in * another one before exiting to match the commit waiting for us back
* PostgresMain(). * in PostgresMain().
*/ */
if (use_own_xacts) if (use_own_xacts)
{ {
@@ -379,7 +376,9 @@ vacuum(VacuumStmt *vacstmt)
 	VacuumCostActive = (VacuumCostDelay > 0);
 	VacuumCostBalance = 0;
 
-	/* Loop to process each selected relation. */
+	/*
+	 * Loop to process each selected relation.
+	 */
 	foreach(cur, relations)
 	{
 		Oid			relid = lfirst_oid(cur);
...@@ -394,11 +393,11 @@ vacuum(VacuumStmt *vacstmt) ...@@ -394,11 +393,11 @@ vacuum(VacuumStmt *vacstmt)
MemoryContext old_context = NULL; MemoryContext old_context = NULL;
/* /*
* If using separate xacts, start one for analyze. Otherwise, * If using separate xacts, start one for analyze.
* we can use the outer transaction, but we still need to call * Otherwise, we can use the outer transaction, but we
* analyze_rel in a memory context that will be cleaned up on * still need to call analyze_rel in a memory context that
* return (else we leak memory while processing multiple * will be cleaned up on return (else we leak memory while
* tables). * processing multiple tables).
*/ */
if (use_own_xacts) if (use_own_xacts)
{ {
...@@ -410,8 +409,8 @@ vacuum(VacuumStmt *vacstmt) ...@@ -410,8 +409,8 @@ vacuum(VacuumStmt *vacstmt)
old_context = MemoryContextSwitchTo(anl_context); old_context = MemoryContextSwitchTo(anl_context);
/* /*
* Tell the buffer replacement strategy that vacuum is causing * Tell the buffer replacement strategy that vacuum is
* the IO * causing the IO
*/ */
StrategyHintVacuum(true); StrategyHintVacuum(true);
@@ -440,7 +439,9 @@ vacuum(VacuumStmt *vacstmt)
 	/* Turn off vacuum cost accounting */
 	VacuumCostActive = false;
 
-	/* Finish up processing. */
+	/*
+	 * Finish up processing.
+	 */
 	if (use_own_xacts)
 	{
 		/* here, we are not in a transaction */
@@ -455,16 +456,16 @@ vacuum(VacuumStmt *vacstmt)
 	if (vacstmt->vacuum)
 	{
 		/*
-		 * If it was a database-wide VACUUM, print FSM usage statistics (we
-		 * don't make you be superuser to see these).
+		 * If it was a database-wide VACUUM, print FSM usage statistics
+		 * (we don't make you be superuser to see these).
 		 */
 		if (vacstmt->relation == NULL)
 			PrintFreeSpaceMapStatistics(elevel);
 
 		/*
 		 * If we completed a database-wide VACUUM without skipping any
-		 * relations, update the database's pg_database row with info about
-		 * the transaction IDs used, and try to truncate pg_clog.
+		 * relations, update the database's pg_database row with info
+		 * about the transaction IDs used, and try to truncate pg_clog.
 		 */
 		if (all_rels)
 		{
@@ -476,8 +477,8 @@ vacuum(VacuumStmt *vacstmt)
 
 	/*
 	 * Clean up working storage --- note we must do this after
-	 * StartTransactionCommand, else we might be trying to delete the active
-	 * context!
+	 * StartTransactionCommand, else we might be trying to delete the
+	 * active context!
 	 */
 	MemoryContextDelete(vac_context);
 	vac_context = NULL;
@@ -570,11 +571,15 @@ vacuum_set_xid_limits(VacuumStmt *vacstmt, bool sharedRel,
 		limit = GetCurrentTransactionId() - (MaxTransactionId >> 2);
 	}
 
-	/* Be careful not to generate a "permanent" XID */
+	/*
+	 * Be careful not to generate a "permanent" XID
+	 */
 	if (!TransactionIdIsNormal(limit))
 		limit = FirstNormalTransactionId;
 
-	/* Ensure sane relationship of limits */
+	/*
+	 * Ensure sane relationship of limits
+	 */
 	if (TransactionIdFollows(limit, *oldestXmin))
 	{
 		ereport(WARNING,
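
The arithmetic in this hunk --- a cutoff derived from GetCurrentTransactionId() minus a quarter of MaxTransactionId, followed by the TransactionIdIsNormal and TransactionIdFollows checks --- only makes sense because 32-bit transaction IDs wrap around and are compared circularly rather than as plain integers. The sketch below illustrates that circular comparison, assuming two's-complement int32 arithmetic; xid_precedes and FIRST_NORMAL_XID are illustrative stand-ins, not the backend's TransactionIdPrecedes or FirstNormalTransactionId symbols.

/*
 * Stand-alone sketch of wraparound-aware XID comparison.  Assumes
 * two's-complement int32; not the backend's own implementation.
 */
#include <stdint.h>
#include <stdio.h>

#define FIRST_NORMAL_XID	3u		/* XIDs 0-2 are reserved, "permanent" */

/*
 * True if a is logically older than b on the 2^32 circle.  Only valid when
 * the two XIDs are less than 2^31 apart --- which VACUUM exists to ensure.
 */
static int
xid_precedes(uint32_t a, uint32_t b)
{
	return (int32_t) (a - b) < 0;
}

int
main(void)
{
	uint32_t	old_xid = UINT32_MAX - 5;	/* issued just before the wrap */
	uint32_t	new_xid = 10;				/* issued just after the wrap */
	uint32_t	limit = 2;					/* pretend a computed cutoff landed here */

	/* Plain comparison gets the order backwards; circular comparison does not. */
	printf("plain: %d  circular: %d\n", old_xid < new_xid,
		   xid_precedes(old_xid, new_xid));

	/* "Be careful not to generate a permanent XID": clamp reserved values. */
	if (limit < FIRST_NORMAL_XID)
		limit = FIRST_NORMAL_XID;
	printf("limit = %u\n", limit);
	return 0;
}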
...@@ -616,7 +621,9 @@ vac_update_relstats(Oid relid, BlockNumber num_pages, double num_tuples, ...@@ -616,7 +621,9 @@ vac_update_relstats(Oid relid, BlockNumber num_pages, double num_tuples,
Form_pg_class pgcform; Form_pg_class pgcform;
Buffer buffer; Buffer buffer;
/* update number of tuples and number of pages in pg_class */ /*
* update number of tuples and number of pages in pg_class
*/
rd = heap_openr(RelationRelationName, RowExclusiveLock); rd = heap_openr(RelationRelationName, RowExclusiveLock);
ctup = SearchSysCache(RELOID, ctup = SearchSysCache(RELOID,
...@@ -652,10 +659,10 @@ vac_update_relstats(Oid relid, BlockNumber num_pages, double num_tuples, ...@@ -652,10 +659,10 @@ vac_update_relstats(Oid relid, BlockNumber num_pages, double num_tuples,
LockBuffer(buffer, BUFFER_LOCK_UNLOCK); LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
/* /*
* Invalidate the tuple in the catcaches; this also arranges to flush the * Invalidate the tuple in the catcaches; this also arranges to flush
* relation's relcache entry. (If we fail to commit for some reason, no * the relation's relcache entry. (If we fail to commit for some
* flush will occur, but no great harm is done since there are no * reason, no flush will occur, but no great harm is done since there
* noncritical state updates here.) * are no noncritical state updates here.)
*/ */
CacheInvalidateHeapTuple(rd, &rtup); CacheInvalidateHeapTuple(rd, &rtup);
@@ -788,8 +795,8 @@ vac_truncate_clog(TransactionId vacuumXID, TransactionId frozenXID)
 	heap_close(relation, AccessShareLock);
 
 	/*
-	 * Do not truncate CLOG if we seem to have suffered wraparound already;
-	 * the computed minimum XID might be bogus.
+	 * Do not truncate CLOG if we seem to have suffered wraparound
+	 * already; the computed minimum XID might be bogus.
 	 */
 	if (vacuumAlreadyWrapped)
 	{
@@ -874,8 +881,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
 	CHECK_FOR_INTERRUPTS();
 
 	/*
-	 * Race condition -- if the pg_class tuple has gone away since the last
-	 * time we saw it, we don't need to vacuum it.
+	 * Race condition -- if the pg_class tuple has gone away since the
+	 * last time we saw it, we don't need to vacuum it.
 	 */
 	if (!SearchSysCacheExists(RELOID,
 							  ObjectIdGetDatum(relid),
...@@ -887,21 +894,24 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind) ...@@ -887,21 +894,24 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
} }
/* /*
* Determine the type of lock we want --- hard exclusive lock for a FULL * Determine the type of lock we want --- hard exclusive lock for a
* vacuum, but just ShareUpdateExclusiveLock for concurrent vacuum. Either * FULL vacuum, but just ShareUpdateExclusiveLock for concurrent
* way, we can be sure that no other backend is vacuuming the same table. * vacuum. Either way, we can be sure that no other backend is
* vacuuming the same table.
*/ */
lmode = vacstmt->full ? AccessExclusiveLock : ShareUpdateExclusiveLock; lmode = vacstmt->full ? AccessExclusiveLock : ShareUpdateExclusiveLock;
/* /*
* Open the class, get an appropriate lock on it, and check permissions. * Open the class, get an appropriate lock on it, and check
* permissions.
* *
* We allow the user to vacuum a table if he is superuser, the table owner, * We allow the user to vacuum a table if he is superuser, the table
* or the database owner (but in the latter case, only if it's not a * owner, or the database owner (but in the latter case, only if it's
* shared relation). pg_class_ownercheck includes the superuser case. * not a shared relation). pg_class_ownercheck includes the superuser
* case.
* *
* Note we choose to treat permissions failure as a WARNING and keep trying * Note we choose to treat permissions failure as a WARNING and keep
* to vacuum the rest of the DB --- is this appropriate? * trying to vacuum the rest of the DB --- is this appropriate?
*/ */
onerel = relation_open(relid, lmode); onerel = relation_open(relid, lmode);
...@@ -918,8 +928,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind) ...@@ -918,8 +928,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
} }
/* /*
* Check that it's a plain table; we used to do this in get_rel_oids() but * Check that it's a plain table; we used to do this in get_rel_oids()
* seems safer to check after we've locked the relation. * but seems safer to check after we've locked the relation.
*/ */
if (onerel->rd_rel->relkind != expected_relkind) if (onerel->rd_rel->relkind != expected_relkind)
{ {
...@@ -944,14 +954,15 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind) ...@@ -944,14 +954,15 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
relation_close(onerel, lmode); relation_close(onerel, lmode);
StrategyHintVacuum(false); StrategyHintVacuum(false);
CommitTransactionCommand(); CommitTransactionCommand();
return true; /* assume no long-lived data in temp tables */ return true; /* assume no long-lived data in temp
* tables */
} }
/* /*
* Get a session-level lock too. This will protect our access to the * Get a session-level lock too. This will protect our access to the
* relation across multiple transactions, so that we can vacuum the * relation across multiple transactions, so that we can vacuum the
* relation's TOAST table (if any) secure in the knowledge that no one is * relation's TOAST table (if any) secure in the knowledge that no one
* deleting the parent relation. * is deleting the parent relation.
* *
* NOTE: this cannot block, even if someone else is waiting for access, * NOTE: this cannot block, even if someone else is waiting for access,
* because the lock manager knows that both lock requests are from the * because the lock manager knows that both lock requests are from the
...@@ -960,10 +971,14 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind) ...@@ -960,10 +971,14 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
onerelid = onerel->rd_lockInfo.lockRelId; onerelid = onerel->rd_lockInfo.lockRelId;
LockRelationForSession(&onerelid, lmode); LockRelationForSession(&onerelid, lmode);
/* Remember the relation's TOAST relation for later */ /*
* Remember the relation's TOAST relation for later
*/
toast_relid = onerel->rd_rel->reltoastrelid; toast_relid = onerel->rd_rel->reltoastrelid;
/* Do the actual work --- either FULL or "lazy" vacuum */ /*
* Do the actual work --- either FULL or "lazy" vacuum
*/
if (vacstmt->full) if (vacstmt->full)
full_vacuum_rel(onerel, vacstmt); full_vacuum_rel(onerel, vacstmt);
else else
...@@ -974,16 +989,18 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind) ...@@ -974,16 +989,18 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
/* all done with this class, but hold lock until commit */ /* all done with this class, but hold lock until commit */
relation_close(onerel, NoLock); relation_close(onerel, NoLock);
/* Complete the transaction and free all temporary memory used. */ /*
* Complete the transaction and free all temporary memory used.
*/
StrategyHintVacuum(false); StrategyHintVacuum(false);
CommitTransactionCommand(); CommitTransactionCommand();
/* /*
* If the relation has a secondary toast rel, vacuum that too while we * If the relation has a secondary toast rel, vacuum that too while we
* still hold the session lock on the master table. Note however that * still hold the session lock on the master table. Note however that
* "analyze" will not get done on the toast table. This is good, because * "analyze" will not get done on the toast table. This is good,
* the toaster always uses hardcoded index access and statistics are * because the toaster always uses hardcoded index access and
* totally unimportant for toast relations. * statistics are totally unimportant for toast relations.
*/ */
if (toast_relid != InvalidOid) if (toast_relid != InvalidOid)
{ {
...@@ -991,7 +1008,9 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind) ...@@ -991,7 +1008,9 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
result = false; /* failed to vacuum the TOAST table? */ result = false; /* failed to vacuum the TOAST table? */
} }
/* Now release the session-level lock on the master table. */ /*
* Now release the session-level lock on the master table.
*/
UnlockRelationForSession(&onerelid, lmode); UnlockRelationForSession(&onerelid, lmode);
return result; return result;
...@@ -1020,8 +1039,8 @@ full_vacuum_rel(Relation onerel, VacuumStmt *vacstmt) ...@@ -1020,8 +1039,8 @@ full_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
{ {
VacPageListData vacuum_pages; /* List of pages to vacuum and/or VacPageListData vacuum_pages; /* List of pages to vacuum and/or
* clean indexes */ * clean indexes */
VacPageListData fraged_pages; /* List of pages with space enough for VacPageListData fraged_pages; /* List of pages with space enough
* re-using */ * for re-using */
Relation *Irel; Relation *Irel;
int nindexes, int nindexes,
i; i;
...@@ -1030,7 +1049,9 @@ full_vacuum_rel(Relation onerel, VacuumStmt *vacstmt) ...@@ -1030,7 +1049,9 @@ full_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
vacuum_set_xid_limits(vacstmt, onerel->rd_rel->relisshared, vacuum_set_xid_limits(vacstmt, onerel->rd_rel->relisshared,
&OldestXmin, &FreezeLimit); &OldestXmin, &FreezeLimit);
/* Set up statistics-gathering machinery. */ /*
* Set up statistics-gathering machinery.
*/
vacrelstats = (VRelStats *) palloc(sizeof(VRelStats)); vacrelstats = (VRelStats *) palloc(sizeof(VRelStats));
vacrelstats->rel_pages = 0; vacrelstats->rel_pages = 0;
vacrelstats->rel_tuples = 0; vacrelstats->rel_tuples = 0;
...@@ -1178,8 +1199,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, ...@@ -1178,8 +1199,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
VacPage vacpagecopy; VacPage vacpagecopy;
ereport(WARNING, ereport(WARNING,
(errmsg("relation \"%s\" page %u is uninitialized --- fixing", (errmsg("relation \"%s\" page %u is uninitialized --- fixing",
relname, blkno))); relname, blkno)));
PageInit(page, BufferGetPageSize(buf), 0); PageInit(page, BufferGetPageSize(buf), 0);
vacpage->free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower; vacpage->free = ((PageHeader) page)->pd_upper - ((PageHeader) page)->pd_lower;
free_space += vacpage->free; free_space += vacpage->free;
...@@ -1244,8 +1265,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, ...@@ -1244,8 +1265,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
case HEAPTUPLE_LIVE: case HEAPTUPLE_LIVE:
/* /*
* Tuple is good. Consider whether to replace its xmin * Tuple is good. Consider whether to replace its
* value with FrozenTransactionId. * xmin value with FrozenTransactionId.
*/ */
if (TransactionIdIsNormal(HeapTupleHeaderGetXmin(tuple.t_data)) && if (TransactionIdIsNormal(HeapTupleHeaderGetXmin(tuple.t_data)) &&
TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data), TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data),
...@@ -1257,7 +1278,9 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, ...@@ -1257,7 +1278,9 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
pgchanged = true; pgchanged = true;
} }
/* Other checks... */ /*
* Other checks...
*/
if (onerel->rd_rel->relhasoids && if (onerel->rd_rel->relhasoids &&
!OidIsValid(HeapTupleGetOid(&tuple))) !OidIsValid(HeapTupleGetOid(&tuple)))
elog(WARNING, "relation \"%s\" TID %u/%u: OID is invalid", elog(WARNING, "relation \"%s\" TID %u/%u: OID is invalid",
...@@ -1266,14 +1289,15 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, ...@@ -1266,14 +1289,15 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
case HEAPTUPLE_RECENTLY_DEAD: case HEAPTUPLE_RECENTLY_DEAD:
/* /*
* If tuple is recently deleted then we must not remove it * If tuple is recently deleted then we must not
* from relation. * remove it from relation.
*/ */
nkeep += 1; nkeep += 1;
/* /*
* If we do shrinking and this tuple is updated one then * If we do shrinking and this tuple is updated one
* remember it to construct updated tuple dependencies. * then remember it to construct updated tuple
* dependencies.
*/ */
if (do_shrinking && if (do_shrinking &&
!(ItemPointerEquals(&(tuple.t_self), !(ItemPointerEquals(&(tuple.t_self),
...@@ -1283,8 +1307,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, ...@@ -1283,8 +1307,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
{ {
free_vtlinks = 1000; free_vtlinks = 1000;
vtlinks = (VTupleLink) repalloc(vtlinks, vtlinks = (VTupleLink) repalloc(vtlinks,
(free_vtlinks + num_vtlinks) * (free_vtlinks + num_vtlinks) *
sizeof(VTupleLinkData)); sizeof(VTupleLinkData));
} }
vtlinks[num_vtlinks].new_tid = tuple.t_data->t_ctid; vtlinks[num_vtlinks].new_tid = tuple.t_data->t_ctid;
vtlinks[num_vtlinks].this_tid = tuple.t_self; vtlinks[num_vtlinks].this_tid = tuple.t_self;
...@@ -1295,10 +1319,10 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, ...@@ -1295,10 +1319,10 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
case HEAPTUPLE_INSERT_IN_PROGRESS: case HEAPTUPLE_INSERT_IN_PROGRESS:
/* /*
* This should not happen, since we hold exclusive lock on * This should not happen, since we hold exclusive
* the relation; shouldn't we raise an error? (Actually, * lock on the relation; shouldn't we raise an error?
* it can happen in system catalogs, since we tend to * (Actually, it can happen in system catalogs, since
* release write lock before commit there.) * we tend to release write lock before commit there.)
*/ */
ereport(NOTICE, ereport(NOTICE,
(errmsg("relation \"%s\" TID %u/%u: InsertTransactionInProgress %u --- can't shrink relation", (errmsg("relation \"%s\" TID %u/%u: InsertTransactionInProgress %u --- can't shrink relation",
...@@ -1308,10 +1332,10 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, ...@@ -1308,10 +1332,10 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
case HEAPTUPLE_DELETE_IN_PROGRESS: case HEAPTUPLE_DELETE_IN_PROGRESS:
/* /*
* This should not happen, since we hold exclusive lock on * This should not happen, since we hold exclusive
* the relation; shouldn't we raise an error? (Actually, * lock on the relation; shouldn't we raise an error?
* it can happen in system catalogs, since we tend to * (Actually, it can happen in system catalogs, since
* release write lock before commit there.) * we tend to release write lock before commit there.)
*/ */
ereport(NOTICE, ereport(NOTICE,
(errmsg("relation \"%s\" TID %u/%u: DeleteTransactionInProgress %u --- can't shrink relation", (errmsg("relation \"%s\" TID %u/%u: DeleteTransactionInProgress %u --- can't shrink relation",
...@@ -1333,11 +1357,12 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, ...@@ -1333,11 +1357,12 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
ItemId lpp; ItemId lpp;
/* /*
* Here we are building a temporary copy of the page with dead * Here we are building a temporary copy of the page with
* tuples removed. Below we will apply PageRepairFragmentation * dead tuples removed. Below we will apply
* to the copy, so that we can determine how much space will * PageRepairFragmentation to the copy, so that we can
* be available after removal of dead tuples. But note we are * determine how much space will be available after
* NOT changing the real page yet... * removal of dead tuples. But note we are NOT changing
* the real page yet...
*/ */
if (tempPage == NULL) if (tempPage == NULL)
{ {
@@ -1387,8 +1412,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
 
 		/*
 		 * Add the page to fraged_pages if it has a useful amount of free
 		 * space. "Useful" means enough for a minimal-sized tuple. But we
-		 * don't know that accurately near the start of the relation, so add
-		 * pages unconditionally if they have >= BLCKSZ/10 free space.
+		 * don't know that accurately near the start of the relation, so
+		 * add pages unconditionally if they have >= BLCKSZ/10 free space.
 		 */
 		do_frag = (vacpage->free >= min_tlen || vacpage->free >= BLCKSZ / 10);
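
The do_frag test in this hunk keeps a page as a candidate move destination if it has room for the smallest tuple seen so far, falling back to a coarse one-tenth-of-a-page threshold near the start of the scan, where min_tlen is not yet reliable. A minimal sketch of that decision follows, assuming the default 8 kB block size; page_is_useful_target and TOY_BLCKSZ are illustrative names only.

/* Sketch of the "useful free space" test for candidate move-target pages. */
#include <stdbool.h>
#include <stdio.h>

#define TOY_BLCKSZ	8192		/* assume the default 8 kB page size */

static bool
page_is_useful_target(int free_bytes, int min_tuple_len)
{
	/*
	 * Enough room for the smallest tuple seen so far, or --- when that
	 * estimate is still unreliable --- at least a tenth of a page free.
	 */
	return free_bytes >= min_tuple_len || free_bytes >= TOY_BLCKSZ / 10;
}

int
main(void)
{
	printf("%d\n", page_is_useful_target(64, 40));		/* 1: fits a tuple */
	printf("%d\n", page_is_useful_target(500, 4000));	/* 0: too little room */
	printf("%d\n", page_is_useful_target(900, 4000));	/* 1: >= BLCKSZ/10 */
	return 0;
}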
...@@ -1404,7 +1429,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, ...@@ -1404,7 +1429,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
/* /*
* Include the page in empty_end_pages if it will be empty after * Include the page in empty_end_pages if it will be empty after
* vacuuming; this is to keep us from using it as a move destination. * vacuuming; this is to keep us from using it as a move
* destination.
*/ */
if (notup) if (notup)
{ {
...@@ -1474,11 +1500,11 @@ scan_heap(VRelStats *vacrelstats, Relation onerel, ...@@ -1474,11 +1500,11 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
RelationGetRelationName(onerel), RelationGetRelationName(onerel),
tups_vacuumed, num_tuples, nblocks), tups_vacuumed, num_tuples, nblocks),
errdetail("%.0f dead row versions cannot be removed yet.\n" errdetail("%.0f dead row versions cannot be removed yet.\n"
"Nonremovable row versions range from %lu to %lu bytes long.\n" "Nonremovable row versions range from %lu to %lu bytes long.\n"
"There were %.0f unused item pointers.\n" "There were %.0f unused item pointers.\n"
"Total free space (including removable row versions) is %.0f bytes.\n" "Total free space (including removable row versions) is %.0f bytes.\n"
"%u pages are or will become empty, including %u at the end of the table.\n" "%u pages are or will become empty, including %u at the end of the table.\n"
"%u pages containing %.0f free bytes are potential move destinations.\n" "%u pages containing %.0f free bytes are potential move destinations.\n"
"%s", "%s",
nkeep, nkeep,
(unsigned long) min_tlen, (unsigned long) max_tlen, (unsigned long) min_tlen, (unsigned long) max_tlen,
...@@ -1551,14 +1577,14 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, ...@@ -1551,14 +1577,14 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
vacpage->offsets_used = vacpage->offsets_free = 0; vacpage->offsets_used = vacpage->offsets_free = 0;
/* /*
* Scan pages backwards from the last nonempty page, trying to move tuples * Scan pages backwards from the last nonempty page, trying to move
* down to lower pages. Quit when we reach a page that we have moved any * tuples down to lower pages. Quit when we reach a page that we have
* tuples onto, or the first page if we haven't moved anything, or when we * moved any tuples onto, or the first page if we haven't moved
* find a page we cannot completely empty (this last condition is handled * anything, or when we find a page we cannot completely empty (this
* by "break" statements within the loop). * last condition is handled by "break" statements within the loop).
* *
* NB: this code depends on the vacuum_pages and fraged_pages lists being in * NB: this code depends on the vacuum_pages and fraged_pages lists being
* order by blkno. * in order by blkno.
*/ */
nblocks = vacrelstats->rel_pages; nblocks = vacrelstats->rel_pages;
for (blkno = nblocks - vacuum_pages->empty_end_pages - 1; for (blkno = nblocks - vacuum_pages->empty_end_pages - 1;
...@@ -1576,23 +1602,26 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, ...@@ -1576,23 +1602,26 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
vacuum_delay_point(); vacuum_delay_point();
/* /*
* Forget fraged_pages pages at or after this one; they're no longer * Forget fraged_pages pages at or after this one; they're no
* useful as move targets, since we only want to move down. Note that * longer useful as move targets, since we only want to move down.
* since we stop the outer loop at last_move_dest_block, pages removed * Note that since we stop the outer loop at last_move_dest_block,
* here cannot have had anything moved onto them already. * pages removed here cannot have had anything moved onto them
* already.
* *
* Also note that we don't change the stored fraged_pages list, only our * Also note that we don't change the stored fraged_pages list, only
* local variable num_fraged_pages; so the forgotten pages are still * our local variable num_fraged_pages; so the forgotten pages are
* available to be loaded into the free space map later. * still available to be loaded into the free space map later.
*/ */
while (num_fraged_pages > 0 && while (num_fraged_pages > 0 &&
fraged_pages->pagedesc[num_fraged_pages - 1]->blkno >= blkno) fraged_pages->pagedesc[num_fraged_pages - 1]->blkno >= blkno)
{ {
Assert(fraged_pages->pagedesc[num_fraged_pages - 1]->offsets_used == 0); Assert(fraged_pages->pagedesc[num_fraged_pages - 1]->offsets_used == 0);
--num_fraged_pages; --num_fraged_pages;
} }
/* Process this page of relation. */ /*
* Process this page of relation.
*/
buf = ReadBuffer(onerel, blkno); buf = ReadBuffer(onerel, blkno);
page = BufferGetPage(buf); page = BufferGetPage(buf);
...@@ -1637,8 +1666,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, ...@@ -1637,8 +1666,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
else else
Assert(!isempty); Assert(!isempty);
chain_tuple_moved = false; /* no one chain-tuple was moved off chain_tuple_moved = false; /* no one chain-tuple was moved
* this page, yet */ * off this page, yet */
vacpage->blkno = blkno; vacpage->blkno = blkno;
maxoff = PageGetMaxOffsetNumber(page); maxoff = PageGetMaxOffsetNumber(page);
for (offnum = FirstOffsetNumber; for (offnum = FirstOffsetNumber;
...@@ -1658,36 +1687,38 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, ...@@ -1658,36 +1687,38 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
ItemPointerSet(&(tuple.t_self), blkno, offnum); ItemPointerSet(&(tuple.t_self), blkno, offnum);
/* /*
* VACUUM FULL has an exclusive lock on the relation. So normally * VACUUM FULL has an exclusive lock on the relation. So
* no other transaction can have pending INSERTs or DELETEs in * normally no other transaction can have pending INSERTs or
* this relation. A tuple is either (a) a tuple in a system * DELETEs in this relation. A tuple is either (a) a tuple in
* catalog, inserted or deleted by a not yet committed transaction * a system catalog, inserted or deleted by a not yet
* or (b) dead (XMIN_INVALID or XMAX_COMMITTED) or (c) inserted by * committed transaction or (b) dead (XMIN_INVALID or
* a committed xact (XMIN_COMMITTED) or (d) moved by the currently * XMAX_COMMITTED) or (c) inserted by a committed xact
* running VACUUM. In case (a) we wouldn't be in repair_frag() at * (XMIN_COMMITTED) or (d) moved by the currently running
* all. In case (b) we cannot be here, because scan_heap() has * VACUUM. In case (a) we wouldn't be in repair_frag() at all.
* already marked the item as unused, see continue above. Case (c) * In case (b) we cannot be here, because scan_heap() has
* is what normally is to be expected. Case (d) is only possible, * already marked the item as unused, see continue above. Case
* if a whole tuple chain has been moved while processing this or * (c) is what normally is to be expected. Case (d) is only
* a higher numbered block. * possible, if a whole tuple chain has been moved while
* processing this or a higher numbered block.
*/ */
if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED)) if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
{ {
/* /*
* There cannot be another concurrently running VACUUM. If the * There cannot be another concurrently running VACUUM. If
* tuple had been moved in by a previous VACUUM, the * the tuple had been moved in by a previous VACUUM, the
* visibility check would have set XMIN_COMMITTED. If the * visibility check would have set XMIN_COMMITTED. If the
* tuple had been moved in by the currently running VACUUM, * tuple had been moved in by the currently running
* the loop would have been terminated. We had elog(ERROR, * VACUUM, the loop would have been terminated. We had
* ...) here, but as we are testing for a can't-happen * elog(ERROR, ...) here, but as we are testing for a
* condition, Assert() seems more appropriate. * can't-happen condition, Assert() seems more
* appropriate.
*/ */
Assert(!(tuple.t_data->t_infomask & HEAP_MOVED_IN)); Assert(!(tuple.t_data->t_infomask & HEAP_MOVED_IN));
/* /*
* If this (chain) tuple is moved by me already then I have to * If this (chain) tuple is moved by me already then I
* check is it in vacpage or not - i.e. is it moved while * have to check is it in vacpage or not - i.e. is it
* cleaning this page or some previous one. * moved while cleaning this page or some previous one.
*/ */
Assert(tuple.t_data->t_infomask & HEAP_MOVED_OFF); Assert(tuple.t_data->t_infomask & HEAP_MOVED_OFF);
...@@ -1723,25 +1754,27 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, ...@@ -1723,25 +1754,27 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
} }
/* /*
* If this tuple is in the chain of tuples created in updates by * If this tuple is in the chain of tuples created in updates
* "recent" transactions then we have to move all chain of tuples * by "recent" transactions then we have to move all chain of
* to another places. * tuples to another places.
* *
* NOTE: this test is not 100% accurate: it is possible for a tuple * NOTE: this test is not 100% accurate: it is possible for a
* to be an updated one with recent xmin, and yet not have a * tuple to be an updated one with recent xmin, and yet not
* corresponding tuple in the vtlinks list. Presumably there was * have a corresponding tuple in the vtlinks list. Presumably
* once a parent tuple with xmax matching the xmin, but it's * there was once a parent tuple with xmax matching the xmin,
* possible that that tuple has been removed --- for example, if * but it's possible that that tuple has been removed --- for
* it had xmin = xmax then HeapTupleSatisfiesVacuum would deem it * example, if it had xmin = xmax then
* removable as soon as the xmin xact completes. * HeapTupleSatisfiesVacuum would deem it removable as soon as
* the xmin xact completes.
* *
* To be on the safe side, we abandon the repair_frag process if we * To be on the safe side, we abandon the repair_frag process if
* cannot find the parent tuple in vtlinks. This may be overly * we cannot find the parent tuple in vtlinks. This may be
* conservative; AFAICS it would be safe to move the chain. * overly conservative; AFAICS it would be safe to move the
* chain.
*/ */
if (((tuple.t_data->t_infomask & HEAP_UPDATED) && if (((tuple.t_data->t_infomask & HEAP_UPDATED) &&
!TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data), !TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data),
OldestXmin)) || OldestXmin)) ||
(!(tuple.t_data->t_infomask & (HEAP_XMAX_INVALID | (!(tuple.t_data->t_infomask & (HEAP_XMAX_INVALID |
HEAP_MARKED_FOR_UPDATE)) && HEAP_MARKED_FOR_UPDATE)) &&
!(ItemPointerEquals(&(tuple.t_self), !(ItemPointerEquals(&(tuple.t_self),
...@@ -1778,11 +1811,11 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, ...@@ -1778,11 +1811,11 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
free_vtmove = 100; free_vtmove = 100;
/* /*
* If this tuple is in the begin/middle of the chain then we * If this tuple is in the begin/middle of the chain then
* have to move to the end of chain. * we have to move to the end of chain.
*/ */
while (!(tp.t_data->t_infomask & (HEAP_XMAX_INVALID | while (!(tp.t_data->t_infomask & (HEAP_XMAX_INVALID |
HEAP_MARKED_FOR_UPDATE)) && HEAP_MARKED_FOR_UPDATE)) &&
!(ItemPointerEquals(&(tp.t_self), !(ItemPointerEquals(&(tp.t_self),
&(tp.t_data->t_ctid)))) &(tp.t_data->t_ctid))))
{ {
...@@ -1798,17 +1831,17 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, ...@@ -1798,17 +1831,17 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
ItemPointerGetBlockNumber(&Ctid)); ItemPointerGetBlockNumber(&Ctid));
Cpage = BufferGetPage(Cbuf); Cpage = BufferGetPage(Cbuf);
Citemid = PageGetItemId(Cpage, Citemid = PageGetItemId(Cpage,
ItemPointerGetOffsetNumber(&Ctid)); ItemPointerGetOffsetNumber(&Ctid));
if (!ItemIdIsUsed(Citemid)) if (!ItemIdIsUsed(Citemid))
{ {
/* /*
* This means that in the middle of chain there was * This means that in the middle of chain there
* tuple updated by older (than OldestXmin) xaction * was tuple updated by older (than OldestXmin)
* and this tuple is already deleted by me. Actually, * xaction and this tuple is already deleted by
* upper part of chain should be removed and seems * me. Actually, upper part of chain should be
* that this should be handled in scan_heap(), but * removed and seems that this should be handled
* it's not implemented at the moment and so we just * in scan_heap(), but it's not implemented at the
* stop shrinking here. * moment and so we just stop shrinking here.
*/ */
elog(DEBUG2, "child itemid in update-chain marked as unused --- can't continue repair_frag"); elog(DEBUG2, "child itemid in update-chain marked as unused --- can't continue repair_frag");
chain_move_failed = true; chain_move_failed = true;
...@@ -1827,7 +1860,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, ...@@ -1827,7 +1860,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
break; /* out of walk-along-page loop */ break; /* out of walk-along-page loop */
} }
/* Check if all items in chain can be moved */ /*
* Check if all items in chain can be moved
*/
for (;;) for (;;)
{ {
Buffer Pbuf; Buffer Pbuf;
...@@ -1878,8 +1913,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, ...@@ -1878,8 +1913,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/* At beginning of chain? */ /* At beginning of chain? */
if (!(tp.t_data->t_infomask & HEAP_UPDATED) || if (!(tp.t_data->t_infomask & HEAP_UPDATED) ||
TransactionIdPrecedes(HeapTupleHeaderGetXmin(tp.t_data), TransactionIdPrecedes(HeapTupleHeaderGetXmin(tp.t_data),
OldestXmin)) OldestXmin))
break; break;
/* No, move to tuple with prior row version */ /* No, move to tuple with prior row version */
...@@ -1899,10 +1934,10 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, ...@@ -1899,10 +1934,10 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
} }
tp.t_self = vtlp->this_tid; tp.t_self = vtlp->this_tid;
Pbuf = ReadBuffer(onerel, Pbuf = ReadBuffer(onerel,
ItemPointerGetBlockNumber(&(tp.t_self))); ItemPointerGetBlockNumber(&(tp.t_self)));
Ppage = BufferGetPage(Pbuf); Ppage = BufferGetPage(Pbuf);
Pitemid = PageGetItemId(Ppage, Pitemid = PageGetItemId(Ppage,
ItemPointerGetOffsetNumber(&(tp.t_self))); ItemPointerGetOffsetNumber(&(tp.t_self)));
/* this can't happen since we saw tuple earlier: */ /* this can't happen since we saw tuple earlier: */
if (!ItemIdIsUsed(Pitemid)) if (!ItemIdIsUsed(Pitemid))
elog(ERROR, "parent itemid marked as unused"); elog(ERROR, "parent itemid marked as unused");
...@@ -1915,17 +1950,18 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, ...@@ -1915,17 +1950,18 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/* /*
* Read above about cases when !ItemIdIsUsed(Citemid) * Read above about cases when !ItemIdIsUsed(Citemid)
* (child item is removed)... Due to the fact that at the * (child item is removed)... Due to the fact that at
* moment we don't remove unuseful part of update-chain, * the moment we don't remove unuseful part of
* it's possible to get too old parent row here. Like as * update-chain, it's possible to get too old parent
* in the case which caused this problem, we stop * row here. Like as in the case which caused this
* shrinking here. I could try to find real parent row but * problem, we stop shrinking here. I could try to
* want not to do it because of real solution will be * find real parent row but want not to do it because
* implemented anyway, later, and we are too close to 6.5 * of real solution will be implemented anyway, later,
* release. - vadim 06/11/99 * and we are too close to 6.5 release. - vadim
* 06/11/99
*/ */
if (!(TransactionIdEquals(HeapTupleHeaderGetXmax(Ptp.t_data), if (!(TransactionIdEquals(HeapTupleHeaderGetXmax(Ptp.t_data),
HeapTupleHeaderGetXmin(tp.t_data)))) HeapTupleHeaderGetXmin(tp.t_data))))
{ {
ReleaseBuffer(Pbuf); ReleaseBuffer(Pbuf);
elog(DEBUG2, "too old parent tuple found --- can't continue repair_frag"); elog(DEBUG2, "too old parent tuple found --- can't continue repair_frag");
...@@ -1948,9 +1984,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, ...@@ -1948,9 +1984,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
if (chain_move_failed) if (chain_move_failed)
{ {
/* /*
* Undo changes to offsets_used state. We don't bother * Undo changes to offsets_used state. We don't
* cleaning up the amount-free state, since we're not * bother cleaning up the amount-free state, since
* going to do any further tuple motion. * we're not going to do any further tuple motion.
*/ */
for (i = 0; i < num_vtmove; i++) for (i = 0; i < num_vtmove; i++)
{ {
...@@ -1961,7 +1997,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, ...@@ -1961,7 +1997,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
break; /* out of walk-along-page loop */ break; /* out of walk-along-page loop */
} }
/* Okay, move the whole tuple chain */ /*
* Okay, move the whole tuple chain
*/
ItemPointerSetInvalid(&Ctid); ItemPointerSetInvalid(&Ctid);
for (ti = 0; ti < num_vtmove; ti++) for (ti = 0; ti < num_vtmove; ti++)
{ {
...@@ -1972,7 +2010,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, ...@@ -1972,7 +2010,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/* Get page to move from */ /* Get page to move from */
tuple.t_self = vtmove[ti].tid; tuple.t_self = vtmove[ti].tid;
Cbuf = ReadBuffer(onerel, Cbuf = ReadBuffer(onerel,
ItemPointerGetBlockNumber(&(tuple.t_self))); ItemPointerGetBlockNumber(&(tuple.t_self)));
/* Get page to move to */ /* Get page to move to */
dst_buffer = ReadBuffer(onerel, destvacpage->blkno); dst_buffer = ReadBuffer(onerel, destvacpage->blkno);
...@@ -1985,7 +2023,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, ...@@ -1985,7 +2023,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
Cpage = BufferGetPage(Cbuf); Cpage = BufferGetPage(Cbuf);
Citemid = PageGetItemId(Cpage, Citemid = PageGetItemId(Cpage,
ItemPointerGetOffsetNumber(&(tuple.t_self))); ItemPointerGetOffsetNumber(&(tuple.t_self)));
tuple.t_datamcxt = NULL; tuple.t_datamcxt = NULL;
tuple.t_data = (HeapTupleHeader) PageGetItem(Cpage, Citemid); tuple.t_data = (HeapTupleHeader) PageGetItem(Cpage, Citemid);
tuple_len = tuple.t_len = ItemIdGetLength(Citemid); tuple_len = tuple.t_len = ItemIdGetLength(Citemid);
...@@ -2069,16 +2107,19 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, ...@@ -2069,16 +2107,19 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
} /* walk along page */ } /* walk along page */
/* /*
* If we broke out of the walk-along-page loop early (ie, still have * If we broke out of the walk-along-page loop early (ie, still
* offnum <= maxoff), then we failed to move some tuple off this page. * have offnum <= maxoff), then we failed to move some tuple off
* No point in shrinking any more, so clean up and exit the per-page * this page. No point in shrinking any more, so clean up and
* loop. * exit the per-page loop.
*/ */
if (offnum < maxoff && keep_tuples > 0) if (offnum < maxoff && keep_tuples > 0)
{ {
OffsetNumber off; OffsetNumber off;
/* Fix vacpage state for any unvisited tuples remaining on page */ /*
* Fix vacpage state for any unvisited tuples remaining on
* page
*/
for (off = OffsetNumberNext(offnum); for (off = OffsetNumberNext(offnum);
off <= maxoff; off <= maxoff;
off = OffsetNumberNext(off)) off = OffsetNumberNext(off))
...@@ -2093,8 +2134,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, ...@@ -2093,8 +2134,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
continue; continue;
/* /*
* * See comments in the walk-along-page loop above, why we * * * See comments in the walk-along-page loop above, why
* have Asserts here instead of if (...) elog(ERROR). * we * have Asserts here instead of if (...) elog(ERROR).
*/ */
Assert(!(htup->t_infomask & HEAP_MOVED_IN)); Assert(!(htup->t_infomask & HEAP_MOVED_IN));
Assert(htup->t_infomask & HEAP_MOVED_OFF); Assert(htup->t_infomask & HEAP_MOVED_OFF);
...@@ -2158,20 +2199,20 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, ...@@ -2158,20 +2199,20 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* We have to commit our tuple movings before we truncate the * We have to commit our tuple movings before we truncate the
* relation. Ideally we should do Commit/StartTransactionCommand * relation. Ideally we should do Commit/StartTransactionCommand
* here, relying on the session-level table lock to protect our * here, relying on the session-level table lock to protect our
* exclusive access to the relation. However, that would require a * exclusive access to the relation. However, that would require
* lot of extra code to close and re-open the relation, indexes, etc. * a lot of extra code to close and re-open the relation, indexes,
* For now, a quick hack: record status of current transaction as * etc. For now, a quick hack: record status of current
* committed, and continue. * transaction as committed, and continue.
*/ */
RecordTransactionCommit(); RecordTransactionCommit();
} }
/* /*
* We are not going to move any more tuples across pages, but we still * We are not going to move any more tuples across pages, but we still
* need to apply vacuum_page to compact free space in the remaining pages * need to apply vacuum_page to compact free space in the remaining
* in vacuum_pages list. Note that some of these pages may also be in the * pages in vacuum_pages list. Note that some of these pages may also
* fraged_pages list, and may have had tuples moved onto them; if so, we * be in the fraged_pages list, and may have had tuples moved onto
* already did vacuum_page and needn't do it again. * them; if so, we already did vacuum_page and needn't do it again.
*/ */
for (i = 0, curpage = vacuum_pages->pagedesc; for (i = 0, curpage = vacuum_pages->pagedesc;
i < vacuumed_pages; i < vacuumed_pages;
...@@ -2205,19 +2246,21 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, ...@@ -2205,19 +2246,21 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
last_move_dest_block, num_moved); last_move_dest_block, num_moved);
/* /*
* It'd be cleaner to make this report at the bottom of this routine, but * It'd be cleaner to make this report at the bottom of this routine,
* then the rusage would double-count the second pass of index vacuuming. * but then the rusage would double-count the second pass of index
* So do it here and ignore the relatively small amount of processing that * vacuuming. So do it here and ignore the relatively small amount of
* occurs below. * processing that occurs below.
*/ */
ereport(elevel, ereport(elevel,
(errmsg("\"%s\": moved %u row versions, truncated %u to %u pages", (errmsg("\"%s\": moved %u row versions, truncated %u to %u pages",
RelationGetRelationName(onerel), RelationGetRelationName(onerel),
num_moved, nblocks, blkno), num_moved, nblocks, blkno),
errdetail("%s", errdetail("%s",
vac_show_rusage(&ru0)))); vac_show_rusage(&ru0))));
/* Reflect the motion of system tuples to catalog cache here. */ /*
* Reflect the motion of system tuples to catalog cache here.
*/
CommandCounterIncrement(); CommandCounterIncrement();
if (Nvacpagelist.num_pages > 0) if (Nvacpagelist.num_pages > 0)
...@@ -2231,7 +2274,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, ...@@ -2231,7 +2274,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/* re-sort Nvacpagelist.pagedesc */ /* re-sort Nvacpagelist.pagedesc */
for (vpleft = Nvacpagelist.pagedesc, for (vpleft = Nvacpagelist.pagedesc,
vpright = Nvacpagelist.pagedesc + Nvacpagelist.num_pages - 1; vpright = Nvacpagelist.pagedesc + Nvacpagelist.num_pages - 1;
vpleft < vpright; vpleft++, vpright--) vpleft < vpright; vpleft++, vpright--)
{ {
vpsave = *vpleft; vpsave = *vpleft;
...@@ -2240,10 +2283,11 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, ...@@ -2240,10 +2283,11 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
} }
/* /*
* keep_tuples is the number of tuples that have been moved off a * keep_tuples is the number of tuples that have been moved
* page during chain moves but not been scanned over subsequently. * off a page during chain moves but not been scanned over
* The tuple ids of these tuples are not recorded as free offsets * subsequently. The tuple ids of these tuples are not
* for any VacPage, so they will not be cleared from the indexes. * recorded as free offsets for any VacPage, so they will not
* be cleared from the indexes.
*/ */
Assert(keep_tuples >= 0); Assert(keep_tuples >= 0);
for (i = 0; i < nindexes; i++) for (i = 0; i < nindexes; i++)
...@@ -2281,8 +2325,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, ...@@ -2281,8 +2325,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
continue; continue;
/* /*
* * See comments in the walk-along-page loop above, why we * * * See comments in the walk-along-page loop above, why
* have Asserts here instead of if (...) elog(ERROR). * we * have Asserts here instead of if (...) elog(ERROR).
*/ */
Assert(!(htup->t_infomask & HEAP_MOVED_IN)); Assert(!(htup->t_infomask & HEAP_MOVED_IN));
Assert(htup->t_infomask & HEAP_MOVED_OFF); Assert(htup->t_infomask & HEAP_MOVED_OFF);
...@@ -2310,8 +2354,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, ...@@ -2310,8 +2354,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
else else
{ {
/* /*
* No XLOG record, but still need to flag that XID exists on * No XLOG record, but still need to flag that XID exists
* disk * on disk
*/ */
MyXactMadeTempRelUpdate = true; MyXactMadeTempRelUpdate = true;
} }
...@@ -2330,10 +2374,10 @@ repair_frag(VRelStats *vacrelstats, Relation onerel, ...@@ -2330,10 +2374,10 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
} }
/* /*
* Flush dirty pages out to disk. We do this unconditionally, even if we * Flush dirty pages out to disk. We do this unconditionally, even if
* don't need to truncate, because we want to ensure that all tuples have * we don't need to truncate, because we want to ensure that all
* correct on-row commit status on disk (see bufmgr.c's comments for * tuples have correct on-row commit status on disk (see bufmgr.c's
* FlushRelationBuffers()). * comments for FlushRelationBuffers()).
*/ */
FlushRelationBuffers(onerel, blkno); FlushRelationBuffers(onerel, blkno);
...@@ -2379,7 +2423,9 @@ move_chain_tuple(Relation rel, ...@@ -2379,7 +2423,9 @@ move_chain_tuple(Relation rel,
heap_copytuple_with_tuple(old_tup, &newtup); heap_copytuple_with_tuple(old_tup, &newtup);
/* register invalidation of source tuple in catcaches. */ /*
* register invalidation of source tuple in catcaches.
*/
CacheInvalidateHeapTuple(rel, old_tup); CacheInvalidateHeapTuple(rel, old_tup);
/* NO EREPORT(ERROR) TILL CHANGES ARE LOGGED */ /* NO EREPORT(ERROR) TILL CHANGES ARE LOGGED */
...@@ -2394,20 +2440,20 @@ move_chain_tuple(Relation rel, ...@@ -2394,20 +2440,20 @@ move_chain_tuple(Relation rel,
/* /*
* If this page was not used before - clean it. * If this page was not used before - clean it.
* *
* NOTE: a nasty bug used to lurk here. It is possible for the source and * NOTE: a nasty bug used to lurk here. It is possible for the source
* destination pages to be the same (since this tuple-chain member can be * and destination pages to be the same (since this tuple-chain member
* on a page lower than the one we're currently processing in the outer * can be on a page lower than the one we're currently processing in
* loop). If that's true, then after vacuum_page() the source tuple will * the outer loop). If that's true, then after vacuum_page() the
* have been moved, and tuple.t_data will be pointing at garbage. * source tuple will have been moved, and tuple.t_data will be
* Therefore we must do everything that uses old_tup->t_data BEFORE this * pointing at garbage. Therefore we must do everything that uses
* step!! * old_tup->t_data BEFORE this step!!
* *
* This path is different from the other callers of vacuum_page, because we * This path is different from the other callers of vacuum_page, because
* have already incremented the vacpage's offsets_used field to account * we have already incremented the vacpage's offsets_used field to
* for the tuple(s) we expect to move onto the page. Therefore * account for the tuple(s) we expect to move onto the page. Therefore
* vacuum_page's check for offsets_used == 0 is wrong. But since that's a * vacuum_page's check for offsets_used == 0 is wrong. But since
* good debugging check for all other callers, we work around it here * that's a good debugging check for all other callers, we work around
* rather than remove it. * it here rather than remove it.
*/ */
if (!PageIsEmpty(dst_page) && cleanVpd) if (!PageIsEmpty(dst_page) && cleanVpd)
{ {
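The NOTE in the hunk above describes an aliasing hazard: when a chain member lands on the very page we are currently filling, the "source" and "destination" pages are the same buffer, so compacting the destination leaves every pointer into the source dangling. The following is a standalone toy sketch of that ordering constraint only; it is illustrative C, not backend code, and compact() merely stands in for vacuum_page():

/* --- illustrative sketch, not part of the commit --- */
#include <stdio.h>
#include <string.h>

typedef struct { char data[32]; } Page;

static void
compact(Page *p)                        /* stand-in for vacuum_page() */
{
    memset(p->data, 0, sizeof(p->data));
}

int
main(void)
{
    Page        page = { "old tuple" };
    Page       *src = &page;
    Page       *dst = &page;            /* source and destination coincide */
    char        copy[32];

    strcpy(copy, src->data);            /* read the source BEFORE cleaning dst */
    compact(dst);                       /* after this, src->data is garbage */
    printf("%s\n", copy);               /* only the private copy is still valid */
    return 0;
}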
...@@ -2419,8 +2465,8 @@ move_chain_tuple(Relation rel, ...@@ -2419,8 +2465,8 @@ move_chain_tuple(Relation rel,
} }
/* /*
* Update the state of the copied tuple, and store it on the destination * Update the state of the copied tuple, and store it on the
* page. * destination page.
*/ */
newtup.t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED | newtup.t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
HEAP_XMIN_INVALID | HEAP_XMIN_INVALID |
...@@ -2456,15 +2502,17 @@ move_chain_tuple(Relation rel, ...@@ -2456,15 +2502,17 @@ move_chain_tuple(Relation rel,
} }
else else
{ {
/* No XLOG record, but still need to flag that XID exists on disk */ /*
* No XLOG record, but still need to flag that XID exists on disk
*/
MyXactMadeTempRelUpdate = true; MyXactMadeTempRelUpdate = true;
} }
END_CRIT_SECTION(); END_CRIT_SECTION();
/* /*
* Set new tuple's t_ctid pointing to itself for last tuple in chain, and * Set new tuple's t_ctid pointing to itself for last tuple in chain,
* to next tuple in chain otherwise. * and to next tuple in chain otherwise.
*/ */
/* Is this ok after log_heap_move() and END_CRIT_SECTION()? */ /* Is this ok after log_heap_move() and END_CRIT_SECTION()? */
if (!ItemPointerIsValid(ctid)) if (!ItemPointerIsValid(ctid))
...@@ -2515,15 +2563,17 @@ move_plain_tuple(Relation rel, ...@@ -2515,15 +2563,17 @@ move_plain_tuple(Relation rel,
* register invalidation of source tuple in catcaches. * register invalidation of source tuple in catcaches.
* *
* (Note: we do not need to register the copied tuple, because we are not * (Note: we do not need to register the copied tuple, because we are not
* changing the tuple contents and so there cannot be any need to flush * changing the tuple contents and so there cannot be any need to
* negative catcache entries.) * flush negative catcache entries.)
*/ */
CacheInvalidateHeapTuple(rel, old_tup); CacheInvalidateHeapTuple(rel, old_tup);
/* NO EREPORT(ERROR) TILL CHANGES ARE LOGGED */ /* NO EREPORT(ERROR) TILL CHANGES ARE LOGGED */
START_CRIT_SECTION(); START_CRIT_SECTION();
/* Mark new tuple as MOVED_IN by me. */ /*
* Mark new tuple as MOVED_IN by me.
*/
newtup.t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED | newtup.t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
HEAP_XMIN_INVALID | HEAP_XMIN_INVALID |
HEAP_MOVED_OFF); HEAP_MOVED_OFF);
...@@ -2547,7 +2597,9 @@ move_plain_tuple(Relation rel, ...@@ -2547,7 +2597,9 @@ move_plain_tuple(Relation rel,
ItemPointerSet(&(newtup.t_data->t_ctid), dst_vacpage->blkno, newoff); ItemPointerSet(&(newtup.t_data->t_ctid), dst_vacpage->blkno, newoff);
newtup.t_self = newtup.t_data->t_ctid; newtup.t_self = newtup.t_data->t_ctid;
/* Mark old tuple as MOVED_OFF by me. */ /*
* Mark old tuple as MOVED_OFF by me.
*/
old_tup->t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED | old_tup->t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
HEAP_XMIN_INVALID | HEAP_XMIN_INVALID |
HEAP_MOVED_IN); HEAP_MOVED_IN);
...@@ -2567,7 +2619,9 @@ move_plain_tuple(Relation rel, ...@@ -2567,7 +2619,9 @@ move_plain_tuple(Relation rel,
} }
else else
{ {
/* No XLOG record, but still need to flag that XID exists on disk */ /*
* No XLOG record, but still need to flag that XID exists on disk
*/
MyXactMadeTempRelUpdate = true; MyXactMadeTempRelUpdate = true;
} }
...@@ -2644,8 +2698,8 @@ update_hint_bits(Relation rel, VacPageList fraged_pages, int num_fraged_pages, ...@@ -2644,8 +2698,8 @@ update_hint_bits(Relation rel, VacPageList fraged_pages, int num_fraged_pages,
/* /*
* See comments in the walk-along-page loop above, why we have * See comments in the walk-along-page loop above, why we have
* Asserts here instead of if (...) elog(ERROR). The difference * Asserts here instead of if (...) elog(ERROR). The
* here is that we may see MOVED_IN. * difference here is that we may see MOVED_IN.
*/ */
Assert(htup->t_infomask & HEAP_MOVED); Assert(htup->t_infomask & HEAP_MOVED);
Assert(HeapTupleHeaderGetXvac(htup) == GetCurrentTransactionId()); Assert(HeapTupleHeaderGetXvac(htup) == GetCurrentTransactionId());
...@@ -2699,10 +2753,10 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages) ...@@ -2699,10 +2753,10 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
} }
/* /*
* Flush dirty pages out to disk. We do this unconditionally, even if we * Flush dirty pages out to disk. We do this unconditionally, even if
* don't need to truncate, because we want to ensure that all tuples have * we don't need to truncate, because we want to ensure that all
* correct on-row commit status on disk (see bufmgr.c's comments for * tuples have correct on-row commit status on disk (see bufmgr.c's
* FlushRelationBuffers()). * comments for FlushRelationBuffers()).
*/ */
Assert(vacrelstats->rel_pages >= vacuum_pages->empty_end_pages); Assert(vacrelstats->rel_pages >= vacuum_pages->empty_end_pages);
relblocks = vacrelstats->rel_pages - vacuum_pages->empty_end_pages; relblocks = vacrelstats->rel_pages - vacuum_pages->empty_end_pages;
...@@ -2717,7 +2771,8 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages) ...@@ -2717,7 +2771,8 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
RelationGetRelationName(onerel), RelationGetRelationName(onerel),
vacrelstats->rel_pages, relblocks))); vacrelstats->rel_pages, relblocks)));
RelationTruncate(onerel, relblocks); RelationTruncate(onerel, relblocks);
vacrelstats->rel_pages = relblocks; /* set new number of blocks */ vacrelstats->rel_pages = relblocks; /* set new number of
* blocks */
} }
} }
...@@ -2781,9 +2836,9 @@ scan_index(Relation indrel, double num_tuples) ...@@ -2781,9 +2836,9 @@ scan_index(Relation indrel, double num_tuples)
/* /*
* Even though we're not planning to delete anything, we use the * Even though we're not planning to delete anything, we use the
* ambulkdelete call, because (a) the scan happens within the index AM for * ambulkdelete call, because (a) the scan happens within the index AM
* more speed, and (b) it may want to pass private statistics to the * for more speed, and (b) it may want to pass private statistics to
* amvacuumcleanup call. * the amvacuumcleanup call.
*/ */
stats = index_bulk_delete(indrel, dummy_tid_reaped, NULL); stats = index_bulk_delete(indrel, dummy_tid_reaped, NULL);
...@@ -2802,18 +2857,18 @@ scan_index(Relation indrel, double num_tuples) ...@@ -2802,18 +2857,18 @@ scan_index(Relation indrel, double num_tuples)
false); false);
ereport(elevel, ereport(elevel,
(errmsg("index \"%s\" now contains %.0f row versions in %u pages", (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
RelationGetRelationName(indrel), RelationGetRelationName(indrel),
stats->num_index_tuples, stats->num_index_tuples,
stats->num_pages), stats->num_pages),
errdetail("%u index pages have been deleted, %u are currently reusable.\n" errdetail("%u index pages have been deleted, %u are currently reusable.\n"
"%s", "%s",
stats->pages_deleted, stats->pages_free, stats->pages_deleted, stats->pages_free,
vac_show_rusage(&ru0)))); vac_show_rusage(&ru0))));
/* /*
* Check for tuple count mismatch. If the index is partial, then it's OK * Check for tuple count mismatch. If the index is partial, then it's
* for it to have fewer tuples than the heap; else we got trouble. * OK for it to have fewer tuples than the heap; else we got trouble.
*/ */
if (stats->num_index_tuples != num_tuples) if (stats->num_index_tuples != num_tuples)
{ {
...@@ -2869,20 +2924,20 @@ vacuum_index(VacPageList vacpagelist, Relation indrel, ...@@ -2869,20 +2924,20 @@ vacuum_index(VacPageList vacpagelist, Relation indrel,
false); false);
ereport(elevel, ereport(elevel,
(errmsg("index \"%s\" now contains %.0f row versions in %u pages", (errmsg("index \"%s\" now contains %.0f row versions in %u pages",
RelationGetRelationName(indrel), RelationGetRelationName(indrel),
stats->num_index_tuples, stats->num_index_tuples,
stats->num_pages), stats->num_pages),
errdetail("%.0f index row versions were removed.\n" errdetail("%.0f index row versions were removed.\n"
"%u index pages have been deleted, %u are currently reusable.\n" "%u index pages have been deleted, %u are currently reusable.\n"
"%s", "%s",
stats->tuples_removed, stats->tuples_removed,
stats->pages_deleted, stats->pages_free, stats->pages_deleted, stats->pages_free,
vac_show_rusage(&ru0)))); vac_show_rusage(&ru0))));
/* /*
* Check for tuple count mismatch. If the index is partial, then it's OK * Check for tuple count mismatch. If the index is partial, then it's
* for it to have fewer tuples than the heap; else we got trouble. * OK for it to have fewer tuples than the heap; else we got trouble.
*/ */
if (stats->num_index_tuples != num_tuples + keep_tuples) if (stats->num_index_tuples != num_tuples + keep_tuples)
{ {
...@@ -2891,7 +2946,7 @@ vacuum_index(VacPageList vacpagelist, Relation indrel, ...@@ -2891,7 +2946,7 @@ vacuum_index(VacPageList vacpagelist, Relation indrel,
ereport(WARNING, ereport(WARNING,
(errmsg("index \"%s\" contains %.0f row versions, but table contains %.0f row versions", (errmsg("index \"%s\" contains %.0f row versions, but table contains %.0f row versions",
RelationGetRelationName(indrel), RelationGetRelationName(indrel),
stats->num_index_tuples, num_tuples + keep_tuples), stats->num_index_tuples, num_tuples + keep_tuples),
errhint("Rebuild the index with REINDEX."))); errhint("Rebuild the index with REINDEX.")));
} }
...@@ -2976,13 +3031,14 @@ vac_update_fsm(Relation onerel, VacPageList fraged_pages, ...@@ -2976,13 +3031,14 @@ vac_update_fsm(Relation onerel, VacPageList fraged_pages,
/* /*
* We only report pages with free space at least equal to the average * We only report pages with free space at least equal to the average
* request size --- this avoids cluttering FSM with uselessly-small bits * request size --- this avoids cluttering FSM with uselessly-small
* of space. Although FSM would discard pages with little free space * bits of space. Although FSM would discard pages with little free
* anyway, it's important to do this prefiltering because (a) it reduces * space anyway, it's important to do this prefiltering because (a) it
* the time spent holding the FSM lock in RecordRelationFreeSpace, and (b) * reduces the time spent holding the FSM lock in
* FSM uses the number of pages reported as a statistic for guiding space * RecordRelationFreeSpace, and (b) FSM uses the number of pages
* management. If we didn't threshold our reports the same way * reported as a statistic for guiding space management. If we didn't
* vacuumlazy.c does, we'd be skewing that statistic. * threshold our reports the same way vacuumlazy.c does, we'd be
* skewing that statistic.
*/ */
threshold = GetAvgFSMRequestSize(&onerel->rd_node); threshold = GetAvgFSMRequestSize(&onerel->rd_node);
...@@ -2993,9 +3049,9 @@ vac_update_fsm(Relation onerel, VacPageList fraged_pages, ...@@ -2993,9 +3049,9 @@ vac_update_fsm(Relation onerel, VacPageList fraged_pages,
for (i = 0; i < nPages; i++) for (i = 0; i < nPages; i++)
{ {
/* /*
* fraged_pages may contain entries for pages that we later decided to * fraged_pages may contain entries for pages that we later
* truncate from the relation; don't enter them into the free space * decided to truncate from the relation; don't enter them into
* map! * the free space map!
*/ */
if (pagedesc[i]->blkno >= rel_pages) if (pagedesc[i]->blkno >= rel_pages)
break; break;
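Taken together, the two hunks above (the threshold comment and the blkno check) describe a simple prefilter applied to the page list before it is handed to the free space map. Below is a minimal self-contained sketch of that filtering logic, using hypothetical stand-in types rather than the backend's VacPage and FSM entry structures:

/* --- illustrative sketch, not part of the commit --- */
#include <stddef.h>

typedef struct
{
    unsigned int blkno;                 /* block number of the page */
    size_t       freespace;             /* usable free space on the page */
} PageSpace;

/*
 * Keep only pages that still exist after truncation and that have at least
 * 'threshold' bytes free; anything smaller would just clutter the FSM.
 * Returns the number of entries copied to 'out'.
 */
static int
filter_for_fsm(const PageSpace *pages, int npages,
               size_t threshold, unsigned int rel_pages,
               PageSpace *out)
{
    int         nout = 0;
    int         i;

    for (i = 0; i < npages; i++)
    {
        /* entries are assumed ordered by blkno, so truncated pages end the scan */
        if (pages[i].blkno >= rel_pages)
            break;
        if (pages[i].freespace < threshold)
            continue;                   /* too small to be worth reporting */
        out[nout++] = pages[i];
    }
    return nout;
}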
...@@ -3021,7 +3077,7 @@ copy_vac_page(VacPage vacpage) ...@@ -3021,7 +3077,7 @@ copy_vac_page(VacPage vacpage)
/* allocate a VacPageData entry */ /* allocate a VacPageData entry */
newvacpage = (VacPage) palloc(sizeof(VacPageData) + newvacpage = (VacPage) palloc(sizeof(VacPageData) +
vacpage->offsets_free * sizeof(OffsetNumber)); vacpage->offsets_free * sizeof(OffsetNumber));
/* fill it in */ /* fill it in */
if (vacpage->offsets_free > 0) if (vacpage->offsets_free > 0)
...@@ -3191,7 +3247,7 @@ vac_open_indexes(Relation relation, LOCKMODE lockmode, ...@@ -3191,7 +3247,7 @@ vac_open_indexes(Relation relation, LOCKMODE lockmode,
} }
/* /*
* Release the resources acquired by vac_open_indexes. Optionally release * Release the resources acquired by vac_open_indexes. Optionally release
* the locks (say NoLock to keep 'em). * the locks (say NoLock to keep 'em).
*/ */
void void
...@@ -3218,7 +3274,10 @@ vac_close_indexes(int nindexes, Relation *Irel, LOCKMODE lockmode) ...@@ -3218,7 +3274,10 @@ vac_close_indexes(int nindexes, Relation *Irel, LOCKMODE lockmode)
bool bool
vac_is_partial_index(Relation indrel) vac_is_partial_index(Relation indrel)
{ {
/* If the index's AM doesn't support nulls, it's partial for our purposes */ /*
* If the index's AM doesn't support nulls, it's partial for our
* purposes
*/
if (!indrel->rd_am->amindexnulls) if (!indrel->rd_am->amindexnulls)
return true; return true;
...@@ -3295,9 +3354,9 @@ vac_show_rusage(VacRUsage *ru0) ...@@ -3295,9 +3354,9 @@ vac_show_rusage(VacRUsage *ru0)
snprintf(result, sizeof(result), snprintf(result, sizeof(result),
"CPU %d.%02ds/%d.%02du sec elapsed %d.%02d sec.", "CPU %d.%02ds/%d.%02du sec elapsed %d.%02d sec.",
(int) (ru1.ru.ru_stime.tv_sec - ru0->ru.ru_stime.tv_sec), (int) (ru1.ru.ru_stime.tv_sec - ru0->ru.ru_stime.tv_sec),
(int) (ru1.ru.ru_stime.tv_usec - ru0->ru.ru_stime.tv_usec) / 10000, (int) (ru1.ru.ru_stime.tv_usec - ru0->ru.ru_stime.tv_usec) / 10000,
(int) (ru1.ru.ru_utime.tv_sec - ru0->ru.ru_utime.tv_sec), (int) (ru1.ru.ru_utime.tv_sec - ru0->ru.ru_utime.tv_sec),
(int) (ru1.ru.ru_utime.tv_usec - ru0->ru.ru_utime.tv_usec) / 10000, (int) (ru1.ru.ru_utime.tv_usec - ru0->ru.ru_utime.tv_usec) / 10000,
(int) (ru1.tv.tv_sec - ru0->tv.tv_sec), (int) (ru1.tv.tv_sec - ru0->tv.tv_sec),
(int) (ru1.tv.tv_usec - ru0->tv.tv_usec) / 10000); (int) (ru1.tv.tv_usec - ru0->tv.tv_usec) / 10000);
......