Commit cdc84adb authored by Bruce Momjian

Indent comment pushed to new line by else so it is indented by BSD indent.
parent 914ff1ea
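
For context, a minimal sketch of the behavior being changed (the fragment is
hypothetical, mirroring the "bjm comment" test hunk below; exact spacing
depends on indent's settings). Previously, pgindent moved a comment trailing
an "else" onto its own line but kept it at the same indentation as the
"else", and BSD indent appeared to leave it there:

	else
	/* bjm comment */
		elevel = DEBUG2;

With the comment pushed onto a new, unindented line instead, BSD indent
re-indents it to match the statement it precedes:

	else
		/* bjm comment */
		elevel = DEBUG2;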
@@ -13,7 +13,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.292 2004/09/30 23:21:19 tgl Exp $
* $PostgreSQL: pgsql/src/backend/commands/vacuum.c,v 1.293 2004/10/07 14:15:50 momjian Exp $
*
*-------------------------------------------------------------------------
*/
@@ -240,20 +240,27 @@ vacuum(VacuumStmt *vacstmt)
if (vacstmt->verbose)
elevel = INFO;
else
/* bjm comment */
elevel = DEBUG2;
if (1 == 0)
func();
else
/* bjm comment */
{
elevel = DEBUG2;
}
/*
* We cannot run VACUUM inside a user transaction block; if we were
* inside a transaction, then our commit- and
* start-transaction-command calls would not have the intended effect!
* Furthermore, the forced commit that occurs before truncating the
* relation's file would have the effect of committing the rest of the
* user's transaction too, which would certainly not be the desired
* behavior. (This only applies to VACUUM FULL, though. We could in
* theory run lazy VACUUM inside a transaction block, but we choose to
* disallow that case because we'd rather commit as soon as possible
* after finishing the vacuum. This is mainly so that we can let go
* the AccessExclusiveLock that we may be holding.)
* We cannot run VACUUM inside a user transaction block; if we were inside
* a transaction, then our commit- and start-transaction-command calls
* would not have the intended effect! Furthermore, the forced commit that
* occurs before truncating the relation's file would have the effect of
* committing the rest of the user's transaction too, which would
* certainly not be the desired behavior. (This only applies to VACUUM
* FULL, though. We could in theory run lazy VACUUM inside a transaction
* block, but we choose to disallow that case because we'd rather commit
* as soon as possible after finishing the vacuum. This is mainly so
* that we can let go the AccessExclusiveLock that we may be holding.)
*
* ANALYZE (without VACUUM) can run either way.
*/
@@ -265,18 +272,15 @@ vacuum(VacuumStmt *vacstmt)
else
in_outer_xact = IsInTransactionChain((void *) vacstmt);
/*
* Send info about dead objects to the statistics collector
*/
/* Send info about dead objects to the statistics collector */
if (vacstmt->vacuum)
pgstat_vacuum_tabstat();
/*
* Create special memory context for cross-transaction storage.
*
* Since it is a child of PortalContext, it will go away eventually even
* if we suffer an error; there's no need for special abort cleanup
* logic.
* Since it is a child of PortalContext, it will go away eventually even if
* we suffer an error; there's no need for special abort cleanup logic.
*/
vac_context = AllocSetContextCreate(PortalContext,
"Vacuum",
@@ -295,21 +299,21 @@ vacuum(VacuumStmt *vacstmt)
/*
* It's a database-wide VACUUM.
*
* Compute the initially applicable OldestXmin and FreezeLimit XIDs,
* so that we can record these values at the end of the VACUUM.
* Note that individual tables may well be processed with newer
* values, but we can guarantee that no (non-shared) relations are
* processed with older ones.
* Compute the initially applicable OldestXmin and FreezeLimit XIDs, so
* that we can record these values at the end of the VACUUM. Note that
* individual tables may well be processed with newer values, but we
* can guarantee that no (non-shared) relations are processed with
* older ones.
*
* It is okay to record non-shared values in pg_database, even though
* we may vacuum shared relations with older cutoffs, because only
* the minimum of the values present in pg_database matters. We
* can be sure that shared relations have at some time been
* vacuumed with cutoffs no worse than the global minimum; for, if
* there is a backend in some other DB with xmin = OLDXMIN that's
* determining the cutoff with which we vacuum shared relations,
* it is not possible for that database to have a cutoff newer
* than OLDXMIN recorded in pg_database.
* It is okay to record non-shared values in pg_database, even though we
* may vacuum shared relations with older cutoffs, because only the
* minimum of the values present in pg_database matters. We can be
* sure that shared relations have at some time been vacuumed with
* cutoffs no worse than the global minimum; for, if there is a
* backend in some other DB with xmin = OLDXMIN that's determining the
* cutoff with which we vacuum shared relations, it is not possible
* for that database to have a cutoff newer than OLDXMIN recorded in
* pg_database.
*/
vacuum_set_xid_limits(vacstmt, false,
&initialOldestXmin,
@@ -319,16 +323,15 @@ vacuum(VacuumStmt *vacstmt)
/*
* Decide whether we need to start/commit our own transactions.
*
* For VACUUM (with or without ANALYZE): always do so, so that we can
* release locks as soon as possible. (We could possibly use the
* outer transaction for a one-table VACUUM, but handling TOAST tables
* would be problematic.)
* For VACUUM (with or without ANALYZE): always do so, so that we can release
* locks as soon as possible. (We could possibly use the outer
* transaction for a one-table VACUUM, but handling TOAST tables would be
* problematic.)
*
* For ANALYZE (no VACUUM): if inside a transaction block, we cannot
* start/commit our own transactions. Also, there's no need to do so
* if only processing one relation. For multiple relations when not
* within a transaction block, use own transactions so we can release
* locks sooner.
* start/commit our own transactions. Also, there's no need to do so if
* only processing one relation. For multiple relations when not within a
* transaction block, use own transactions so we can release locks sooner.
*/
if (vacstmt->vacuum)
use_own_xacts = true;
@@ -344,8 +347,8 @@ vacuum(VacuumStmt *vacstmt)
}
/*
* If we are running ANALYZE without per-table transactions, we'll
* need a memory context with table lifetime.
* If we are running ANALYZE without per-table transactions, we'll need a
* memory context with table lifetime.
*/
if (!use_own_xacts)
anl_context = AllocSetContextCreate(PortalContext,
@@ -355,12 +358,12 @@ vacuum(VacuumStmt *vacstmt)
ALLOCSET_DEFAULT_MAXSIZE);
/*
* vacuum_rel expects to be entered with no transaction active; it
* will start and commit its own transaction. But we are called by an
* SQL command, and so we are executing inside a transaction already.
* We commit the transaction started in PostgresMain() here, and start
* another one before exiting to match the commit waiting for us back
* in PostgresMain().
* vacuum_rel expects to be entered with no transaction active; it will
* start and commit its own transaction. But we are called by an SQL
* command, and so we are executing inside a transaction already. We
* commit the transaction started in PostgresMain() here, and start
* another one before exiting to match the commit waiting for us back in
* PostgresMain().
*/
if (use_own_xacts)
{
@@ -376,9 +379,7 @@ vacuum(VacuumStmt *vacstmt)
VacuumCostActive = (VacuumCostDelay > 0);
VacuumCostBalance = 0;
/*
* Loop to process each selected relation.
*/
/* Loop to process each selected relation. */
foreach(cur, relations)
{
Oid relid = lfirst_oid(cur);
@@ -393,11 +394,11 @@ vacuum(VacuumStmt *vacstmt)
MemoryContext old_context = NULL;
/*
* If using separate xacts, start one for analyze.
* Otherwise, we can use the outer transaction, but we
* still need to call analyze_rel in a memory context that
* will be cleaned up on return (else we leak memory while
* processing multiple tables).
* If using separate xacts, start one for analyze. Otherwise,
* we can use the outer transaction, but we still need to call
* analyze_rel in a memory context that will be cleaned up on
* return (else we leak memory while processing multiple
* tables).
*/
if (use_own_xacts)
{
@@ -409,8 +410,8 @@ vacuum(VacuumStmt *vacstmt)
old_context = MemoryContextSwitchTo(anl_context);
/*
* Tell the buffer replacement strategy that vacuum is
* causing the IO
* Tell the buffer replacement strategy that vacuum is causing
* the IO
*/
StrategyHintVacuum(true);
@@ -439,9 +440,7 @@ vacuum(VacuumStmt *vacstmt)
/* Turn off vacuum cost accounting */
VacuumCostActive = false;
/*
* Finish up processing.
*/
/* Finish up processing. */
if (use_own_xacts)
{
/* here, we are not in a transaction */
@@ -456,16 +455,16 @@ vacuum(VacuumStmt *vacstmt)
if (vacstmt->vacuum)
{
/*
* If it was a database-wide VACUUM, print FSM usage statistics
* (we don't make you be superuser to see these).
* If it was a database-wide VACUUM, print FSM usage statistics (we
* don't make you be superuser to see these).
*/
if (vacstmt->relation == NULL)
PrintFreeSpaceMapStatistics(elevel);
/*
* If we completed a database-wide VACUUM without skipping any
* relations, update the database's pg_database row with info
* about the transaction IDs used, and try to truncate pg_clog.
* relations, update the database's pg_database row with info about
* the transaction IDs used, and try to truncate pg_clog.
*/
if (all_rels)
{
@@ -477,8 +476,8 @@ vacuum(VacuumStmt *vacstmt)
/*
* Clean up working storage --- note we must do this after
* StartTransactionCommand, else we might be trying to delete the
* active context!
* StartTransactionCommand, else we might be trying to delete the active
* context!
*/
MemoryContextDelete(vac_context);
vac_context = NULL;
@@ -571,15 +570,11 @@ vacuum_set_xid_limits(VacuumStmt *vacstmt, bool sharedRel,
limit = GetCurrentTransactionId() - (MaxTransactionId >> 2);
}
/*
* Be careful not to generate a "permanent" XID
*/
/* Be careful not to generate a "permanent" XID */
if (!TransactionIdIsNormal(limit))
limit = FirstNormalTransactionId;
/*
* Ensure sane relationship of limits
*/
/* Ensure sane relationship of limits */
if (TransactionIdFollows(limit, *oldestXmin))
{
ereport(WARNING,
@@ -621,9 +616,7 @@ vac_update_relstats(Oid relid, BlockNumber num_pages, double num_tuples,
Form_pg_class pgcform;
Buffer buffer;
/*
* update number of tuples and number of pages in pg_class
*/
/* update number of tuples and number of pages in pg_class */
rd = heap_openr(RelationRelationName, RowExclusiveLock);
ctup = SearchSysCache(RELOID,
@@ -659,10 +652,10 @@ vac_update_relstats(Oid relid, BlockNumber num_pages, double num_tuples,
LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
/*
* Invalidate the tuple in the catcaches; this also arranges to flush
* the relation's relcache entry. (If we fail to commit for some
* reason, no flush will occur, but no great harm is done since there
* are no noncritical state updates here.)
* Invalidate the tuple in the catcaches; this also arranges to flush the
* relation's relcache entry. (If we fail to commit for some reason, no
* flush will occur, but no great harm is done since there are no
* noncritical state updates here.)
*/
CacheInvalidateHeapTuple(rd, &rtup);
@@ -795,8 +788,8 @@ vac_truncate_clog(TransactionId vacuumXID, TransactionId frozenXID)
heap_close(relation, AccessShareLock);
/*
* Do not truncate CLOG if we seem to have suffered wraparound
* already; the computed minimum XID might be bogus.
* Do not truncate CLOG if we seem to have suffered wraparound already;
* the computed minimum XID might be bogus.
*/
if (vacuumAlreadyWrapped)
{
@@ -881,8 +874,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
CHECK_FOR_INTERRUPTS();
/*
* Race condition -- if the pg_class tuple has gone away since the
* last time we saw it, we don't need to vacuum it.
* Race condition -- if the pg_class tuple has gone away since the last
* time we saw it, we don't need to vacuum it.
*/
if (!SearchSysCacheExists(RELOID,
ObjectIdGetDatum(relid),
@@ -894,24 +887,21 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
}
/*
* Determine the type of lock we want --- hard exclusive lock for a
* FULL vacuum, but just ShareUpdateExclusiveLock for concurrent
* vacuum. Either way, we can be sure that no other backend is
* vacuuming the same table.
* Determine the type of lock we want --- hard exclusive lock for a FULL
* vacuum, but just ShareUpdateExclusiveLock for concurrent vacuum. Either
* way, we can be sure that no other backend is vacuuming the same table.
*/
lmode = vacstmt->full ? AccessExclusiveLock : ShareUpdateExclusiveLock;
/*
* Open the class, get an appropriate lock on it, and check
* permissions.
* Open the class, get an appropriate lock on it, and check permissions.
*
* We allow the user to vacuum a table if he is superuser, the table
* owner, or the database owner (but in the latter case, only if it's
* not a shared relation). pg_class_ownercheck includes the superuser
* case.
* We allow the user to vacuum a table if he is superuser, the table owner,
* or the database owner (but in the latter case, only if it's not a
* shared relation). pg_class_ownercheck includes the superuser case.
*
* Note we choose to treat permissions failure as a WARNING and keep
* trying to vacuum the rest of the DB --- is this appropriate?
* Note we choose to treat permissions failure as a WARNING and keep trying
* to vacuum the rest of the DB --- is this appropriate?
*/
onerel = relation_open(relid, lmode);
@@ -928,8 +918,8 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
}
/*
* Check that it's a plain table; we used to do this in get_rel_oids()
* but seems safer to check after we've locked the relation.
* Check that it's a plain table; we used to do this in get_rel_oids() but
* seems safer to check after we've locked the relation.
*/
if (onerel->rd_rel->relkind != expected_relkind)
{
@@ -954,15 +944,14 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
relation_close(onerel, lmode);
StrategyHintVacuum(false);
CommitTransactionCommand();
return true; /* assume no long-lived data in temp
* tables */
return true; /* assume no long-lived data in temp tables */
}
/*
* Get a session-level lock too. This will protect our access to the
* relation across multiple transactions, so that we can vacuum the
* relation's TOAST table (if any) secure in the knowledge that no one
* is deleting the parent relation.
* relation's TOAST table (if any) secure in the knowledge that no one is
* deleting the parent relation.
*
* NOTE: this cannot block, even if someone else is waiting for access,
* because the lock manager knows that both lock requests are from the
@@ -971,14 +960,10 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
onerelid = onerel->rd_lockInfo.lockRelId;
LockRelationForSession(&onerelid, lmode);
/*
* Remember the relation's TOAST relation for later
*/
/* Remember the relation's TOAST relation for later */
toast_relid = onerel->rd_rel->reltoastrelid;
/*
* Do the actual work --- either FULL or "lazy" vacuum
*/
/* Do the actual work --- either FULL or "lazy" vacuum */
if (vacstmt->full)
full_vacuum_rel(onerel, vacstmt);
else
@@ -989,18 +974,16 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
/* all done with this class, but hold lock until commit */
relation_close(onerel, NoLock);
/*
* Complete the transaction and free all temporary memory used.
*/
/* Complete the transaction and free all temporary memory used. */
StrategyHintVacuum(false);
CommitTransactionCommand();
/*
* If the relation has a secondary toast rel, vacuum that too while we
* still hold the session lock on the master table. Note however that
* "analyze" will not get done on the toast table. This is good,
* because the toaster always uses hardcoded index access and
* statistics are totally unimportant for toast relations.
* "analyze" will not get done on the toast table. This is good, because
* the toaster always uses hardcoded index access and statistics are
* totally unimportant for toast relations.
*/
if (toast_relid != InvalidOid)
{
@@ -1008,9 +991,7 @@ vacuum_rel(Oid relid, VacuumStmt *vacstmt, char expected_relkind)
result = false; /* failed to vacuum the TOAST table? */
}
/*
* Now release the session-level lock on the master table.
*/
/* Now release the session-level lock on the master table. */
UnlockRelationForSession(&onerelid, lmode);
return result;
@@ -1039,8 +1020,8 @@ full_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
{
VacPageListData vacuum_pages; /* List of pages to vacuum and/or
* clean indexes */
VacPageListData fraged_pages; /* List of pages with space enough
* for re-using */
VacPageListData fraged_pages; /* List of pages with space enough for
* re-using */
Relation *Irel;
int nindexes,
i;
@@ -1049,9 +1030,7 @@ full_vacuum_rel(Relation onerel, VacuumStmt *vacstmt)
vacuum_set_xid_limits(vacstmt, onerel->rd_rel->relisshared,
&OldestXmin, &FreezeLimit);
/*
* Set up statistics-gathering machinery.
*/
/* Set up statistics-gathering machinery. */
vacrelstats = (VRelStats *) palloc(sizeof(VRelStats));
vacrelstats->rel_pages = 0;
vacrelstats->rel_tuples = 0;
@@ -1265,8 +1244,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
case HEAPTUPLE_LIVE:
/*
* Tuple is good. Consider whether to replace its
* xmin value with FrozenTransactionId.
* Tuple is good. Consider whether to replace its xmin
* value with FrozenTransactionId.
*/
if (TransactionIdIsNormal(HeapTupleHeaderGetXmin(tuple.t_data)) &&
TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data),
@@ -1278,9 +1257,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
pgchanged = true;
}
/*
* Other checks...
*/
/* Other checks... */
if (onerel->rd_rel->relhasoids &&
!OidIsValid(HeapTupleGetOid(&tuple)))
elog(WARNING, "relation \"%s\" TID %u/%u: OID is invalid",
@@ -1289,15 +1266,14 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
case HEAPTUPLE_RECENTLY_DEAD:
/*
* If tuple is recently deleted then we must not
* remove it from relation.
* If tuple is recently deleted then we must not remove it
* from relation.
*/
nkeep += 1;
/*
* If we do shrinking and this tuple is updated one
* then remember it to construct updated tuple
* dependencies.
* If we do shrinking and this tuple is updated one then
* remember it to construct updated tuple dependencies.
*/
if (do_shrinking &&
!(ItemPointerEquals(&(tuple.t_self),
@@ -1319,10 +1295,10 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
case HEAPTUPLE_INSERT_IN_PROGRESS:
/*
* This should not happen, since we hold exclusive
* lock on the relation; shouldn't we raise an error?
* (Actually, it can happen in system catalogs, since
* we tend to release write lock before commit there.)
* This should not happen, since we hold exclusive lock on
* the relation; shouldn't we raise an error? (Actually,
* it can happen in system catalogs, since we tend to
* release write lock before commit there.)
*/
ereport(NOTICE,
(errmsg("relation \"%s\" TID %u/%u: InsertTransactionInProgress %u --- can't shrink relation",
@@ -1332,10 +1308,10 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
case HEAPTUPLE_DELETE_IN_PROGRESS:
/*
* This should not happen, since we hold exclusive
* lock on the relation; shouldn't we raise an error?
* (Actually, it can happen in system catalogs, since
* we tend to release write lock before commit there.)
* This should not happen, since we hold exclusive lock on
* the relation; shouldn't we raise an error? (Actually,
* it can happen in system catalogs, since we tend to
* release write lock before commit there.)
*/
ereport(NOTICE,
(errmsg("relation \"%s\" TID %u/%u: DeleteTransactionInProgress %u --- can't shrink relation",
@@ -1357,12 +1333,11 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
ItemId lpp;
/*
* Here we are building a temporary copy of the page with
* dead tuples removed. Below we will apply
* PageRepairFragmentation to the copy, so that we can
* determine how much space will be available after
* removal of dead tuples. But note we are NOT changing
* the real page yet...
* Here we are building a temporary copy of the page with dead
* tuples removed. Below we will apply PageRepairFragmentation
* to the copy, so that we can determine how much space will
* be available after removal of dead tuples. But note we are
* NOT changing the real page yet...
*/
if (tempPage == NULL)
{
@@ -1412,8 +1387,8 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
/*
* Add the page to fraged_pages if it has a useful amount of free
* space. "Useful" means enough for a minimal-sized tuple. But we
* don't know that accurately near the start of the relation, so
* add pages unconditionally if they have >= BLCKSZ/10 free space.
* don't know that accurately near the start of the relation, so add
* pages unconditionally if they have >= BLCKSZ/10 free space.
*/
do_frag = (vacpage->free >= min_tlen || vacpage->free >= BLCKSZ / 10);
@@ -1429,8 +1404,7 @@ scan_heap(VRelStats *vacrelstats, Relation onerel,
/*
* Include the page in empty_end_pages if it will be empty after
* vacuuming; this is to keep us from using it as a move
* destination.
* vacuuming; this is to keep us from using it as a move destination.
*/
if (notup)
{
@@ -1577,14 +1551,14 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
vacpage->offsets_used = vacpage->offsets_free = 0;
/*
* Scan pages backwards from the last nonempty page, trying to move
* tuples down to lower pages. Quit when we reach a page that we have
* moved any tuples onto, or the first page if we haven't moved
* anything, or when we find a page we cannot completely empty (this
* last condition is handled by "break" statements within the loop).
* Scan pages backwards from the last nonempty page, trying to move tuples
* down to lower pages. Quit when we reach a page that we have moved any
* tuples onto, or the first page if we haven't moved anything, or when we
* find a page we cannot completely empty (this last condition is handled
* by "break" statements within the loop).
*
* NB: this code depends on the vacuum_pages and fraged_pages lists being
* in order by blkno.
* NB: this code depends on the vacuum_pages and fraged_pages lists being in
* order by blkno.
*/
nblocks = vacrelstats->rel_pages;
for (blkno = nblocks - vacuum_pages->empty_end_pages - 1;
@@ -1602,15 +1576,14 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
vacuum_delay_point();
/*
* Forget fraged_pages pages at or after this one; they're no
* longer useful as move targets, since we only want to move down.
* Note that since we stop the outer loop at last_move_dest_block,
* pages removed here cannot have had anything moved onto them
* already.
* Forget fraged_pages pages at or after this one; they're no longer
* useful as move targets, since we only want to move down. Note that
* since we stop the outer loop at last_move_dest_block, pages removed
* here cannot have had anything moved onto them already.
*
* Also note that we don't change the stored fraged_pages list, only
* our local variable num_fraged_pages; so the forgotten pages are
* still available to be loaded into the free space map later.
* Also note that we don't change the stored fraged_pages list, only our
* local variable num_fraged_pages; so the forgotten pages are still
* available to be loaded into the free space map later.
*/
while (num_fraged_pages > 0 &&
fraged_pages->pagedesc[num_fraged_pages - 1]->blkno >= blkno)
@@ -1619,9 +1592,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
--num_fraged_pages;
}
/*
* Process this page of relation.
*/
/* Process this page of relation. */
buf = ReadBuffer(onerel, blkno);
page = BufferGetPage(buf);
@@ -1666,8 +1637,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
else
Assert(!isempty);
chain_tuple_moved = false; /* no one chain-tuple was moved
* off this page, yet */
chain_tuple_moved = false; /* no one chain-tuple was moved off
* this page, yet */
vacpage->blkno = blkno;
maxoff = PageGetMaxOffsetNumber(page);
for (offnum = FirstOffsetNumber;
@@ -1687,38 +1658,36 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
ItemPointerSet(&(tuple.t_self), blkno, offnum);
/*
* VACUUM FULL has an exclusive lock on the relation. So
* normally no other transaction can have pending INSERTs or
* DELETEs in this relation. A tuple is either (a) a tuple in
* a system catalog, inserted or deleted by a not yet
* committed transaction or (b) dead (XMIN_INVALID or
* XMAX_COMMITTED) or (c) inserted by a committed xact
* (XMIN_COMMITTED) or (d) moved by the currently running
* VACUUM. In case (a) we wouldn't be in repair_frag() at all.
* In case (b) we cannot be here, because scan_heap() has
* already marked the item as unused, see continue above. Case
* (c) is what normally is to be expected. Case (d) is only
* possible, if a whole tuple chain has been moved while
* processing this or a higher numbered block.
* VACUUM FULL has an exclusive lock on the relation. So normally
* no other transaction can have pending INSERTs or DELETEs in
* this relation. A tuple is either (a) a tuple in a system
* catalog, inserted or deleted by a not yet committed transaction
* or (b) dead (XMIN_INVALID or XMAX_COMMITTED) or (c) inserted by
* a committed xact (XMIN_COMMITTED) or (d) moved by the currently
* running VACUUM. In case (a) we wouldn't be in repair_frag() at
* all. In case (b) we cannot be here, because scan_heap() has
* already marked the item as unused, see continue above. Case (c)
* is what normally is to be expected. Case (d) is only possible,
* if a whole tuple chain has been moved while processing this or
* a higher numbered block.
*/
if (!(tuple.t_data->t_infomask & HEAP_XMIN_COMMITTED))
{
/*
* There cannot be another concurrently running VACUUM. If
* the tuple had been moved in by a previous VACUUM, the
* There cannot be another concurrently running VACUUM. If the
* tuple had been moved in by a previous VACUUM, the
* visibility check would have set XMIN_COMMITTED. If the
* tuple had been moved in by the currently running
* VACUUM, the loop would have been terminated. We had
* elog(ERROR, ...) here, but as we are testing for a
* can't-happen condition, Assert() seems more
* appropriate.
* tuple had been moved in by the currently running VACUUM,
* the loop would have been terminated. We had elog(ERROR,
* ...) here, but as we are testing for a can't-happen
* condition, Assert() seems more appropriate.
*/
Assert(!(tuple.t_data->t_infomask & HEAP_MOVED_IN));
/*
* If this (chain) tuple is moved by me already then I
* have to check is it in vacpage or not - i.e. is it
* moved while cleaning this page or some previous one.
* If this (chain) tuple is moved by me already then I have to
* check is it in vacpage or not - i.e. is it moved while
* cleaning this page or some previous one.
*/
Assert(tuple.t_data->t_infomask & HEAP_MOVED_OFF);
@@ -1754,23 +1723,21 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
}
/*
* If this tuple is in the chain of tuples created in updates
* by "recent" transactions then we have to move all chain of
* tuples to another places.
* If this tuple is in the chain of tuples created in updates by
* "recent" transactions then we have to move all chain of tuples
* to another places.
*
* NOTE: this test is not 100% accurate: it is possible for a
* tuple to be an updated one with recent xmin, and yet not
* have a corresponding tuple in the vtlinks list. Presumably
* there was once a parent tuple with xmax matching the xmin,
* but it's possible that that tuple has been removed --- for
* example, if it had xmin = xmax then
* HeapTupleSatisfiesVacuum would deem it removable as soon as
* the xmin xact completes.
* NOTE: this test is not 100% accurate: it is possible for a tuple
* to be an updated one with recent xmin, and yet not have a
* corresponding tuple in the vtlinks list. Presumably there was
* once a parent tuple with xmax matching the xmin, but it's
* possible that that tuple has been removed --- for example, if
* it had xmin = xmax then HeapTupleSatisfiesVacuum would deem it
* removable as soon as the xmin xact completes.
*
* To be on the safe side, we abandon the repair_frag process if
* we cannot find the parent tuple in vtlinks. This may be
* overly conservative; AFAICS it would be safe to move the
* chain.
* To be on the safe side, we abandon the repair_frag process if we
* cannot find the parent tuple in vtlinks. This may be overly
* conservative; AFAICS it would be safe to move the chain.
*/
if (((tuple.t_data->t_infomask & HEAP_UPDATED) &&
!TransactionIdPrecedes(HeapTupleHeaderGetXmin(tuple.t_data),
@@ -1811,8 +1778,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
free_vtmove = 100;
/*
* If this tuple is in the begin/middle of the chain then
* we have to move to the end of chain.
* If this tuple is in the begin/middle of the chain then we
* have to move to the end of chain.
*/
while (!(tp.t_data->t_infomask & (HEAP_XMAX_INVALID |
HEAP_MARKED_FOR_UPDATE)) &&
@@ -1835,13 +1802,13 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
if (!ItemIdIsUsed(Citemid))
{
/*
* This means that in the middle of chain there
* was tuple updated by older (than OldestXmin)
* xaction and this tuple is already deleted by
* me. Actually, upper part of chain should be
* removed and seems that this should be handled
* in scan_heap(), but it's not implemented at the
* moment and so we just stop shrinking here.
* This means that in the middle of chain there was
* tuple updated by older (than OldestXmin) xaction
* and this tuple is already deleted by me. Actually,
* upper part of chain should be removed and seems
* that this should be handled in scan_heap(), but
* it's not implemented at the moment and so we just
* stop shrinking here.
*/
elog(DEBUG2, "child itemid in update-chain marked as unused --- can't continue repair_frag");
chain_move_failed = true;
@@ -1860,9 +1827,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
break; /* out of walk-along-page loop */
}
/*
* Check if all items in chain can be moved
*/
/* Check if all items in chain can be moved */
for (;;)
{
Buffer Pbuf;
@@ -1950,15 +1915,14 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
/*
* Read above about cases when !ItemIdIsUsed(Citemid)
* (child item is removed)... Due to the fact that at
* the moment we don't remove unuseful part of
* update-chain, it's possible to get too old parent
* row here. Like as in the case which caused this
* problem, we stop shrinking here. I could try to
* find real parent row but want not to do it because
* of real solution will be implemented anyway, later,
* and we are too close to 6.5 release. - vadim
* 06/11/99
* (child item is removed)... Due to the fact that at the
* moment we don't remove unuseful part of update-chain,
* it's possible to get too old parent row here. Like as
* in the case which caused this problem, we stop
* shrinking here. I could try to find real parent row but
* want not to do it because of real solution will be
* implemented anyway, later, and we are too close to 6.5
* release. - vadim 06/11/99
*/
if (!(TransactionIdEquals(HeapTupleHeaderGetXmax(Ptp.t_data),
HeapTupleHeaderGetXmin(tp.t_data))))
@@ -1984,9 +1948,9 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
if (chain_move_failed)
{
/*
* Undo changes to offsets_used state. We don't
* bother cleaning up the amount-free state, since
* we're not going to do any further tuple motion.
* Undo changes to offsets_used state. We don't bother
* cleaning up the amount-free state, since we're not
* going to do any further tuple motion.
*/
for (i = 0; i < num_vtmove; i++)
{
@@ -1997,9 +1961,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
break; /* out of walk-along-page loop */
}
/*
* Okay, move the whole tuple chain
*/
/* Okay, move the whole tuple chain */
ItemPointerSetInvalid(&Ctid);
for (ti = 0; ti < num_vtmove; ti++)
{
@@ -2107,19 +2069,16 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
} /* walk along page */
/*
* If we broke out of the walk-along-page loop early (ie, still
* have offnum <= maxoff), then we failed to move some tuple off
* this page. No point in shrinking any more, so clean up and
* exit the per-page loop.
* If we broke out of the walk-along-page loop early (ie, still have
* offnum <= maxoff), then we failed to move some tuple off this page.
* No point in shrinking any more, so clean up and exit the per-page
* loop.
*/
if (offnum < maxoff && keep_tuples > 0)
{
OffsetNumber off;
/*
* Fix vacpage state for any unvisited tuples remaining on
* page
*/
/* Fix vacpage state for any unvisited tuples remaining on page */
for (off = OffsetNumberNext(offnum);
off <= maxoff;
off = OffsetNumberNext(off))
@@ -2134,8 +2093,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
continue;
/*
* * See comments in the walk-along-page loop above, why
* we * have Asserts here instead of if (...) elog(ERROR).
* * See comments in the walk-along-page loop above, why we *
* have Asserts here instead of if (...) elog(ERROR).
*/
Assert(!(htup->t_infomask & HEAP_MOVED_IN));
Assert(htup->t_infomask & HEAP_MOVED_OFF);
@@ -2199,20 +2158,20 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
* We have to commit our tuple movings before we truncate the
* relation. Ideally we should do Commit/StartTransactionCommand
* here, relying on the session-level table lock to protect our
* exclusive access to the relation. However, that would require
* a lot of extra code to close and re-open the relation, indexes,
* etc. For now, a quick hack: record status of current
* transaction as committed, and continue.
* exclusive access to the relation. However, that would require a
* lot of extra code to close and re-open the relation, indexes, etc.
* For now, a quick hack: record status of current transaction as
* committed, and continue.
*/
RecordTransactionCommit();
}
/*
* We are not going to move any more tuples across pages, but we still
* need to apply vacuum_page to compact free space in the remaining
* pages in vacuum_pages list. Note that some of these pages may also
* be in the fraged_pages list, and may have had tuples moved onto
* them; if so, we already did vacuum_page and needn't do it again.
* need to apply vacuum_page to compact free space in the remaining pages
* in vacuum_pages list. Note that some of these pages may also be in the
* fraged_pages list, and may have had tuples moved onto them; if so, we
* already did vacuum_page and needn't do it again.
*/
for (i = 0, curpage = vacuum_pages->pagedesc;
i < vacuumed_pages;
@@ -2246,10 +2205,10 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
last_move_dest_block, num_moved);
/*
* It'd be cleaner to make this report at the bottom of this routine,
* but then the rusage would double-count the second pass of index
* vacuuming. So do it here and ignore the relatively small amount of
* processing that occurs below.
* It'd be cleaner to make this report at the bottom of this routine, but
* then the rusage would double-count the second pass of index vacuuming.
* So do it here and ignore the relatively small amount of processing that
* occurs below.
*/
ereport(elevel,
(errmsg("\"%s\": moved %u row versions, truncated %u to %u pages",
@@ -2258,9 +2217,7 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
errdetail("%s",
vac_show_rusage(&ru0))));
/*
* Reflect the motion of system tuples to catalog cache here.
*/
/* Reflect the motion of system tuples to catalog cache here. */
CommandCounterIncrement();
if (Nvacpagelist.num_pages > 0)
@@ -2283,11 +2240,10 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
}
/*
* keep_tuples is the number of tuples that have been moved
* off a page during chain moves but not been scanned over
* subsequently. The tuple ids of these tuples are not
* recorded as free offsets for any VacPage, so they will not
* be cleared from the indexes.
* keep_tuples is the number of tuples that have been moved off a
* page during chain moves but not been scanned over subsequently.
* The tuple ids of these tuples are not recorded as free offsets
* for any VacPage, so they will not be cleared from the indexes.
*/
Assert(keep_tuples >= 0);
for (i = 0; i < nindexes; i++)
@@ -2325,8 +2281,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
continue;
/*
* * See comments in the walk-along-page loop above, why
* we * have Asserts here instead of if (...) elog(ERROR).
* * See comments in the walk-along-page loop above, why we *
* have Asserts here instead of if (...) elog(ERROR).
*/
Assert(!(htup->t_infomask & HEAP_MOVED_IN));
Assert(htup->t_infomask & HEAP_MOVED_OFF);
@@ -2354,8 +2310,8 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
else
{
/*
* No XLOG record, but still need to flag that XID exists
* on disk
* No XLOG record, but still need to flag that XID exists on
* disk
*/
MyXactMadeTempRelUpdate = true;
}
@@ -2374,10 +2330,10 @@ repair_frag(VRelStats *vacrelstats, Relation onerel,
}
/*
* Flush dirty pages out to disk. We do this unconditionally, even if
* we don't need to truncate, because we want to ensure that all
* tuples have correct on-row commit status on disk (see bufmgr.c's
* comments for FlushRelationBuffers()).
* Flush dirty pages out to disk. We do this unconditionally, even if we
* don't need to truncate, because we want to ensure that all tuples have
* correct on-row commit status on disk (see bufmgr.c's comments for
* FlushRelationBuffers()).
*/
FlushRelationBuffers(onerel, blkno);
@@ -2423,9 +2379,7 @@ move_chain_tuple(Relation rel,
heap_copytuple_with_tuple(old_tup, &newtup);
/*
* register invalidation of source tuple in catcaches.
*/
/* register invalidation of source tuple in catcaches. */
CacheInvalidateHeapTuple(rel, old_tup);
/* NO EREPORT(ERROR) TILL CHANGES ARE LOGGED */
@@ -2440,20 +2394,20 @@ move_chain_tuple(Relation rel,
/*
* If this page was not used before - clean it.
*
* NOTE: a nasty bug used to lurk here. It is possible for the source
* and destination pages to be the same (since this tuple-chain member
* can be on a page lower than the one we're currently processing in
* the outer loop). If that's true, then after vacuum_page() the
* source tuple will have been moved, and tuple.t_data will be
* pointing at garbage. Therefore we must do everything that uses
* old_tup->t_data BEFORE this step!!
* NOTE: a nasty bug used to lurk here. It is possible for the source and
* destination pages to be the same (since this tuple-chain member can be
* on a page lower than the one we're currently processing in the outer
* loop). If that's true, then after vacuum_page() the source tuple will
* have been moved, and tuple.t_data will be pointing at garbage.
* Therefore we must do everything that uses old_tup->t_data BEFORE this
* step!!
*
* This path is different from the other callers of vacuum_page, because
* we have already incremented the vacpage's offsets_used field to
* account for the tuple(s) we expect to move onto the page. Therefore
* vacuum_page's check for offsets_used == 0 is wrong. But since
* that's a good debugging check for all other callers, we work around
* it here rather than remove it.
* This path is different from the other callers of vacuum_page, because we
* have already incremented the vacpage's offsets_used field to account
* for the tuple(s) we expect to move onto the page. Therefore
* vacuum_page's check for offsets_used == 0 is wrong. But since that's a
* good debugging check for all other callers, we work around it here
* rather than remove it.
*/
if (!PageIsEmpty(dst_page) && cleanVpd)
{
@@ -2465,8 +2419,8 @@ move_chain_tuple(Relation rel,
}
/*
* Update the state of the copied tuple, and store it on the
* destination page.
* Update the state of the copied tuple, and store it on the destination
* page.
*/
newtup.t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
HEAP_XMIN_INVALID |
@@ -2502,17 +2456,15 @@ move_chain_tuple(Relation rel,
}
else
{
/*
* No XLOG record, but still need to flag that XID exists on disk
*/
/* No XLOG record, but still need to flag that XID exists on disk */
MyXactMadeTempRelUpdate = true;
}
END_CRIT_SECTION();
/*
* Set new tuple's t_ctid pointing to itself for last tuple in chain,
* and to next tuple in chain otherwise.
* Set new tuple's t_ctid pointing to itself for last tuple in chain, and
* to next tuple in chain otherwise.
*/
/* Is this ok after log_heap_move() and END_CRIT_SECTION()? */
if (!ItemPointerIsValid(ctid))
@@ -2563,17 +2515,15 @@ move_plain_tuple(Relation rel,
* register invalidation of source tuple in catcaches.
*
* (Note: we do not need to register the copied tuple, because we are not
* changing the tuple contents and so there cannot be any need to
* flush negative catcache entries.)
* changing the tuple contents and so there cannot be any need to flush
* negative catcache entries.)
*/
CacheInvalidateHeapTuple(rel, old_tup);
/* NO EREPORT(ERROR) TILL CHANGES ARE LOGGED */
START_CRIT_SECTION();
/*
* Mark new tuple as MOVED_IN by me.
*/
/* Mark new tuple as MOVED_IN by me. */
newtup.t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
HEAP_XMIN_INVALID |
HEAP_MOVED_OFF);
@@ -2597,9 +2547,7 @@ move_plain_tuple(Relation rel,
ItemPointerSet(&(newtup.t_data->t_ctid), dst_vacpage->blkno, newoff);
newtup.t_self = newtup.t_data->t_ctid;
/*
* Mark old tuple as MOVED_OFF by me.
*/
/* Mark old tuple as MOVED_OFF by me. */
old_tup->t_data->t_infomask &= ~(HEAP_XMIN_COMMITTED |
HEAP_XMIN_INVALID |
HEAP_MOVED_IN);
@@ -2619,9 +2567,7 @@ move_plain_tuple(Relation rel,
}
else
{
/*
* No XLOG record, but still need to flag that XID exists on disk
*/
/* No XLOG record, but still need to flag that XID exists on disk */
MyXactMadeTempRelUpdate = true;
}
@@ -2698,8 +2644,8 @@ update_hint_bits(Relation rel, VacPageList fraged_pages, int num_fraged_pages,
/*
* See comments in the walk-along-page loop above, why we have
* Asserts here instead of if (...) elog(ERROR). The
* difference here is that we may see MOVED_IN.
* Asserts here instead of if (...) elog(ERROR). The difference
* here is that we may see MOVED_IN.
*/
Assert(htup->t_infomask & HEAP_MOVED);
Assert(HeapTupleHeaderGetXvac(htup) == GetCurrentTransactionId());
@@ -2753,10 +2699,10 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
}
/*
* Flush dirty pages out to disk. We do this unconditionally, even if
* we don't need to truncate, because we want to ensure that all
* tuples have correct on-row commit status on disk (see bufmgr.c's
* comments for FlushRelationBuffers()).
* Flush dirty pages out to disk. We do this unconditionally, even if we
* don't need to truncate, because we want to ensure that all tuples have
* correct on-row commit status on disk (see bufmgr.c's comments for
* FlushRelationBuffers()).
*/
Assert(vacrelstats->rel_pages >= vacuum_pages->empty_end_pages);
relblocks = vacrelstats->rel_pages - vacuum_pages->empty_end_pages;
@@ -2771,8 +2717,7 @@ vacuum_heap(VRelStats *vacrelstats, Relation onerel, VacPageList vacuum_pages)
RelationGetRelationName(onerel),
vacrelstats->rel_pages, relblocks)));
RelationTruncate(onerel, relblocks);
vacrelstats->rel_pages = relblocks; /* set new number of
* blocks */
vacrelstats->rel_pages = relblocks; /* set new number of blocks */
}
}
@@ -2836,9 +2781,9 @@ scan_index(Relation indrel, double num_tuples)
/*
* Even though we're not planning to delete anything, we use the
* ambulkdelete call, because (a) the scan happens within the index AM
* for more speed, and (b) it may want to pass private statistics to
* the amvacuumcleanup call.
* ambulkdelete call, because (a) the scan happens within the index AM for
* more speed, and (b) it may want to pass private statistics to the
* amvacuumcleanup call.
*/
stats = index_bulk_delete(indrel, dummy_tid_reaped, NULL);
@@ -2867,8 +2812,8 @@ scan_index(Relation indrel, double num_tuples)
vac_show_rusage(&ru0))));
/*
* Check for tuple count mismatch. If the index is partial, then it's
* OK for it to have fewer tuples than the heap; else we got trouble.
* Check for tuple count mismatch. If the index is partial, then it's OK
* for it to have fewer tuples than the heap; else we got trouble.
*/
if (stats->num_index_tuples != num_tuples)
{
@@ -2936,8 +2881,8 @@ vacuum_index(VacPageList vacpagelist, Relation indrel,
vac_show_rusage(&ru0))));
/*
* Check for tuple count mismatch. If the index is partial, then it's
* OK for it to have fewer tuples than the heap; else we got trouble.
* Check for tuple count mismatch. If the index is partial, then it's OK
* for it to have fewer tuples than the heap; else we got trouble.
*/
if (stats->num_index_tuples != num_tuples + keep_tuples)
{
@@ -3031,14 +2976,13 @@ vac_update_fsm(Relation onerel, VacPageList fraged_pages,
/*
* We only report pages with free space at least equal to the average
* request size --- this avoids cluttering FSM with uselessly-small
* bits of space. Although FSM would discard pages with little free
* space anyway, it's important to do this prefiltering because (a) it
* reduces the time spent holding the FSM lock in
* RecordRelationFreeSpace, and (b) FSM uses the number of pages
* reported as a statistic for guiding space management. If we didn't
* threshold our reports the same way vacuumlazy.c does, we'd be
* skewing that statistic.
* request size --- this avoids cluttering FSM with uselessly-small bits
* of space. Although FSM would discard pages with little free space
* anyway, it's important to do this prefiltering because (a) it reduces
* the time spent holding the FSM lock in RecordRelationFreeSpace, and (b)
* FSM uses the number of pages reported as a statistic for guiding space
* management. If we didn't threshold our reports the same way
* vacuumlazy.c does, we'd be skewing that statistic.
*/
threshold = GetAvgFSMRequestSize(&onerel->rd_node);
@@ -3049,9 +2993,9 @@ vac_update_fsm(Relation onerel, VacPageList fraged_pages,
for (i = 0; i < nPages; i++)
{
/*
* fraged_pages may contain entries for pages that we later
* decided to truncate from the relation; don't enter them into
* the free space map!
* fraged_pages may contain entries for pages that we later decided to
* truncate from the relation; don't enter them into the free space
* map!
*/
if (pagedesc[i]->blkno >= rel_pages)
break;
@@ -3274,10 +3218,7 @@ vac_close_indexes(int nindexes, Relation *Irel, LOCKMODE lockmode)
bool
vac_is_partial_index(Relation indrel)
{
/*
* If the index's AM doesn't support nulls, it's partial for our
* purposes
*/
/* If the index's AM doesn't support nulls, it's partial for our purposes */
if (!indrel->rd_am->amindexnulls)
return true;
......
@@ -38,10 +38,9 @@ do
# mark some comments for special treatment later
sed 's;/\* *---;/*---X_X;g' |
# workaround for indent bug with 'else' handling
sed 's;\([ ]*\)else[ ]*\(/\*.*\)$;\1else\
\1\2;g' |
sed 's;\([ ]*\)\(}[ ]\)else[ ]*\(/\*.*\)$;\1\2else\
\1\3;g' |
# indent comment so BSD indent will move it
sed 's;\([} ]\)else[ ]*\(/\*.*\)$;\1else\
\2;g' |
detab -t4 -qc |
# work around bug where function that defines no local variables misindents
# switch() case lines and line after #else. Do not do for struct/enum.
......
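
As a hedged illustration of the new rule (the input line here is made up),
the added sed expression splits an "else" with a trailing comment into two
lines, leaving the comment at the left margin so that the later BSD indent
pass will re-indent it under the "else":

printf '    else    /* bjm comment */\n' |
sed 's;\([} ]\)else[ ]*\(/\*.*\)$;\1else\
\2;g'

which prints:

    else
/* bjm comment */

Unlike the two rules it replaces, which re-inserted the captured indentation
("\1\2") in front of the comment, the new rule emits the comment with no
indentation ("\2"), which is what lets indent move it.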