Commit e8abf585 authored by Heikki Linnakangas

Move check for fsync=off so that pendingOps still gets cleared.

Commit 3eb77eba moved the loop and refactored it, and inadvertently
changed the effect of fsync=off so that it also skipped removing entries
from the pendingOps table. That was not intentional, and leads to an
assertion failure if you turn fsync on while the server is running and
reload the config.

Backpatch-through: 12-
Reviewed-By: Thomas Munro
Discussion: https://www.postgresql.org/message-id/3cbc7f4b-a5fa-56e9-9591-c886deb07513%40iki.fi
parent a1691554
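
A minimal standalone C sketch of the control-flow problem described above (illustration only, not code from the patch; process_requests(), the pending[] array, and NPENDING are invented stand-ins for the real pendingOps machinery in ProcessSyncRequests()). With the pre-fix layout, "continue" on fsync=off also bypasses the removal step at the bottom of the loop, so entries linger in the table:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

#define NPENDING 4

static bool pending[NPENDING];      /* stand-in for the pendingOps hash table */

static void
process_requests(bool enable_fsync, bool buggy)
{
    for (int i = 0; i < NPENDING; i++)
    {
        if (!pending[i])
            continue;

        if (buggy && !enable_fsync)
            continue;               /* bug: also bypasses the removal below */

        if (enable_fsync)
            printf("fsync entry %d\n", i);  /* stand-in for the real fsync work */

        pending[i] = false;         /* stand-in for hash_search(..., HASH_REMOVE, ...) */
    }
}

int
main(void)
{
    /* Fixed layout: entries are always cleared, whether fsync is on or off. */
    for (int i = 0; i < NPENDING; i++)
        pending[i] = true;
    process_requests(false, false);
    for (int i = 0; i < NPENDING; i++)
        assert(!pending[i]);        /* holds */

    /* Buggy layout with fsync=off: nothing is cleared. */
    for (int i = 0; i < NPENDING; i++)
        pending[i] = true;
    process_requests(false, true);
    for (int i = 0; i < NPENDING; i++)
        assert(!pending[i]);        /* fails; a stand-in for the cycle-counter
                                     * Assert that trips in sync.c */
    return 0;
}

The actual fix, shown in the diff below, wraps only the fsync work in "if (enableFsync)" so that the hash-remove at the bottom of the loop always runs.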
@@ -315,14 +315,6 @@ ProcessSyncRequests(void)
     {
         int         failures;
 
-        /*
-         * If fsync is off then we don't have to bother opening the file at
-         * all. (We delay checking until this point so that changing fsync on
-         * the fly behaves sensibly.)
-         */
-        if (!enableFsync)
-            continue;
-
         /*
         * If the entry is new then don't process it this time; it is new.
         * Note "continue" bypasses the hash-remove call at the bottom of the
@@ -335,78 +327,88 @@ ProcessSyncRequests(void)
         Assert((CycleCtr) (entry->cycle_ctr + 1) == sync_cycle_ctr);
 
-        /*
-         * If in checkpointer, we want to absorb pending requests every so
-         * often to prevent overflow of the fsync request queue. It is
-         * unspecified whether newly-added entries will be visited by
-         * hash_seq_search, but we don't care since we don't need to process
-         * them anyway.
-         */
-        if (--absorb_counter <= 0)
-        {
-            AbsorbSyncRequests();
-            absorb_counter = FSYNCS_PER_ABSORB;
-        }
-
-        /*
-         * The fsync table could contain requests to fsync segments that have
-         * been deleted (unlinked) by the time we get to them. Rather than
-         * just hoping an ENOENT (or EACCES on Windows) error can be ignored,
-         * what we do on error is absorb pending requests and then retry.
-         * Since mdunlink() queues a "cancel" message before actually
-         * unlinking, the fsync request is guaranteed to be marked canceled
-         * after the absorb if it really was this case. DROP DATABASE likewise
-         * has to tell us to forget fsync requests before it starts deletions.
-         */
-        for (failures = 0; !entry->canceled; failures++)
-        {
-            char        path[MAXPGPATH];
-
-            INSTR_TIME_SET_CURRENT(sync_start);
-            if (syncsw[entry->tag.handler].sync_syncfiletag(&entry->tag,
-                                                            path) == 0)
-            {
-                /* Success; update statistics about sync timing */
-                INSTR_TIME_SET_CURRENT(sync_end);
-                sync_diff = sync_end;
-                INSTR_TIME_SUBTRACT(sync_diff, sync_start);
-                elapsed = INSTR_TIME_GET_MICROSEC(sync_diff);
-                if (elapsed > longest)
-                    longest = elapsed;
-                total_elapsed += elapsed;
-                processed++;
-
-                if (log_checkpoints)
-                    elog(DEBUG1, "checkpoint sync: number=%d file=%s time=%.3f msec",
-                         processed,
-                         path,
-                         (double) elapsed / 1000);
-
-                break;          /* out of retry loop */
-            }
-
-            /*
-             * It is possible that the relation has been dropped or truncated
-             * since the fsync request was entered. Therefore, allow ENOENT,
-             * but only if we didn't fail already on this file.
-             */
-            if (!FILE_POSSIBLY_DELETED(errno) || failures > 0)
-                ereport(data_sync_elevel(ERROR),
-                        (errcode_for_file_access(),
-                         errmsg("could not fsync file \"%s\": %m",
-                                path)));
-            else
-                ereport(DEBUG1,
-                        (errcode_for_file_access(),
-                         errmsg("could not fsync file \"%s\" but retrying: %m",
-                                path)));
-
-            /*
-             * Absorb incoming requests and check to see if a cancel arrived
-             * for this relation fork.
-             */
-            AbsorbSyncRequests();
-            absorb_counter = FSYNCS_PER_ABSORB; /* might as well... */
-        }                       /* end retry loop */
+        /*
+         * If fsync is off then we don't have to bother opening the file at
+         * all. (We delay checking until this point so that changing fsync on
+         * the fly behaves sensibly.)
+         */
+        if (enableFsync)
+        {
+            /*
+             * If in checkpointer, we want to absorb pending requests every so
+             * often to prevent overflow of the fsync request queue. It is
+             * unspecified whether newly-added entries will be visited by
+             * hash_seq_search, but we don't care since we don't need to
+             * process them anyway.
+             */
+            if (--absorb_counter <= 0)
+            {
+                AbsorbSyncRequests();
+                absorb_counter = FSYNCS_PER_ABSORB;
+            }
+
+            /*
+             * The fsync table could contain requests to fsync segments that
+             * have been deleted (unlinked) by the time we get to them. Rather
+             * than just hoping an ENOENT (or EACCES on Windows) error can be
+             * ignored, what we do on error is absorb pending requests and
+             * then retry. Since mdunlink() queues a "cancel" message before
+             * actually unlinking, the fsync request is guaranteed to be
+             * marked canceled after the absorb if it really was this case.
+             * DROP DATABASE likewise has to tell us to forget fsync requests
+             * before it starts deletions.
+             */
+            for (failures = 0; !entry->canceled; failures++)
+            {
+                char        path[MAXPGPATH];
+
+                INSTR_TIME_SET_CURRENT(sync_start);
+                if (syncsw[entry->tag.handler].sync_syncfiletag(&entry->tag,
+                                                                path) == 0)
+                {
+                    /* Success; update statistics about sync timing */
+                    INSTR_TIME_SET_CURRENT(sync_end);
+                    sync_diff = sync_end;
+                    INSTR_TIME_SUBTRACT(sync_diff, sync_start);
+                    elapsed = INSTR_TIME_GET_MICROSEC(sync_diff);
+                    if (elapsed > longest)
+                        longest = elapsed;
+                    total_elapsed += elapsed;
+                    processed++;
+
+                    if (log_checkpoints)
+                        elog(DEBUG1, "checkpoint sync: number=%d file=%s time=%.3f msec",
+                             processed,
+                             path,
+                             (double) elapsed / 1000);
+
+                    break;      /* out of retry loop */
+                }
+
+                /*
+                 * It is possible that the relation has been dropped or
+                 * truncated since the fsync request was entered. Therefore,
+                 * allow ENOENT, but only if we didn't fail already on this
+                 * file.
+                 */
+                if (!FILE_POSSIBLY_DELETED(errno) || failures > 0)
+                    ereport(data_sync_elevel(ERROR),
+                            (errcode_for_file_access(),
+                             errmsg("could not fsync file \"%s\": %m",
+                                    path)));
+                else
+                    ereport(DEBUG1,
+                            (errcode_for_file_access(),
+                             errmsg("could not fsync file \"%s\" but retrying: %m",
+                                    path)));
+
+                /*
+                 * Absorb incoming requests and check to see if a cancel
+                 * arrived for this relation fork.
+                 */
+                AbsorbSyncRequests();
+                absorb_counter = FSYNCS_PER_ABSORB; /* might as well... */
+            }                   /* end retry loop */
+        }
 
         /* We are done with this entry, remove it */
         if (hash_search(pendingOps, &entry->tag, HASH_REMOVE, NULL) == NULL)