Commit 73f617f1 authored by Simon Riggs's avatar Simon Riggs

Various minor comments changes from bgwriter to checkpointer.

parent a5782570
...@@ -92,7 +92,7 @@ ...@@ -92,7 +92,7 @@
* *
* num_backend_writes is used to count the number of buffer writes performed * num_backend_writes is used to count the number of buffer writes performed
* by user backend processes. This counter should be wide enough that it * by user backend processes. This counter should be wide enough that it
* can't overflow during a single bgwriter cycle. num_backend_fsync	 * can't overflow during a single processing cycle. num_backend_fsync
* counts the subset of those writes that also had to do their own fsync, * counts the subset of those writes that also had to do their own fsync,
* because the background writer failed to absorb their request. * because the background writer failed to absorb their request.
* *
...@@ -892,7 +892,7 @@ BgWriterShmemInit(void) ...@@ -892,7 +892,7 @@ BgWriterShmemInit(void)
* since the last one (implied by CHECKPOINT_IS_SHUTDOWN or * since the last one (implied by CHECKPOINT_IS_SHUTDOWN or
* CHECKPOINT_END_OF_RECOVERY). * CHECKPOINT_END_OF_RECOVERY).
* CHECKPOINT_WAIT: wait for completion before returning (otherwise, * CHECKPOINT_WAIT: wait for completion before returning (otherwise,
* just signal bgwriter to do it, and return). * just signal checkpointer to do it, and return).
* CHECKPOINT_CAUSE_XLOG: checkpoint is requested due to xlog filling. * CHECKPOINT_CAUSE_XLOG: checkpoint is requested due to xlog filling.
* (This affects logging, and in particular enables CheckPointWarning.) * (This affects logging, and in particular enables CheckPointWarning.)
*/ */
...@@ -928,7 +928,7 @@ RequestCheckpoint(int flags) ...@@ -928,7 +928,7 @@ RequestCheckpoint(int flags)
/* /*
* Atomically set the request flags, and take a snapshot of the counters. * Atomically set the request flags, and take a snapshot of the counters.
* When we see ckpt_started > old_started, we know the flags we set here * When we see ckpt_started > old_started, we know the flags we set here
* have been seen by bgwriter. * have been seen by checkpointer.
* *
* Note that we OR the flags with any existing flags, to avoid overriding * Note that we OR the flags with any existing flags, to avoid overriding
* a "stronger" request by another backend. The flag senses must be * a "stronger" request by another backend. The flag senses must be
...@@ -943,7 +943,7 @@ RequestCheckpoint(int flags) ...@@ -943,7 +943,7 @@ RequestCheckpoint(int flags)
SpinLockRelease(&bgs->ckpt_lck); SpinLockRelease(&bgs->ckpt_lck);
/* /*
* Send signal to request checkpoint. It's possible that the bgwriter * Send signal to request checkpoint. It's possible that the checkpointer
* hasn't started yet, or is in process of restarting, so we will retry a * hasn't started yet, or is in process of restarting, so we will retry a
* few times if needed. Also, if not told to wait for the checkpoint to * few times if needed. Also, if not told to wait for the checkpoint to
* occur, we consider failure to send the signal to be nonfatal and merely * occur, we consider failure to send the signal to be nonfatal and merely
...@@ -1027,10 +1027,10 @@ RequestCheckpoint(int flags) ...@@ -1027,10 +1027,10 @@ RequestCheckpoint(int flags)
/* /*
* ForwardFsyncRequest * ForwardFsyncRequest
* Forward a file-fsync request from a backend to the bgwriter * Forward a file-fsync request from a backend to the checkpointer
* *
* Whenever a backend is compelled to write directly to a relation * Whenever a backend is compelled to write directly to a relation
* (which should be seldom, if the bgwriter is getting its job done), * (which should be seldom, if the checkpointer is getting its job done),
* the backend calls this routine to pass over knowledge that the relation * the backend calls this routine to pass over knowledge that the relation
* is dirty and must be fsync'd before next checkpoint. We also use this * is dirty and must be fsync'd before next checkpoint. We also use this
* opportunity to count such writes for statistical purposes. * opportunity to count such writes for statistical purposes.
...@@ -1041,7 +1041,7 @@ RequestCheckpoint(int flags) ...@@ -1041,7 +1041,7 @@ RequestCheckpoint(int flags)
* see for details.) * see for details.)
* *
* To avoid holding the lock for longer than necessary, we normally write * To avoid holding the lock for longer than necessary, we normally write
* to the requests[] queue without checking for duplicates. The bgwriter * to the requests[] queue without checking for duplicates. The checkpointer
* will have to eliminate dups internally anyway. However, if we discover * will have to eliminate dups internally anyway. However, if we discover
* that the queue is full, we make a pass over the entire queue to compact * that the queue is full, we make a pass over the entire queue to compact
* it. This is somewhat expensive, but the alternative is for the backend * it. This is somewhat expensive, but the alternative is for the backend
...@@ -1060,7 +1060,7 @@ ForwardFsyncRequest(RelFileNodeBackend rnode, ForkNumber forknum, ...@@ -1060,7 +1060,7 @@ ForwardFsyncRequest(RelFileNodeBackend rnode, ForkNumber forknum,
return false; /* probably shouldn't even get here */ return false; /* probably shouldn't even get here */
if (am_checkpointer) if (am_checkpointer)
elog(ERROR, "ForwardFsyncRequest must not be called in bgwriter"); elog(ERROR, "ForwardFsyncRequest must not be called in checkpointer");
LWLockAcquire(BgWriterCommLock, LW_EXCLUSIVE); LWLockAcquire(BgWriterCommLock, LW_EXCLUSIVE);
...@@ -1132,7 +1132,7 @@ CompactCheckpointerRequestQueue() ...@@ -1132,7 +1132,7 @@ CompactCheckpointerRequestQueue()
ctl.keysize = sizeof(BgWriterRequest); ctl.keysize = sizeof(BgWriterRequest);
ctl.entrysize = sizeof(struct BgWriterSlotMapping); ctl.entrysize = sizeof(struct BgWriterSlotMapping);
ctl.hash = tag_hash; ctl.hash = tag_hash;
htab = hash_create("CompactBgwriterRequestQueue", htab = hash_create("CompactCheckpointerRequestQueue",
BgWriterShmem->num_requests, BgWriterShmem->num_requests,
&ctl, &ctl,
HASH_ELEM | HASH_FUNCTION); HASH_ELEM | HASH_FUNCTION);
......
...@@ -581,7 +581,7 @@ SyncRepWakeQueue(bool all, int mode) ...@@ -581,7 +581,7 @@ SyncRepWakeQueue(bool all, int mode)
} }
/* /*
* The background writer calls this as needed to update the shared * The checkpointer calls this as needed to update the shared
* sync_standbys_defined flag, so that backends don't remain permanently wedged * sync_standbys_defined flag, so that backends don't remain permanently wedged
* if synchronous_standby_names is unset. It's safe to check the current value * if synchronous_standby_names is unset. It's safe to check the current value
* without the lock, because it's only ever updated by one process. But we * without the lock, because it's only ever updated by one process. But we
......
...@@ -82,7 +82,7 @@ typedef struct ...@@ -82,7 +82,7 @@ typedef struct
/* /*
* Are any sync standbys defined? Waiting backends can't reload the * Are any sync standbys defined? Waiting backends can't reload the
* config file safely, so WAL writer updates this value as needed. * config file safely, so checkpointer updates this value as needed.
* Protected by SyncRepLock. * Protected by SyncRepLock.
*/ */
bool sync_standbys_defined; bool sync_standbys_defined;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment