Commit e495c168 authored by Peter Eisentraut

Spelling fixes in code comments

Author: Euler Taveira <euler@timbira.com.br>
parent 1f8b0601
......@@ -321,7 +321,7 @@ GetAllTablesPublicationRelations(void)
/*
* Get publication using oid
*
* The Publication struct and it's data are palloced here.
* The Publication struct and its data are palloc'ed here.
*/
Publication *
GetPublication(Oid pubid)
......
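For context on the palloc'ed convention the corrected comment spells out: the caller gets the Publication in its own current memory context and nothing else keeps a pointer to it. A minimal caller sketch, assuming the struct exposes name and alltables fields (not shown in this diff):

	Publication *pub = GetPublication(pubid);

	/* pub and the data it points to were palloc'ed in the caller's current
	 * memory context, so they disappear when that context is reset or deleted. */
	elog(DEBUG1, "publication \"%s\" is%s FOR ALL TABLES",
		 pub->name, pub->alltables ? "" : " not");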
......@@ -403,7 +403,7 @@ RemoveSubscriptionRel(Oid subid, Oid relid)
/*
* Get all relations for subscription.
*
* Returned list is palloced in current memory context.
* Returned list is palloc'ed in current memory context.
*/
List *
GetSubscriptionRelations(Oid subid)
......@@ -450,7 +450,7 @@ GetSubscriptionRelations(Oid subid)
/*
* Get all relations for subscription that are not in a ready state.
*
* Returned list is palloced in current memory context.
* Returned list is palloc'ed in current memory context.
*/
List *
GetSubscriptionNotReadyRelations(Oid subid)
......
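Both subscription-relation getters above follow the same convention; a minimal caller sketch, assuming the list holds SubscriptionRelState entries with a relid field (an assumption, the diff only shows that a palloc'ed List * is returned):

	List       *rstates = GetSubscriptionNotReadyRelations(subid);
	ListCell   *lc;

	foreach(lc, rstates)
	{
		SubscriptionRelState *rstate = (SubscriptionRelState *) lfirst(lc);

		elog(DEBUG1, "relation %u of subscription %u is not ready yet",
			 rstate->relid, subid);
	}
	/* the list was palloc'ed in the current memory context; free it explicitly
	 * or just let the context clean it up */
	list_free_deep(rstates);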
......@@ -20,7 +20,7 @@
* Non-transactional messages are sent to the plugin at the time when the
* logical decoding reads them from XLOG. This also means that transactional
* messages won't be delivered if the transaction was rolled back but the
* non-transactional one will be delivered always.
* non-transactional one will always be delivered.
*
* Every message carries prefix to avoid conflicts between different decoding
* plugins. The plugin authors must take extra care to use unique prefix,
......
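A minimal sketch of emitting such messages from C with a plugin-specific prefix; LogLogicalMessage() and its argument order are recalled from the logical-messages API rather than shown in this hunk, so treat the calls as an assumption:

	const char *prefix = "my_plugin";       /* assumed to be unique to this plugin */
	const char *payload = "something happened";

	/* transactional: only decoded if the surrounding transaction commits */
	LogLogicalMessage(prefix, payload, strlen(payload), true);
	/* non-transactional: decoded as soon as logical decoding reads it from XLOG */
	LogLogicalMessage(prefix, payload, strlen(payload), false);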
......@@ -24,7 +24,7 @@
* two bytes allow us to be more space efficient.
*
* Replication progress is tracked in a shared memory table
* (ReplicationStates) that's dumped to disk every checkpoint. Entries
* (ReplicationState) that's dumped to disk every checkpoint. Entries
* ('slots') in this table are identified by the internal id. That's the case
* because it allows to increase replication progress during crash
* recovery. To allow doing so we store the original LSN (from the originating
......@@ -48,7 +48,7 @@
* pg_replication_slot is required for the duration. That allows us to
* safely and conflict free assign new origins using a dirty snapshot.
*
* * When creating an in-memory replication progress slot the ReplicationOirgin
* * When creating an in-memory replication progress slot the ReplicationOrigin
* LWLock has to be held exclusively; when iterating over the replication
* progress a shared lock has to be held, the same when advancing the
* replication progress of an individual backend that has not setup as the
......@@ -162,8 +162,8 @@ static ReplicationState *replication_states;
static ReplicationStateCtl *replication_states_ctl;
/*
* Backend-local, cached element from ReplicationStates for use in a backend
* replaying remote commits, so we don't have to search ReplicationStates for
* Backend-local, cached element from ReplicationState for use in a backend
* replaying remote commits, so we don't have to search ReplicationState for
* the backends current RepOriginId.
*/
static ReplicationState *session_replication_state = NULL;
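Tying this to the locking rules quoted earlier in the file comment: iterating over the replication progress only needs the lock in shared mode, while creating an in-memory slot needs it exclusively. A minimal read-side sketch; the roident and remote_lsn field names are assumptions, only the ReplicationOrigin LWLock and the replication_states array appear above:

	int         i;

	LWLockAcquire(ReplicationOriginLock, LW_SHARED);    /* shared: just reading */
	for (i = 0; i < max_replication_slots; i++)
	{
		ReplicationState *state = &replication_states[i];

		if (state->roident != InvalidRepOriginId)
			elog(DEBUG1, "origin %u replayed remotely up to %X/%X",
				 state->roident,
				 (uint32) (state->remote_lsn >> 32),
				 (uint32) state->remote_lsn);
	}
	LWLockRelease(ReplicationOriginLock);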
......@@ -441,7 +441,7 @@ ReplicationOriginShmemSize(void)
/*
* XXX: max_replication_slots is arguably the wrong thing to use, as here
* we keep the replay state of *remote* transactions. But for now it seems
* sufficient to reuse it, lest we introduce a separate guc.
* sufficient to reuse it, lest we introduce a separate GUC.
*/
if (max_replication_slots == 0)
return size;
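For reference, the sizing this comment sits on top of amounts to one ReplicationState per slot; a minimal sketch of the rest of the computation, assuming a states flexible-array member in ReplicationStateCtl (the diff itself only shows the early return):

	size = add_size(size, offsetof(ReplicationStateCtl, states));
	size = add_size(size, mul_size(max_replication_slots, sizeof(ReplicationState)));
	return size;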
......@@ -497,7 +497,7 @@ ReplicationOriginShmemInit(void)
*
* So its just the magic, followed by the statically sized
* ReplicationStateOnDisk structs. Note that the maximum number of
* ReplicationStates is determined by max_replication_slots.
* ReplicationState is determined by max_replication_slots.
* ---------------------------------------------------------------------------
*/
void
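Expressed as a purely illustrative struct, the checkpoint file layout described above would look roughly like this; ReplicationStateFile is a hypothetical name and only the magic-then-structs ordering comes from the comment:

	typedef struct ReplicationStateFile
	{
		uint32      magic;      /* the magic value comes first */
		ReplicationStateOnDisk states[FLEXIBLE_ARRAY_MEMBER];
								/* at most max_replication_slots entries follow */
	} ReplicationStateFile;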
......@@ -1253,7 +1253,7 @@ pg_replication_origin_session_is_setup(PG_FUNCTION_ARGS)
* Return the replication progress for origin setup in the current session.
*
* If 'flush' is set to true it is ensured that the returned value corresponds
* to a local transaction that has been flushed. this is useful if asynchronous
* to a local transaction that has been flushed. This is useful if asynchronous
* commits are used when replaying replicated transactions.
*/
Datum
......@@ -1327,7 +1327,7 @@ pg_replication_origin_advance(PG_FUNCTION_ARGS)
* set up the initial replication state, but not for replay.
*/
replorigin_advance(node, remote_commit, InvalidXLogRecPtr,
true /* go backward */ , true /* wal log */ );
true /* go backward */ , true /* WAL log */ );
UnlockRelationOid(ReplicationOriginRelationId, RowExclusiveLock);
......@@ -1339,7 +1339,7 @@ pg_replication_origin_advance(PG_FUNCTION_ARGS)
* Return the replication progress for an individual replication origin.
*
* If 'flush' is set to true it is ensured that the returned value corresponds
* to a local transaction that has been flushed. this is useful if asynchronous
* to a local transaction that has been flushed. This is useful if asynchronous
* commits are used when replaying replicated transactions.
*/
Datum
......
......@@ -377,7 +377,7 @@ logicalrep_read_typ(StringInfo in, LogicalRepTyp *ltyp)
{
ltyp->remoteid = pq_getmsgint(in, 4);
/* Read tupe name from stream */
/* Read type name from stream */
ltyp->nspname = pstrdup(logicalrep_read_namespace(in));
ltyp->typname = pstrdup(pq_getmsgstring(in));
}
......@@ -459,7 +459,7 @@ logicalrep_read_tuple(StringInfo in, LogicalRepTupleData *tuple)
int i;
int natts;
/* Get of attributes. */
/* Get number of attributes */
natts = pq_getmsgint(in, 2);
memset(tuple->changed, 0, sizeof(tuple->changed));
......
......@@ -33,12 +33,12 @@
* When the desired state appears it will compare its position in the
* stream with the SYNCWAIT position and based on that changes the
* state to based on following rules:
* - if the apply is in front of the sync in the wal stream the new
* - if the apply is in front of the sync in the WAL stream the new
* state is set to CATCHUP and apply loops until the sync process
* catches up to the same LSN as apply
* - if the sync is in front of the apply in the wal stream the new
* - if the sync is in front of the apply in the WAL stream the new
* state is set to SYNCDONE
* - if both apply and sync are at the same position in the wal stream
* - if both apply and sync are at the same position in the WAL stream
* the state of the table is set to READY
* - If the state was set to CATCHUP sync will read the stream and
* apply changes until it catches up to the specified stream
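A minimal sketch of the position comparison those rules boil down to, as seen from the apply side once the sync worker has reached SYNCWAIT; the SUBREL_STATE_* constants and the variable names are assumptions, not taken from this hunk:

	if (current_lsn > syncworker_lsn)
		new_state = SUBREL_STATE_CATCHUP;   /* apply is ahead; sync must catch up */
	else if (current_lsn < syncworker_lsn)
		new_state = SUBREL_STATE_SYNCDONE;  /* sync got ahead of apply */
	else
		new_state = SUBREL_STATE_READY;     /* same position in the WAL stream */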
......@@ -698,7 +698,7 @@ copy_table(Relation rel)
/*
* Start syncing the table in the sync worker.
*
* The returned slot name is palloced in current memory context.
* The returned slot name is palloc'ed in current memory context.
*/
char *
LogicalRepSyncTableStart(XLogRecPtr *origin_startpos)
......