Commit e0763364 authored by Andres Freund

code: replace 'master' with 'leader' where appropriate.

Leader already is the more widely used terminology, but a few places
didn't get the message.

Author: Andres Freund
Reviewed-By: David Steele
Discussion: https://postgr.es/m/20200615182235.x7lch5n6kcjq4aue@alap3.anarazel.de
parent 5e7bbb52
@@ -11,7 +11,7 @@
  * pages from a relation that is in the process of being dropped.
  *
  * While prewarming, autoprewarm will use two workers. There's a
- * master worker that reads and sorts the list of blocks to be
+ * leader worker that reads and sorts the list of blocks to be
  * prewarmed and then launches a per-database worker for each
  * relevant database in turn. The former keeps running after the
  * initial prewarm is complete to update the dump file periodically.
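The two-tier design described above can be sketched with the background-worker API. Below is a rough illustration of how a leader launches one per-database worker at a time and waits for it; the helper name and argument handling are hypothetical, not the actual autoprewarm code:

#include "postgres.h"
#include "miscadmin.h"
#include "postmaster/bgworker.h"

/* Hypothetical sketch: launch a per-database worker and wait for it. */
static void
launch_prewarm_worker_for(Oid dboid)
{
    BackgroundWorker worker;
    BackgroundWorkerHandle *handle;

    memset(&worker, 0, sizeof(worker));
    worker.bgw_flags = BGWORKER_SHM_ACCESS | BGWORKER_BACKEND_DATABASE_CONNECTION;
    worker.bgw_start_time = BgWorkerStart_ConsistentState;
    worker.bgw_restart_time = BGW_NEVER_RESTART;
    strcpy(worker.bgw_library_name, "pg_prewarm");
    strcpy(worker.bgw_function_name, "autoprewarm_database_main");
    strcpy(worker.bgw_name, "autoprewarm worker");
    strcpy(worker.bgw_type, "autoprewarm worker");
    worker.bgw_main_arg = ObjectIdGetDatum(dboid);  /* which database to prewarm */
    worker.bgw_notify_pid = MyProcPid;              /* lets us wait for its exit */

    if (RegisterDynamicBackgroundWorker(&worker, &handle))
        WaitForBackgroundWorkerShutdown(handle);    /* one database at a time */
}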
@@ -88,7 +88,7 @@ PG_FUNCTION_INFO_V1(autoprewarm_dump_now);
 static void apw_load_buffers(void);
 static int apw_dump_now(bool is_bgworker, bool dump_unlogged);
-static void apw_start_master_worker(void);
+static void apw_start_leader_worker(void);
 static void apw_start_database_worker(void);
 static bool apw_init_shmem(void);
 static void apw_detach_shmem(int code, Datum arg);
@@ -146,11 +146,11 @@ _PG_init(void)
     /* Register autoprewarm worker, if enabled. */
     if (autoprewarm)
-        apw_start_master_worker();
+        apw_start_leader_worker();
 }

 /*
- * Main entry point for the master autoprewarm process. Per-database workers
+ * Main entry point for the leader autoprewarm process. Per-database workers
  * have a separate entry point.
  */
 void
@@ -716,7 +716,7 @@ autoprewarm_start_worker(PG_FUNCTION_ARGS)
                 errmsg("autoprewarm worker is already running under PID %lu",
                        (unsigned long) pid)));

-    apw_start_master_worker();
+    apw_start_leader_worker();

    PG_RETURN_VOID();
 }
@@ -786,10 +786,10 @@ apw_detach_shmem(int code, Datum arg)
 }

 /*
- * Start autoprewarm master worker process.
+ * Start autoprewarm leader worker process.
  */
 static void
-apw_start_master_worker(void)
+apw_start_leader_worker(void)
 {
     BackgroundWorker worker;
     BackgroundWorkerHandle *handle;
@@ -801,8 +801,8 @@ apw_start_master_worker(void)
     worker.bgw_start_time = BgWorkerStart_ConsistentState;
     strcpy(worker.bgw_library_name, "pg_prewarm");
     strcpy(worker.bgw_function_name, "autoprewarm_main");
-    strcpy(worker.bgw_name, "autoprewarm master");
-    strcpy(worker.bgw_type, "autoprewarm master");
+    strcpy(worker.bgw_name, "autoprewarm leader");
+    strcpy(worker.bgw_type, "autoprewarm leader");

    if (process_shared_preload_libraries_in_progress)
    {
......
@@ -332,12 +332,12 @@ PostgreSQL documentation
    </para>
    <para>
     Requesting exclusive locks on database objects while running a parallel dump could
-    cause the dump to fail. The reason is that the <application>pg_dump</application> master process
+    cause the dump to fail. The reason is that the <application>pg_dump</application> leader process
     requests shared locks on the objects that the worker processes are going to dump later
     in order to
     make sure that nobody deletes them and makes them go away while the dump is running.
     If another client then requests an exclusive lock on a table, that lock will not be
-    granted but will be queued waiting for the shared lock of the master process to be
+    granted but will be queued waiting for the shared lock of the leader process to be
     released. Consequently any other access to the table will not be granted either and
     will queue after the exclusive lock request. This includes the worker process trying
     to dump the table. Without any precautions this would be a classic deadlock situation.
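That lock queue can be made concrete with three sessions. The sketch below uses libpq; connection setup, error handling, result cleanup, and the table name are all assumed for illustration:

#include <libpq-fe.h>

/* Illustrative only: reproduce the queue described above. */
static void
illustrate_lock_queue(const char *conninfo)
{
    PGconn *leader = PQconnectdb(conninfo);
    PGconn *other = PQconnectdb(conninfo);
    PGconn *worker = PQconnectdb(conninfo);

    /* 1. The leader takes its shared lock up front. */
    PQexec(leader, "BEGIN; LOCK TABLE t IN ACCESS SHARE MODE;");
    /* 2. Another client asks for an exclusive lock: queued behind the leader. */
    PQsendQuery(other, "BEGIN; LOCK TABLE t IN ACCESS EXCLUSIVE MODE;");
    /* 3. The worker's shared-lock request queues behind the exclusive one;
     * if the leader now waits for the worker, nothing can proceed. */
    PQsendQuery(worker, "BEGIN; LOCK TABLE t IN ACCESS SHARE MODE;");
}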
@@ -354,14 +354,14 @@ PostgreSQL documentation
     for standbys. With this feature, database clients can ensure they see
     the same data set even though they use different connections.
     <command>pg_dump -j</command> uses multiple database connections; it
-    connects to the database once with the master process and once again
+    connects to the database once with the leader process and once again
     for each worker job. Without the synchronized snapshot feature, the
     different worker jobs wouldn't be guaranteed to see the same data in
     each connection, which could lead to an inconsistent backup.
    </para>
    <para>
     If you want to run a parallel dump of a pre-9.2 server, you need to make sure that the
-    database content doesn't change from between the time the master connects to the
+    database content doesn't change from between the time the leader connects to the
     database until the last worker job has connected to the database. The easiest way to
     do this is to halt any data modifying processes (DDL and DML) accessing the database
     before starting the backup. You also need to specify the
......
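The synchronized-snapshot machinery described above can also be exercised directly from libpq. A minimal sketch (error handling omitted) of a leader connection exporting its snapshot and a worker connection adopting it:

#include <stdio.h>
#include <libpq-fe.h>

static void
sync_worker_snapshot(PGconn *leader, PGconn *worker)
{
    PGresult *res;
    char sql[128];

    PQexec(leader, "BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ");
    res = PQexec(leader, "SELECT pg_export_snapshot()");

    PQexec(worker, "BEGIN TRANSACTION ISOLATION LEVEL REPEATABLE READ");
    snprintf(sql, sizeof(sql), "SET TRANSACTION SNAPSHOT '%s'",
             PQgetvalue(res, 0, 0));
    PQexec(worker, sql);        /* worker now sees the leader's snapshot */
    PQclear(res);
}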
@@ -89,9 +89,9 @@ typedef struct FixedParallelState
     Oid         temp_toast_namespace_id;
     int         sec_context;
     bool        is_superuser;
-    PGPROC     *parallel_master_pgproc;
-    pid_t       parallel_master_pid;
-    BackendId   parallel_master_backend_id;
+    PGPROC     *parallel_leader_pgproc;
+    pid_t       parallel_leader_pid;
+    BackendId   parallel_leader_backend_id;
     TimestampTz xact_ts;
     TimestampTz stmt_ts;
     SerializableXactHandle serializable_xact_handle;
@@ -124,7 +124,7 @@ static FixedParallelState *MyFixedParallelState;
 static dlist_head pcxt_list = DLIST_STATIC_INIT(pcxt_list);

 /* Backend-local copy of data from FixedParallelState. */
-static pid_t ParallelMasterPid;
+static pid_t ParallelLeaderPid;

 /*
  * List of internal parallel worker entry points. We need this for
@@ -323,9 +323,9 @@ InitializeParallelDSM(ParallelContext *pcxt)
     GetUserIdAndSecContext(&fps->current_user_id, &fps->sec_context);
     GetTempNamespaceState(&fps->temp_namespace_id,
                           &fps->temp_toast_namespace_id);
-    fps->parallel_master_pgproc = MyProc;
-    fps->parallel_master_pid = MyProcPid;
-    fps->parallel_master_backend_id = MyBackendId;
+    fps->parallel_leader_pgproc = MyProc;
+    fps->parallel_leader_pid = MyProcPid;
+    fps->parallel_leader_backend_id = MyBackendId;
     fps->xact_ts = GetCurrentTransactionStartTimestamp();
     fps->stmt_ts = GetCurrentStatementStartTimestamp();
     fps->serializable_xact_handle = ShareSerializableXact();
@@ -857,8 +857,8 @@ WaitForParallelWorkersToFinish(ParallelContext *pcxt)
  *
  * This function ensures that workers have been completely shutdown. The
  * difference between WaitForParallelWorkersToFinish and this function is
- * that former just ensures that last message sent by worker backend is
- * received by master backend whereas this ensures the complete shutdown.
+ * that the former just ensures that last message sent by a worker backend is
+ * received by the leader backend whereas this ensures the complete shutdown.
  */
 static void
 WaitForParallelWorkersToExit(ParallelContext *pcxt)
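To situate the two waits, here is a rough sketch of a leader-side parallel-operation lifecycle; "my_library" and "my_worker_main" are placeholder names and the shared-memory setup is elided:

#include "access/parallel.h"

static void
run_parallel_operation(int nworkers)
{
    ParallelContext *pcxt;

    EnterParallelMode();
    pcxt = CreateParallelContext("my_library", "my_worker_main", nworkers);
    /* ... shm_toc_estimate() / shm_toc_allocate() / shm_toc_insert() ... */
    InitializeParallelDSM(pcxt);
    LaunchParallelWorkers(pcxt);

    /* ... the leader does its own share of the work here ... */

    WaitForParallelWorkersToFinish(pcxt);   /* last worker messages received */
    /* ... read results back out of shared memory ... */
    DestroyParallelContext(pcxt);   /* also waits for workers to fully exit */
    ExitParallelMode();
}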
@@ -1302,8 +1302,8 @@ ParallelWorkerMain(Datum main_arg)
     MyFixedParallelState = fps;

     /* Arrange to signal the leader if we exit. */
-    ParallelMasterPid = fps->parallel_master_pid;
-    ParallelMasterBackendId = fps->parallel_master_backend_id;
+    ParallelLeaderPid = fps->parallel_leader_pid;
+    ParallelLeaderBackendId = fps->parallel_leader_backend_id;
     on_shmem_exit(ParallelWorkerShutdown, (Datum) 0);

     /*
@@ -1318,8 +1318,8 @@ ParallelWorkerMain(Datum main_arg)
     shm_mq_set_sender(mq, MyProc);
     mqh = shm_mq_attach(mq, seg, NULL);
     pq_redirect_to_shm_mq(seg, mqh);
-    pq_set_parallel_master(fps->parallel_master_pid,
-                           fps->parallel_master_backend_id);
+    pq_set_parallel_leader(fps->parallel_leader_pid,
+                           fps->parallel_leader_backend_id);

     /*
      * Send a BackendKeyData message to the process that initiated parallelism
@@ -1347,8 +1347,8 @@ ParallelWorkerMain(Datum main_arg)
      * deadlock. (If we can't join the lock group, the leader has gone away,
      * so just exit quietly.)
      */
-    if (!BecomeLockGroupMember(fps->parallel_master_pgproc,
-                               fps->parallel_master_pid))
+    if (!BecomeLockGroupMember(fps->parallel_leader_pgproc,
+                               fps->parallel_leader_pid))
         return;

     /*
@@ -1410,7 +1410,7 @@ ParallelWorkerMain(Datum main_arg)
     /* Restore transaction snapshot. */
     tsnapspace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT, false);
     RestoreTransactionSnapshot(RestoreSnapshot(tsnapspace),
-                               fps->parallel_master_pgproc);
+                               fps->parallel_leader_pgproc);

     /* Restore active snapshot. */
     asnapspace = shm_toc_lookup(toc, PARALLEL_KEY_ACTIVE_SNAPSHOT, false);
@@ -1510,9 +1510,9 @@ ParallelWorkerReportLastRecEnd(XLogRecPtr last_xlog_end)
 static void
 ParallelWorkerShutdown(int code, Datum arg)
 {
-    SendProcSignal(ParallelMasterPid,
+    SendProcSignal(ParallelLeaderPid,
                    PROCSIG_PARALLEL_MESSAGE,
-                   ParallelMasterBackendId);
+                   ParallelLeaderBackendId);
 }

 /*
......
@@ -750,7 +750,7 @@ GetCurrentCommandId(bool used)
     {
         /*
          * Forbid setting currentCommandIdUsed in a parallel worker, because
-         * we have no provision for communicating this back to the master. We
+         * we have no provision for communicating this back to the leader. We
          * could relax this restriction when currentCommandIdUsed was already
          * true at the start of the parallel operation.
          */
@@ -987,7 +987,7 @@ ExitParallelMode(void)
 /*
  * IsInParallelMode
  *
- * Are we in a parallel operation, as either the master or a worker? Check
+ * Are we in a parallel operation, as either the leader or a worker? Check
  * this to prohibit operations that change backend-local state expected to
  * match across all workers. Mere caches usually don't require such a
  * restriction. State modified in a strict push/pop fashion, such as the
@@ -2164,13 +2164,13 @@ CommitTransaction(void)
     else
     {
         /*
-         * We must not mark our XID committed; the parallel master is
+         * We must not mark our XID committed; the parallel leader is
          * responsible for that.
          */
         latestXid = InvalidTransactionId;

         /*
-         * Make sure the master will know about any WAL we wrote before it
+         * Make sure the leader will know about any WAL we wrote before it
          * commits.
          */
         ParallelWorkerReportLastRecEnd(XactLastRecEnd);
@@ -2699,7 +2699,7 @@ AbortTransaction(void)
         latestXid = InvalidTransactionId;

         /*
-         * Since the parallel master won't get our value of XactLastRecEnd in
+         * Since the parallel leader won't get our value of XactLastRecEnd in
          * this case, we nudge WAL-writer ourselves in this case. See related
          * comments in RecordTransactionAbort for why this matters.
          */
@@ -4488,7 +4488,7 @@ RollbackAndReleaseCurrentSubTransaction(void)
     /*
      * Unlike ReleaseCurrentSubTransaction(), this is nominally permitted
-     * during parallel operations. That's because we may be in the master,
+     * during parallel operations. That's because we may be in the leader,
      * recovering from an error thrown while we were in parallel mode. We
      * won't reach here in a worker, because BeginInternalSubTransaction()
      * will have failed.
......
@@ -190,7 +190,7 @@ BuildTupleHashTableExt(PlanState *parent,
     hashtable->cur_eq_func = NULL;

     /*
-     * If parallelism is in use, even if the master backend is performing the
+     * If parallelism is in use, even if the leader backend is performing the
      * scan itself, we don't want to create the hashtable exactly the same way
      * in all workers. As hashtables are iterated over in keyspace-order,
      * doing so in all processes in the same way is likely to lead to
......
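The fix that comment motivates is to vary the hash IV per process. A sketch of the idea (the real code seeds it inside BuildTupleHashTableExt, possibly with slightly different spelling):

#include "postgres.h"
#include "access/parallel.h"    /* ParallelWorkerNumber; -1 in the leader */
#include "common/hashfn.h"

/* Each process mixes a different seed into tuple hashing, so the leader
 * and workers do not iterate their hash tables in identical keyspace order. */
static uint32
choose_hash_iv(bool use_variable_hash_iv)
{
    return use_variable_hash_iv ? murmurhash32(ParallelWorkerNumber) : 0;
}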
@@ -23,8 +23,8 @@
 static shm_mq_handle *pq_mq_handle;
 static bool pq_mq_busy = false;
-static pid_t pq_mq_parallel_master_pid = 0;
-static pid_t pq_mq_parallel_master_backend_id = InvalidBackendId;
+static pid_t pq_mq_parallel_leader_pid = 0;
+static pid_t pq_mq_parallel_leader_backend_id = InvalidBackendId;

 static void pq_cleanup_redirect_to_shm_mq(dsm_segment *seg, Datum arg);
 static void mq_comm_reset(void);
@@ -73,15 +73,15 @@ pq_cleanup_redirect_to_shm_mq(dsm_segment *seg, Datum arg)
 }

 /*
- * Arrange to SendProcSignal() to the parallel master each time we transmit
+ * Arrange to SendProcSignal() to the parallel leader each time we transmit
  * message data via the shm_mq.
  */
 void
-pq_set_parallel_master(pid_t pid, BackendId backend_id)
+pq_set_parallel_leader(pid_t pid, BackendId backend_id)
 {
     Assert(PqCommMethods == &PqCommMqMethods);
-    pq_mq_parallel_master_pid = pid;
-    pq_mq_parallel_master_backend_id = backend_id;
+    pq_mq_parallel_leader_pid = pid;
+    pq_mq_parallel_leader_backend_id = backend_id;
 }

 static void
@@ -160,10 +160,10 @@ mq_putmessage(char msgtype, const char *s, size_t len)
     {
         result = shm_mq_sendv(pq_mq_handle, iov, 2, true);

-        if (pq_mq_parallel_master_pid != 0)
-            SendProcSignal(pq_mq_parallel_master_pid,
+        if (pq_mq_parallel_leader_pid != 0)
+            SendProcSignal(pq_mq_parallel_leader_pid,
                            PROCSIG_PARALLEL_MESSAGE,
-                           pq_mq_parallel_master_backend_id);
+                           pq_mq_parallel_leader_backend_id);

         if (result != SHM_MQ_WOULD_BLOCK)
             break;
......
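On the leader side, the PROCSIG_PARALLEL_MESSAGE signal sent above only sets a flag; the per-worker message queues are drained later at a safe interrupt point. Simplified from parallel.c:

#include "postgres.h"
#include "miscadmin.h"
#include "storage/latch.h"

/* Runs in the leader's signal handler context: just note the event. */
volatile sig_atomic_t ParallelMessagePending = false;

void
HandleParallelMessageInterrupt(void)
{
    InterruptPending = true;
    ParallelMessagePending = true;
    SetLatch(MyLatch);
}

/* A later CHECK_FOR_INTERRUPTS() then calls HandleParallelMessages(),
 * which reads each worker's error queue and rethrows errors locally. */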
@@ -11,7 +11,7 @@
  *  cpu_tuple_cost      Cost of typical CPU time to process a tuple
  *  cpu_index_tuple_cost  Cost of typical CPU time to process an index tuple
  *  cpu_operator_cost   Cost of CPU time to execute an operator or function
- *  parallel_tuple_cost Cost of CPU time to pass a tuple from worker to master backend
+ *  parallel_tuple_cost Cost of CPU time to pass a tuple from worker to leader backend
  *  parallel_setup_cost Cost of setting up shared memory for parallelism
 *
 * We expect that the kernel will typically do some amount of read-ahead
......
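As a worked example of the two parallel cost parameters above: with the defaults parallel_setup_cost = 1000 and parallel_tuple_cost = 0.1, funneling 10000 tuples to the leader adds roughly 1000 + 0.1 * 10000 = 2000 cost units. A sketch of the charge, simplified from cost_gather():

#include "postgres.h"
#include "optimizer/cost.h"

/* Simplified: cost added for starting workers and shipping their tuples
 * to the leader (cost_gather() splits this into startup and run cost). */
static double
gather_transfer_cost(double rows)
{
    return parallel_setup_cost + parallel_tuple_cost * rows;
}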
@@ -1028,8 +1028,8 @@ max_parallel_hazard_walker(Node *node, max_parallel_hazard_context *context)
     * We can't pass Params to workers at the moment either, so they are also
     * parallel-restricted, unless they are PARAM_EXTERN Params or are
     * PARAM_EXEC Params listed in safe_param_ids, meaning they could be
-    * either generated within the worker or can be computed in master and
-    * then their value can be passed to the worker.
+    * either generated within workers or can be computed by the leader and
+    * then their value can be passed to workers.
     */
    else if (IsA(node, Param))
    {
......
@@ -80,7 +80,7 @@ char postgres_exec_path[MAXPGPATH]; /* full path to backend */
 BackendId   MyBackendId = InvalidBackendId;

-BackendId   ParallelMasterBackendId = InvalidBackendId;
+BackendId   ParallelLeaderBackendId = InvalidBackendId;

 Oid         MyDatabaseId = InvalidOid;
......
@@ -3448,7 +3448,7 @@ static struct config_real ConfigureNamesReal[] =
     {
         {"parallel_tuple_cost", PGC_USERSET, QUERY_TUNING_COST,
             gettext_noop("Sets the planner's estimate of the cost of "
-                         "passing each tuple (row) from worker to master backend."),
+                         "passing each tuple (row) from worker to leader backend."),
             NULL,
             GUC_EXPLAIN
         },
......
@@ -18,7 +18,7 @@
 #include "pg_backup_archiver.h"

-/* Function to call in master process on completion of a worker task */
+/* Function to call in leader process on completion of a worker task */
 typedef void (*ParallelCompletionPtr) (ArchiveHandle *AH,
                                        TocEntry *te,
                                        int status,
......
@@ -662,7 +662,7 @@ RestoreArchive(Archive *AHX)
         restore_toc_entries_parallel(AH, pstate, &pending_list);
         ParallelBackupEnd(AH, pstate);

-        /* reconnect the master and see if we missed something */
+        /* reconnect the leader and see if we missed something */
         restore_toc_entries_postfork(AH, &pending_list);
         Assert(AH->connection != NULL);
     }
@@ -2393,7 +2393,7 @@ WriteDataChunks(ArchiveHandle *AH, ParallelState *pstate)
     if (pstate && pstate->numWorkers > 1)
     {
         /*
-         * In parallel mode, this code runs in the master process. We
+         * In parallel mode, this code runs in the leader process. We
          * construct an array of candidate TEs, then sort it into decreasing
          * size order, then dispatch each TE to a data-transfer worker. By
          * dumping larger tables first, we avoid getting into a situation
@@ -2447,7 +2447,7 @@ WriteDataChunks(ArchiveHandle *AH, ParallelState *pstate)
 /*
- * Callback function that's invoked in the master process after a step has
+ * Callback function that's invoked in the leader process after a step has
  * been parallel dumped.
  *
  * We don't need to do anything except check for worker failure.
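Such a completion callback can be as small as a status check. A hedged sketch (the real callback in pg_backup_archiver.c takes the full ParallelCompletionPtr argument list shown earlier, which is truncated here):

#include "pg_backup_archiver.h"
#include "pg_backup_utils.h"

/* Sketch of a leader-side completion callback: only verify success. */
static void
on_dump_step_done(ArchiveHandle *AH, TocEntry *te, int status)
{
    if (status != 0)
        fatal("worker process failed: exit code %d", status);
}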
@@ -4437,7 +4437,7 @@ pop_next_work_item(ArchiveHandle *AH, ParallelReadyList *ready_list,
  * this is run in the worker, i.e. in a thread (Windows) or a separate process
  * (everything else). A worker process executes several such work items during
  * a parallel backup or restore. Once we terminate here and report back that
- * our work is finished, the master process will assign us a new work item.
+ * our work is finished, the leader process will assign us a new work item.
  */
 int
 parallel_restore(ArchiveHandle *AH, TocEntry *te)
@@ -4457,7 +4457,7 @@ parallel_restore(ArchiveHandle *AH, TocEntry *te)
 /*
- * Callback function that's invoked in the master process after a step has
+ * Callback function that's invoked in the leader process after a step has
  * been parallel restored.
  *
  * Update status and reduce the dependency count of any dependent items.
......
@@ -807,7 +807,7 @@ _Clone(ArchiveHandle *AH)
     */

    /*
-    * We also don't copy the ParallelState pointer (pstate), only the master
+    * We also don't copy the ParallelState pointer (pstate), only the leader
     * process ever writes to it.
     */
 }
......
@@ -1238,7 +1238,7 @@ static void
 setupDumpWorker(Archive *AH)
 {
     /*
-     * We want to re-select all the same values the master connection is
+     * We want to re-select all the same values the leader connection is
      * using. We'll have inherited directly-usable values in
      * AH->sync_snapshot_id and AH->use_role, but we need to translate the
      * inherited encoding value back to a string to pass to setup_connection.
......
@@ -157,10 +157,10 @@ typedef FormData_pg_proc *Form_pg_proc;
 /*
  * Symbolic values for proparallel column: these indicate whether a function
  * can be safely be run in a parallel backend, during parallelism but
- * necessarily in the master, or only in non-parallel mode.
+ * necessarily in the leader, or only in non-parallel mode.
  */
-#define PROPARALLEL_SAFE        's' /* can run in worker or master */
-#define PROPARALLEL_RESTRICTED  'r' /* can run in parallel master only */
+#define PROPARALLEL_SAFE        's' /* can run in worker or leader */
+#define PROPARALLEL_RESTRICTED  'r' /* can run in parallel leader only */
 #define PROPARALLEL_UNSAFE      'u' /* banned while in parallel mode */

 /*
......
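Callers typically test the proparallel markers above through the catalog cache. A small hedged sketch of the usual check:

#include "postgres.h"
#include "catalog/pg_proc.h"
#include "utils/lsyscache.h"

/* Sketch: only PARALLEL SAFE functions may run inside a worker;
 * PARALLEL RESTRICTED ones must stay in the leader. */
static bool
can_run_in_worker(Oid funcid)
{
    return func_parallel(funcid) == PROPARALLEL_SAFE;
}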
@@ -17,7 +17,7 @@
 #include "storage/shm_mq.h"

 extern void pq_redirect_to_shm_mq(dsm_segment *seg, shm_mq_handle *mqh);
-extern void pq_set_parallel_master(pid_t pid, BackendId backend_id);
+extern void pq_set_parallel_leader(pid_t pid, BackendId backend_id);
 extern void pq_parse_errornotice(StringInfo str, ErrorData *edata);
......
@@ -25,13 +25,13 @@ typedef int BackendId; /* unique currently active backend identifier */
 extern PGDLLIMPORT BackendId MyBackendId;  /* backend id of this backend */

 /* backend id of our parallel session leader, or InvalidBackendId if none */
-extern PGDLLIMPORT BackendId ParallelMasterBackendId;
+extern PGDLLIMPORT BackendId ParallelLeaderBackendId;

 /*
  * The BackendId to use for our session's temp relations is normally our own,
  * but parallel workers should use their leader's ID.
  */
 #define BackendIdForTempRelations() \
-    (ParallelMasterBackendId == InvalidBackendId ? MyBackendId : ParallelMasterBackendId)
+    (ParallelLeaderBackendId == InvalidBackendId ? MyBackendId : ParallelLeaderBackendId)

 #endif                          /* BACKENDID_H */