Commit a89c46f9 authored by Bruce Momjian

Allow parallel copy/link in pg_upgrade

This patch implements parallel copying/linking of files by tablespace
using the --jobs option in pg_upgrade.
parent c00dc337
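For orientation, a hypothetical invocation that exercises the new behavior could look like the following; the bindir/datadir paths are placeholders, and --link and --jobs are the existing pg_upgrade options this commit builds on:

    # placeholder paths; --jobs 4 allows up to four copy/link operations
    # (one per tablespace) to run concurrently during the file-transfer phase
    pg_upgrade -b /usr/pgsql-old/bin -B /usr/pgsql-new/bin \
               -d /var/lib/pgsql/old-data -D /var/lib/pgsql/new-data \
               --link --jobs 4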
@@ -606,7 +606,7 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name)
 	fprintf(script, RMDIR_CMD " %s\n", fix_path_separator(old_cluster.pgdata));

 	/* delete old cluster's alternate tablespaces */
-	for (tblnum = 0; tblnum < os_info.num_tablespaces; tblnum++)
+	for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
 	{
 		/*
 		 * Do the old cluster's per-database directories share a directory
@@ -621,14 +621,14 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name)
 		/* remove PG_VERSION? */
 		if (GET_MAJOR_VERSION(old_cluster.major_version) <= 804)
 			fprintf(script, RM_CMD " %s%s%cPG_VERSION\n",
-					fix_path_separator(os_info.tablespaces[tblnum]),
+					fix_path_separator(os_info.old_tablespaces[tblnum]),
 					fix_path_separator(old_cluster.tablespace_suffix),
 					PATH_SEPARATOR);

 		for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++)
 		{
 			fprintf(script, RMDIR_CMD " %s%s%c%d\n",
-					fix_path_separator(os_info.tablespaces[tblnum]),
+					fix_path_separator(os_info.old_tablespaces[tblnum]),
 					fix_path_separator(old_cluster.tablespace_suffix),
 					PATH_SEPARATOR, old_cluster.dbarr.dbs[dbnum].db_oid);
 		}
@@ -640,7 +640,7 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name)
 		 * or a version-specific subdirectory.
 		 */
 		fprintf(script, RMDIR_CMD " %s%s\n",
-				fix_path_separator(os_info.tablespaces[tblnum]),
+				fix_path_separator(os_info.old_tablespaces[tblnum]),
 				fix_path_separator(old_cluster.tablespace_suffix));
 	}
...
@@ -106,20 +106,25 @@ create_rel_filename_map(const char *old_data, const char *new_data,
 		 * relation belongs to the default tablespace, hence relfiles should
 		 * exist in the data directories.
 		 */
-		snprintf(map->old_dir, sizeof(map->old_dir), "%s/base/%u", old_data,
-				 old_db->db_oid);
-		snprintf(map->new_dir, sizeof(map->new_dir), "%s/base/%u", new_data,
-				 new_db->db_oid);
+		strlcpy(map->old_tablespace, old_data, sizeof(map->old_tablespace));
+		strlcpy(map->new_tablespace, new_data, sizeof(map->new_tablespace));
+		strlcpy(map->old_tablespace_suffix, "/base", sizeof(map->old_tablespace_suffix));
+		strlcpy(map->new_tablespace_suffix, "/base", sizeof(map->new_tablespace_suffix));
 	}
 	else
 	{
 		/* relation belongs to a tablespace, so use the tablespace location */
-		snprintf(map->old_dir, sizeof(map->old_dir), "%s%s/%u", old_rel->tablespace,
-				 old_cluster.tablespace_suffix, old_db->db_oid);
-		snprintf(map->new_dir, sizeof(map->new_dir), "%s%s/%u", new_rel->tablespace,
-				 new_cluster.tablespace_suffix, new_db->db_oid);
+		strlcpy(map->old_tablespace, old_rel->tablespace, sizeof(map->old_tablespace));
+		strlcpy(map->new_tablespace, new_rel->tablespace, sizeof(map->new_tablespace));
+		strlcpy(map->old_tablespace_suffix, old_cluster.tablespace_suffix,
+				sizeof(map->old_tablespace_suffix));
+		strlcpy(map->new_tablespace_suffix, new_cluster.tablespace_suffix,
+				sizeof(map->new_tablespace_suffix));
 	}

+	map->old_db_oid = old_db->db_oid;
+	map->new_db_oid = new_db->db_oid;
+
 	/*
 	 * old_relfilenode might differ from pg_class.oid (and hence
 	 * new_relfilenode) because of CLUSTER, REINDEX, or VACUUM FULL.
...
@@ -34,11 +34,24 @@ typedef struct {
 	char		log_file[MAXPGPATH];
 	char		opt_log_file[MAXPGPATH];
 	char		cmd[MAX_STRING];
-} thread_arg;
+} exec_thread_arg;

-thread_arg **thread_args;
+typedef struct {
+	DbInfoArr  *old_db_arr;
+	DbInfoArr  *new_db_arr;
+	char		old_pgdata[MAXPGPATH];
+	char		new_pgdata[MAXPGPATH];
+	char		old_tablespace[MAXPGPATH];
+} transfer_thread_arg;
+
+exec_thread_arg **exec_thread_args;
+transfer_thread_arg **transfer_thread_args;
+
+/* track current thread_args struct so reap_child() can be used for all cases */
+void	  **cur_thread_args;

-DWORD win32_exec_prog(thread_arg *args);
+DWORD win32_exec_prog(exec_thread_arg *args);
+DWORD win32_transfer_all_new_dbs(transfer_thread_arg *args);

 #endif
@@ -58,7 +71,7 @@ parallel_exec_prog(const char *log_file, const char *opt_log_file,
 	pid_t		child;
 #else
 	HANDLE		child;
-	thread_arg *new_arg;
+	exec_thread_arg *new_arg;
 #endif

 	va_start(args, fmt);
@@ -71,7 +84,9 @@ parallel_exec_prog(const char *log_file, const char *opt_log_file,
 	else
 	{
 		/* parallel */
+#ifdef WIN32
+		cur_thread_args = (void **) exec_thread_args;
+#endif
 		/* harvest any dead children */
 		while (reap_child(false) == true)
 			;
@@ -100,7 +115,7 @@ parallel_exec_prog(const char *log_file, const char *opt_log_file,
 			int			i;

 			thread_handles = pg_malloc(user_opts.jobs * sizeof(HANDLE));
-			thread_args = pg_malloc(user_opts.jobs * sizeof(thread_arg *));
+			exec_thread_args = pg_malloc(user_opts.jobs * sizeof(exec_thread_arg *));

 			/*
 			 * For safety and performance, we keep the args allocated during
@@ -108,11 +123,11 @@ parallel_exec_prog(const char *log_file, const char *opt_log_file,
 			 * in a thread different from the one that allocated it.
 			 */
 			for (i = 0; i < user_opts.jobs; i++)
-				thread_args[i] = pg_malloc(sizeof(thread_arg));
+				exec_thread_args[i] = pg_malloc(sizeof(exec_thread_arg));
 		}

 		/* use first empty array element */
-		new_arg = thread_args[parallel_jobs-1];
+		new_arg = exec_thread_args[parallel_jobs-1];

 		/* Can only pass one pointer into the function, so use a struct */
 		strcpy(new_arg->log_file, log_file);
@@ -134,7 +149,7 @@ parallel_exec_prog(const char *log_file, const char *opt_log_file,
 #ifdef WIN32
 DWORD
-win32_exec_prog(thread_arg *args)
+win32_exec_prog(exec_thread_arg *args)
 {
 	int			ret;
@@ -146,6 +161,112 @@ win32_exec_prog(thread_arg *args)
 #endif
+/*
+ *	parallel_transfer_all_new_dbs
+ *
+ *	This has the same API as transfer_all_new_dbs, except it does parallel execution
+ *	by transferring multiple tablespaces in parallel
+ */
+void
+parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
+							  char *old_pgdata, char *new_pgdata,
+							  char *old_tablespace)
+{
+#ifndef WIN32
+	pid_t		child;
+#else
+	HANDLE		child;
+	transfer_thread_arg *new_arg;
+#endif
+
+	if (user_opts.jobs <= 1)
+		/* throw_error must be true to allow jobs */
+		transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, new_pgdata, NULL);
+	else
+	{
+		/* parallel */
+#ifdef WIN32
+		cur_thread_args = (void **) transfer_thread_args;
+#endif
+		/* harvest any dead children */
+		while (reap_child(false) == true)
+			;
+
+		/* must we wait for a dead child? */
+		if (parallel_jobs >= user_opts.jobs)
+			reap_child(true);
+
+		/* set this before we start the job */
+		parallel_jobs++;
+
+		/* Ensure stdio state is quiesced before forking */
+		fflush(NULL);
+
+#ifndef WIN32
+		child = fork();
+		if (child == 0)
+		{
+			transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata, new_pgdata,
+								 old_tablespace);
+			/* if we take another exit path, it will be non-zero */
+			/* use _exit to skip atexit() functions */
+			_exit(0);
+		}
+		else if (child < 0)
+			/* fork failed */
+			pg_log(PG_FATAL, "could not create worker process: %s\n", strerror(errno));
+#else
+		if (thread_handles == NULL)
+		{
+			int			i;
+
+			thread_handles = pg_malloc(user_opts.jobs * sizeof(HANDLE));
+			transfer_thread_args = pg_malloc(user_opts.jobs * sizeof(transfer_thread_arg *));
+
+			/*
+			 * For safety and performance, we keep the args allocated during
+			 * the entire life of the process, and we don't free the args
+			 * in a thread different from the one that allocated it.
+			 */
+			for (i = 0; i < user_opts.jobs; i++)
+				transfer_thread_args[i] = pg_malloc(sizeof(transfer_thread_arg));
+		}
+
+		/* use first empty array element */
+		new_arg = transfer_thread_args[parallel_jobs - 1];
+
+		/* Can only pass one pointer into the function, so use a struct */
+		new_arg->old_db_arr = old_db_arr;
+		new_arg->new_db_arr = new_db_arr;
+		strcpy(new_arg->old_pgdata, old_pgdata);
+		strcpy(new_arg->new_pgdata, new_pgdata);
+		strcpy(new_arg->old_tablespace, old_tablespace);
+
+		/* start the per-tablespace transfer worker thread */
+		child = (HANDLE) _beginthreadex(NULL, 0, (void *) win32_transfer_all_new_dbs,
+										new_arg, 0, NULL);
+		if (child == 0)
+			pg_log(PG_FATAL, "could not create worker thread: %s\n", strerror(errno));
+
+		thread_handles[parallel_jobs - 1] = child;
+#endif
+	}
+
+	return;
+}
+
+
+#ifdef WIN32
+DWORD
+win32_transfer_all_new_dbs(transfer_thread_arg *args)
+{
+	transfer_all_new_dbs(args->old_db_arr, args->new_db_arr, args->old_pgdata,
+						 args->new_pgdata, args->old_tablespace);
+
+	/* terminates thread */
+	return 0;
+}
+#endif
 /*
  *	collect status from a completed worker child
  */
@@ -195,7 +316,7 @@ reap_child(bool wait_for_child)
 		/* Move last slot into dead child's position */
 		if (thread_num != parallel_jobs - 1)
 		{
-			thread_arg *tmp_args;
+			void	   *tmp_args;

 			thread_handles[thread_num] = thread_handles[parallel_jobs - 1];
@@ -205,9 +326,9 @@ reap_child(bool wait_for_child)
 			 * reused by the next created thread.  Instead, the new thread
 			 * will use the arg struct of the thread that just died.
 			 */
-			tmp_args = thread_args[thread_num];
-			thread_args[thread_num] = thread_args[parallel_jobs - 1];
-			thread_args[parallel_jobs - 1] = tmp_args;
+			tmp_args = cur_thread_args[thread_num];
+			cur_thread_args[thread_num] = cur_thread_args[parallel_jobs - 1];
+			cur_thread_args[parallel_jobs - 1] = tmp_args;
 		}
 #endif
...
@@ -133,7 +133,7 @@ main(int argc, char **argv)
 	if (user_opts.transfer_mode == TRANSFER_MODE_LINK)
 		disable_old_cluster();

-	transfer_all_new_dbs(&old_cluster.dbarr, &new_cluster.dbarr,
-						 old_cluster.pgdata, new_cluster.pgdata);
+	transfer_all_new_tablespaces(&old_cluster.dbarr, &new_cluster.dbarr,
+								 old_cluster.pgdata, new_cluster.pgdata);

 	/*
...
@@ -134,8 +134,12 @@ typedef struct
  */
 typedef struct
 {
-	char		old_dir[MAXPGPATH];
-	char		new_dir[MAXPGPATH];
+	char		old_tablespace[MAXPGPATH];
+	char		new_tablespace[MAXPGPATH];
+	char		old_tablespace_suffix[MAXPGPATH];
+	char		new_tablespace_suffix[MAXPGPATH];
+	Oid			old_db_oid;
+	Oid			new_db_oid;

 	/*
 	 * old/new relfilenodes might differ for pg_largeobject(_metadata) indexes
@@ -276,8 +280,8 @@ typedef struct
 	const char *progname;		/* complete pathname for this program */
 	char	   *exec_path;		/* full path to my executable */
 	char	   *user;			/* username for clusters */
-	char	  **tablespaces;	/* tablespaces */
-	int			num_tablespaces;
+	char	  **old_tablespaces;	/* tablespaces */
+	int			num_old_tablespaces;
 	char	  **libraries;		/* loadable libraries */
 	int			num_libraries;
 	ClusterInfo *running_cluster;
@@ -398,9 +402,11 @@ void get_sock_dir(ClusterInfo *cluster, bool live_check);
 /* relfilenode.c */

 void		get_pg_database_relfilenode(ClusterInfo *cluster);
-void transfer_all_new_dbs(DbInfoArr *olddb_arr,
-				   DbInfoArr *newdb_arr, char *old_pgdata, char *new_pgdata);
+void transfer_all_new_tablespaces(DbInfoArr *old_db_arr,
+				   DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata);
+void transfer_all_new_dbs(DbInfoArr *old_db_arr,
+				   DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata,
+				   char *old_tablespace);

 /* tablespace.c */
@@ -467,6 +473,8 @@ char *old_8_3_create_sequence_script(ClusterInfo *cluster);
 void parallel_exec_prog(const char *log_file, const char *opt_log_file,
 				   const char *fmt,...)
 __attribute__((format(PG_PRINTF_ATTRIBUTE, 3, 4)));
+void parallel_transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
+				   char *old_pgdata, char *new_pgdata,
+				   char *old_tablespace);
 bool		reap_child(bool wait_for_child);
@@ -16,11 +16,57 @@
 static void transfer_single_new_db(pageCnvCtx *pageConverter,
-					   FileNameMap *maps, int size);
+					   FileNameMap *maps, int size, char *old_tablespace);
 static void transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map,
 				 const char *suffix);

+
+/*
+ * transfer_all_new_tablespaces()
+ *
+ * Responsible for upgrading all databases; invokes routines to generate
+ * mappings and then physically link the databases.
+ */
+void
+transfer_all_new_tablespaces(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
+							 char *old_pgdata, char *new_pgdata)
+{
+	pg_log(PG_REPORT, "%s user relation files\n",
+		   user_opts.transfer_mode == TRANSFER_MODE_LINK ? "Linking" : "Copying");
+
+	/*
+	 * Transferring files by tablespace is tricky because a single database
+	 * can use multiple tablespaces.  For non-parallel mode, we just pass a
+	 * NULL tablespace path, which matches all tablespaces.  In parallel mode,
+	 * we pass the default tablespace and all user-created tablespaces
+	 * and let those operations happen in parallel.
+	 */
+	if (user_opts.jobs <= 1)
+		parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata,
+									  new_pgdata, NULL);
+	else
+	{
+		int			tblnum;
+
+		/* transfer default tablespace */
+		parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata,
+									  new_pgdata, old_pgdata);
+
+		for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
+			parallel_transfer_all_new_dbs(old_db_arr, new_db_arr, old_pgdata,
+										  new_pgdata, os_info.old_tablespaces[tblnum]);
+		/* reap all children */
+		while (reap_child(true) == true)
+			;
+	}
+
+	end_progress_output();
+	check_ok();
+
+	return;
+}
+
 /*
  * transfer_all_new_dbs()
  *
@@ -28,15 +74,12 @@ static void transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map,
  * physically link the databases.
  */
 void
-transfer_all_new_dbs(DbInfoArr *old_db_arr,
-					 DbInfoArr *new_db_arr, char *old_pgdata, char *new_pgdata)
+transfer_all_new_dbs(DbInfoArr *old_db_arr, DbInfoArr *new_db_arr,
+					 char *old_pgdata, char *new_pgdata, char *old_tablespace)
 {
 	int			old_dbnum,
 				new_dbnum;

-	pg_log(PG_REPORT, "%s user relation files\n",
-		   user_opts.transfer_mode == TRANSFER_MODE_LINK ? "Linking" : "Copying");
-
 	/* Scan the old cluster databases and transfer their files */
 	for (old_dbnum = new_dbnum = 0;
 		 old_dbnum < old_db_arr->ndbs;
@@ -75,15 +118,13 @@ transfer_all_new_dbs(DbInfoArr *old_db_arr,
 #ifdef PAGE_CONVERSION
 			pageConverter = setupPageConverter();
 #endif
-			transfer_single_new_db(pageConverter, mappings, n_maps);
+			transfer_single_new_db(pageConverter, mappings, n_maps,
+								   old_tablespace);

 			pg_free(mappings);
 		}
 	}

-	end_progress_output();
-	check_ok();
-
 	return;
 }
@@ -125,7 +166,7 @@ get_pg_database_relfilenode(ClusterInfo *cluster)
  */
 static void
 transfer_single_new_db(pageCnvCtx *pageConverter,
-					   FileNameMap *maps, int size)
+					   FileNameMap *maps, int size, char *old_tablespace)
 {
 	int			mapnum;
 	bool		vm_crashsafe_match = true;
@@ -139,6 +180,9 @@ transfer_single_new_db(pageCnvCtx *pageConverter,
 		vm_crashsafe_match = false;

 	for (mapnum = 0; mapnum < size; mapnum++)
+	{
+		if (old_tablespace == NULL ||
+			strcmp(maps[mapnum].old_tablespace, old_tablespace) == 0)
 		{
 			/* transfer primary file */
 			transfer_relfile(pageConverter, &maps[mapnum], "");
@@ -154,6 +198,7 @@ transfer_single_new_db(pageCnvCtx *pageConverter,
 					transfer_relfile(pageConverter, &maps[mapnum], "_vm");
 			}
 		}
+	}
 }
@@ -187,10 +232,12 @@ transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map,
 	else
 		snprintf(extent_suffix, sizeof(extent_suffix), ".%d", segno);

-	snprintf(old_file, sizeof(old_file), "%s/%u%s%s", map->old_dir,
-			 map->old_relfilenode, type_suffix, extent_suffix);
-	snprintf(new_file, sizeof(new_file), "%s/%u%s%s", map->new_dir,
-			 map->new_relfilenode, type_suffix, extent_suffix);
+	snprintf(old_file, sizeof(old_file), "%s%s/%u/%u%s%s", map->old_tablespace,
+			 map->old_tablespace_suffix, map->old_db_oid, map->old_relfilenode,
+			 type_suffix, extent_suffix);
+	snprintf(new_file, sizeof(new_file), "%s%s/%u/%u%s%s", map->new_tablespace,
+			 map->new_tablespace_suffix, map->new_db_oid, map->new_relfilenode,
+			 type_suffix, extent_suffix);

 	/* Is it an extent, fsm, or vm file? */
 	if (type_suffix[0] != '\0' || segno != 0)
@@ -239,3 +286,4 @@ transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map,
 	return;
 }
@@ -23,7 +23,7 @@ init_tablespaces(void)
 	set_tablespace_directory_suffix(&old_cluster);
 	set_tablespace_directory_suffix(&new_cluster);

-	if (os_info.num_tablespaces > 0 &&
+	if (os_info.num_old_tablespaces > 0 &&
 		strcmp(old_cluster.tablespace_suffix, new_cluster.tablespace_suffix) == 0)
 		pg_log(PG_FATAL,
 			   "Cannot upgrade to/from the same system catalog version when\n"
@@ -57,16 +57,16 @@ get_tablespace_paths(void)
 	res = executeQueryOrDie(conn, "%s", query);

-	if ((os_info.num_tablespaces = PQntuples(res)) != 0)
-		os_info.tablespaces = (char **) pg_malloc(
-								os_info.num_tablespaces * sizeof(char *));
+	if ((os_info.num_old_tablespaces = PQntuples(res)) != 0)
+		os_info.old_tablespaces = (char **) pg_malloc(
+								os_info.num_old_tablespaces * sizeof(char *));
 	else
-		os_info.tablespaces = NULL;
+		os_info.old_tablespaces = NULL;

 	i_spclocation = PQfnumber(res, "spclocation");

-	for (tblnum = 0; tblnum < os_info.num_tablespaces; tblnum++)
-		os_info.tablespaces[tblnum] = pg_strdup(
-								PQgetvalue(res, tblnum, i_spclocation));
+	for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++)
+		os_info.old_tablespaces[tblnum] = pg_strdup(
+								PQgetvalue(res, tblnum, i_spclocation));

 	PQclear(res);
...
@@ -342,10 +342,11 @@ NET STOP pgsql-8.3 (<productname>PostgreSQL</> 8.3 and older used a different s
   <para>
    The <option>--jobs</> option allows multiple CPU cores to be used
-   to dump and reload database schemas in parallel; a good place to
-   start is the number of CPU cores on the server. This option can
-   dramatically reduce the time to upgrade a multi-database server
-   running on a multiprocessor machine.
+   for copying/linking of files and to dump and reload database schemas
+   in parallel; a good place to start is the maximum of the number of
+   CPU cores and tablespaces. This option can dramatically reduce the
+   time to upgrade a multi-database server running on a multiprocessor
+   machine.
   </para>

   <para>
...
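A rough worked example of the revised sizing guidance above, using assumed numbers rather than anything stated in the patch:

    max(CPU cores, tablespaces) = max(8, 3) = 8   ->  --jobs 8
    max(CPU cores, tablespaces) = max(2, 6) = 6   ->  --jobs 6

In the second case, choosing 6 keeps one copy/link worker available per tablespace even though the machine has only two cores, since the file-transfer phase is largely I/O bound.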