Commit c2e9b2f2 authored by Bruce Momjian

Add pg_upgrade to /contrib; will be in 9.0 beta2.

Add documentation.

Supports migration from PG 8.3 and 8.4.
parent 28e17422
#
# Makefile for pg_upgrade
#
# targets: all, clean, install, uninstall
#
# This Makefile generates an executable and a shared object file
#
PROGRAM = pg_upgrade
# Objects linked into the pg_upgrade frontend executable
OBJS = check.o controldata.o dump.o exec.o file.o function.o info.o \
	option.o page.o pg_upgrade.o relfilenode.o server.o \
	tablespace.o util.o version.o version_old_8_3.o $(WIN32RES)
# Built as a frontend program; DLSUFFIX is needed to locate loadable modules
PG_CPPFLAGS = -DFRONTEND -DDLSUFFIX=\"$(DLSUFFIX)\" -I$(srcdir) -I$(libpq_srcdir)
PG_LIBS = $(libpq_pgport)
PGFILEDESC = "pg_upgrade - In-Place Binary Upgrade Utility"
PGAPPICON = win32
# Server-side shared library with oid/relfilenode setter functions
MODULES = pg_upgrade_sysoids
ifdef USE_PGXS
PG_CONFIG = pg_config
PGXS := $(shell $(PG_CONFIG) --pgxs)
include $(PGXS)
else
subdir = contrib/pg_upgrade
top_builddir = ../..
include $(top_builddir)/src/Makefile.global
include $(top_srcdir)/contrib/contrib-global.mk
endif
This diff is collapsed.
This diff is collapsed.
/*
* dump.c
*
* dump functions
*/
#include "pg_upgrade.h"
/*
 * generate_old_dump()
 *
 * Runs the *new* cluster's pg_dumpall against the *old* cluster's port to
 * produce a schema-only dump (ALL_DUMP_FILE) in the output directory.
 * exec_prog() is called with throw_error = true, so any failure is fatal.
 */
void
generate_old_dump(migratorContext *ctx)
{
	/* run new pg_dumpall binary */
	prep_status(ctx, "Creating catalog dump");

	/*
	 * --binary-upgrade records the width of dropped columns in pg_class, and
	 * restores the frozenid's for databases and relations.
	 */
	exec_prog(ctx, true,
			  SYSTEMQUOTE "\"%s/pg_dumpall\" --port %d --schema-only "
			  "--binary-upgrade > \"%s/" ALL_DUMP_FILE "\"" SYSTEMQUOTE,
			  ctx->new.bindir, ctx->old.port, ctx->output_dir);
	check_ok(ctx);
}
/*
 * split_old_dump
 *
 * This function splits pg_dumpall output into global values and
 * database creation (GLOBALS_DUMP_FILE), and per-db schemas
 * (DB_DUMP_FILE).  This allows us to create the toast place
 * holders between restoring these two parts of the
 * dump.  We split on the first "\connect " after a CREATE ROLE
 * username match; this is where the per-db restore starts.
 *
 * We suppress recreation of our own username so we don't generate
 * an error during restore
 */
void
split_old_dump(migratorContext *ctx)
{
	FILE	   *all_dump,
			   *globals_dump,
			   *db_dump;
	FILE	   *current_output;		/* file currently being written */
	char		line[LINE_ALLOC];
	bool		start_of_line = true;	/* does line[] begin a physical line? */
	char		create_role_str[MAX_STRING];
	char		create_role_str_quote[MAX_STRING];
	char		filename[MAXPGPATH];
	bool		suppressed_username = false;	/* own CREATE ROLE skipped yet? */

	/* open the combined dump for reading and the two split files for writing */
	snprintf(filename, sizeof(filename), "%s/%s", ctx->output_dir, ALL_DUMP_FILE);
	if ((all_dump = fopen(filename, "r")) == NULL)
		pg_log(ctx, PG_FATAL, "Cannot open dump file %s\n", filename);
	snprintf(filename, sizeof(filename), "%s/%s", ctx->output_dir, GLOBALS_DUMP_FILE);
	if ((globals_dump = fopen(filename, "w")) == NULL)
		pg_log(ctx, PG_FATAL, "Cannot write to dump file %s\n", filename);
	snprintf(filename, sizeof(filename), "%s/%s", ctx->output_dir, DB_DUMP_FILE);
	if ((db_dump = fopen(filename, "w")) == NULL)
		pg_log(ctx, PG_FATAL, "Cannot write to dump file %s\n", filename);
	current_output = globals_dump;

	/* patterns used to prevent our own username from being recreated */
	snprintf(create_role_str, sizeof(create_role_str),
			 "CREATE ROLE %s;", ctx->user);
	snprintf(create_role_str_quote, sizeof(create_role_str_quote),
			 "CREATE ROLE %s;", quote_identifier(ctx, ctx->user));

	while (fgets(line, sizeof(line), all_dump) != NULL)
	{
		/* switch to db_dump file output?  (only test at a real line start) */
		if (current_output == globals_dump && start_of_line &&
			suppressed_username &&
			strncmp(line, "\\connect ", strlen("\\connect ")) == 0)
			current_output = db_dump;

		/* output unless we are recreating our own username */
		if (current_output != globals_dump || !start_of_line ||
			(strncmp(line, create_role_str, strlen(create_role_str)) != 0 &&
			 strncmp(line, create_role_str_quote, strlen(create_role_str_quote)) != 0))
			fputs(line, current_output);
		else
			suppressed_username = true;

		/*
		 * fgets() may return a partial physical line when the line exceeds
		 * the buffer; remember whether the next read starts a new line so
		 * the prefix tests above are only applied at true line starts.
		 */
		if (strlen(line) > 0 && line[strlen(line) - 1] == '\n')
			start_of_line = true;
		else
			start_of_line = false;
	}

	fclose(all_dump);
	fclose(globals_dump);
	fclose(db_dump);
}
/*
* exec.c
*
* execution functions
*/
#include "pg_upgrade.h"
#include <fcntl.h>
#include <grp.h>
static void checkBinDir(migratorContext *ctx, ClusterInfo *cluster);
static int check_exec(migratorContext *ctx, const char *dir, const char *cmdName,
const char *alternative);
static const char *validate_exec(const char *path);
static int check_data_dir(migratorContext *ctx, const char *pg_data);
/*
 * exec_prog()
 *
 * Formats a command from the given argument list and executes that
 * command via system().  Returns 0 if the command completes
 * successfully; otherwise logs an error message and returns 1.
 * (Note: the command string is limited to MAXPGPATH bytes.)
 *
 * If throw_error is TRUE, this function will throw a PG_FATAL error
 * instead of returning should an error occur.
 */
int
exec_prog(migratorContext *ctx, bool throw_error, const char *fmt,...)
{
	va_list		args;
	int			result;
	char		cmd[MAXPGPATH];

	va_start(args, fmt);
	vsnprintf(cmd, MAXPGPATH, fmt, args);
	va_end(args);

	pg_log(ctx, PG_INFO, "%s\n", cmd);

	result = system(cmd);

	if (result != 0)
	{
		/* with throw_error, pg_log(PG_FATAL) does not return */
		pg_log(ctx, throw_error ? PG_FATAL : PG_INFO,
			   "\nThere were problems executing %s\n", cmd);
		return 1;
	}

	return 0;
}
/*
 * verify_directories()
 *
 * does all the hectic work of verifying directories and executables
 * of old and new server.
 *
 * Any missing data subdirectory or binary is fatal (via pg_log/PG_FATAL
 * in check_data_dir()/checkBinDir()).
 *
 * NOTE: May update the values of all parameters
 */
void
verify_directories(migratorContext *ctx)
{
	/* old cluster: data directory layout, then required binaries */
	prep_status(ctx, "Checking old data directory (%s)", ctx->old.pgdata);
	if (check_data_dir(ctx, ctx->old.pgdata) != 0)
		pg_log(ctx, PG_FATAL, "Failed\n");
	checkBinDir(ctx, &ctx->old);
	check_ok(ctx);

	/* new cluster: same checks */
	prep_status(ctx, "Checking new data directory (%s)", ctx->new.pgdata);
	if (check_data_dir(ctx, ctx->new.pgdata) != 0)
		pg_log(ctx, PG_FATAL, "Failed\n");
	checkBinDir(ctx, &ctx->new);
	check_ok(ctx);
}
/*
 * checkBinDir()
 *
 * This function searches for the executables that we expect to find
 * in the binaries directory.  If we find that a required executable
 * is missing (or secured against us), we display an error message and
 * exit().
 *
 * Also determines which psql executable to use (and, for EnterpriseDB
 * builds, whether the cluster is EDB Advanced Server).
 */
static void
checkBinDir(migratorContext *ctx, ClusterInfo *cluster)
{
	check_exec(ctx, cluster->bindir, "postgres", "edb-postgres");
	check_exec(ctx, cluster->bindir, "pg_ctl", NULL);
	check_exec(ctx, cluster->bindir, "pg_dumpall", NULL);

#ifdef EDB_NATIVE_LANG
	/* check for edb-psql first because we need to detect EDB AS */
	if (check_exec(ctx, cluster->bindir, "edb-psql", "psql") == 1)
	{
		cluster->psql_exe = "edb-psql";
		cluster->is_edb_as = true;
	}
	else
#else
	/* without a psql, check_exec() above issues PG_FATAL and does not return */
	if (check_exec(ctx, cluster->bindir, "psql", NULL) == 1)
#endif
		cluster->psql_exe = "psql";
}
/*
 * is_server_running()
 *
 * Reports whether a postmaster appears to be running on the given data
 * directory, judged solely by the presence of its postmaster.pid file.
 * Any open() failure other than ENOENT is fatal.
 */
bool
is_server_running(migratorContext *ctx, const char *datadir)
{
	char		pid_path[MAXPGPATH];
	int			pid_fd;

	snprintf(pid_path, sizeof(pid_path), "%s/postmaster.pid", datadir);

	pid_fd = open(pid_path, O_RDONLY);
	if (pid_fd >= 0)
	{
		/* file exists, so a postmaster is (probably) alive */
		close(pid_fd);
		return true;
	}

	/* ENOENT just means no server; anything else is a real problem */
	if (errno != ENOENT)
		pg_log(ctx, PG_FATAL, "\ncould not open file \"%s\" for reading\n",
			   pid_path);

	return false;
}
/*
 * check_exec()
 *
 * Checks whether either of the two command names (cmdName and alternative)
 * appears to be an executable (in the given directory).  Returns 1 when
 * dir/cmdName is executable, 2 when dir/alternative is, and 0 when
 * neither is.  With no alternative, a missing cmdName is fatal.
 */
static int
check_exec(migratorContext *ctx, const char *dir, const char *cmdName,
		   const char *alternative)
{
	char		candidate[MAXPGPATH];
	const char *errMsg;

	snprintf(candidate, sizeof(candidate), "%s%c%s", dir, pathSeparator, cmdName);

	errMsg = validate_exec(candidate);
	if (errMsg == NULL)
		return 1;				/* 1 -> first alternative OK */

	if (alternative == NULL)
		pg_log(ctx, PG_FATAL, "check for %s failed - %s\n", cmdName, errMsg);
	else
	{
		/* warn about the primary name, then try the fallback */
		report_status(ctx, PG_WARNING, "check for %s warning: %s",
					  cmdName, errMsg);
		if (check_exec(ctx, dir, alternative, NULL) == 1)
			return 2;			/* 2 -> second alternative OK */
	}

	return 0;					/* 0 -> neither alternative is acceptable */
}
/*
 * validate_exec()
 *
 * validate "path" as an executable file.
 * Returns NULL if the file is found and is both readable and executable
 * by the current user (readability is required for dynamic loading);
 * otherwise returns a string describing the problem.
 */
static const char *
validate_exec(const char *path)
{
	struct stat buf;

#ifndef WIN32
	uid_t		euid;
	struct group *gp;
	struct passwd *pwp;
	int			in_grp = 0;
#else
	char		path_exe[MAXPGPATH + sizeof(EXE_EXT) - 1];
#endif

#ifdef WIN32
	/* Win32 requires a .exe suffix for stat() */
	if (strlen(path) >= strlen(EXE_EXT) &&
		pg_strcasecmp(path + strlen(path) - strlen(EXE_EXT), EXE_EXT) != 0)
	{
		strcpy(path_exe, path);
		strcat(path_exe, EXE_EXT);
		path = path_exe;
	}
#endif

	/*
	 * Ensure that the file exists and is a regular file.
	 */
	if (stat(path, &buf) < 0)
		return getErrorText(errno);

	if ((buf.st_mode & S_IFMT) != S_IFREG)
		return "not an executable file";

	/*
	 * Ensure that we are using an authorized executable.
	 */

	/*
	 * Ensure that the file is both executable and readable (required for
	 * dynamic loading).
	 */
#ifndef WIN32
	euid = geteuid();

	/* If owned by us, just check owner bits */
	if (euid == buf.st_uid)
	{
		if ((buf.st_mode & S_IRUSR) == 0)
			return "can't read file (permission denied)";
		if ((buf.st_mode & S_IXUSR) == 0)
			return "can't execute (permission denied)";
		return NULL;
	}

	/* OK, check group bits */
	pwp = getpwuid(euid);		/* not thread-safe */
	if (pwp)
	{
		if (pwp->pw_gid == buf.st_gid)	/* my primary group? */
			++in_grp;
		else if (pwp->pw_name &&
				 (gp = getgrgid(buf.st_gid)) != NULL &&
		/* not thread-safe */ gp->gr_mem != NULL)
		{
			/* try list of member groups */
			int			i;

			for (i = 0; gp->gr_mem[i]; ++i)
			{
				if (!strcmp(gp->gr_mem[i], pwp->pw_name))
				{
					++in_grp;
					break;
				}
			}
		}

		if (in_grp)
		{
			if ((buf.st_mode & S_IRGRP) == 0)
				return "can't read file (permission denied)";
			if ((buf.st_mode & S_IXGRP) == 0)
				return "can't execute (permission denied)";
			return NULL;
		}
	}

	/* Check "other" bits */
	if ((buf.st_mode & S_IROTH) == 0)
		return "can't read file (permission denied)";
	if ((buf.st_mode & S_IXOTH) == 0)
		return "can't execute (permission denied)";
	return NULL;
#else
	/* Windows: owner bits are the only meaningful permission bits */
	if ((buf.st_mode & S_IRUSR) == 0)
		return "can't read file (permission denied)";
	if ((buf.st_mode & S_IXUSR) == 0)
		return "can't execute (permission denied)";
	return NULL;
#endif
}
/*
 * check_data_dir()
 *
 * This function validates the given cluster directory - we search for a
 * small set of subdirectories that we expect to find in a valid $PGDATA
 * directory.  Each missing or non-directory entry produces a warning;
 * the function returns -1 if any check failed, 0 if all passed.
 */
static int
check_data_dir(migratorContext *ctx, const char *pg_data)
{
	const char *requiredSubdirs[] = {"base", "global", "pg_clog",
		"pg_multixact", "pg_subtrans",
	"pg_tblspc", "pg_twophase", "pg_xlog"};
	const int	n_subdirs = sizeof(requiredSubdirs) / sizeof(requiredSubdirs[0]);
	char		subDirName[MAXPGPATH];
	bool		fail = false;
	int			subdirnum;

	for (subdirnum = 0; subdirnum < n_subdirs; ++subdirnum)
	{
		struct stat statBuf;

		snprintf(subDirName, sizeof(subDirName), "%s%c%s", pg_data,
				 pathSeparator, requiredSubdirs[subdirnum]);

		if (stat(subDirName, &statBuf) != 0)
		{
			/* stat failed -- report why and keep checking the rest */
			report_status(ctx, PG_WARNING, "check for %s warning: %s",
						  requiredSubdirs[subdirnum], getErrorText(errno));
			fail = true;
		}
		else if (!S_ISDIR(statBuf.st_mode))
		{
			report_status(ctx, PG_WARNING, "%s is not a directory",
						  requiredSubdirs[subdirnum]);
			fail = true;
		}
	}

	return fail ? -1 : 0;
}
This diff is collapsed.
/*
* function.c
*
* server-side function support
*/
#include "pg_upgrade.h"
#include "access/transam.h"
/*
 * install_support_functions()
 *
 * pg_upgrade requires some support functions that enable it to modify
 * backend behavior.  These are created in a private "binary_upgrade"
 * schema in every database of the new cluster; all of them live in the
 * pg_upgrade_sysoids shared library.
 */
void
install_support_functions(migratorContext *ctx)
{
	int			dbnum;

	prep_status(ctx, "Adding support functions to new cluster");

	for (dbnum = 0; dbnum < ctx->new.dbarr.ndbs; dbnum++)
	{
		DbInfo	   *newdb = &ctx->new.dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(ctx, newdb->db_name, CLUSTER_NEW);

		/* suppress NOTICE of dropped objects */
		PQclear(executeQueryOrDie(ctx, conn,
								  "SET client_min_messages = warning;"));
		/* drop any leftover schema from a previous (failed) run */
		PQclear(executeQueryOrDie(ctx, conn,
								  "DROP SCHEMA IF EXISTS binary_upgrade CASCADE;"));
		PQclear(executeQueryOrDie(ctx, conn,
								  "RESET client_min_messages;"));
		PQclear(executeQueryOrDie(ctx, conn,
								  "CREATE SCHEMA binary_upgrade;"));

		/* pg_type oid setters */
		PQclear(executeQueryOrDie(ctx, conn,
								  "CREATE OR REPLACE FUNCTION "
								  " binary_upgrade.set_next_pg_type_oid(OID) "
								  "RETURNS VOID "
								  "AS '$libdir/pg_upgrade_sysoids' "
								  "LANGUAGE C STRICT;"));
		PQclear(executeQueryOrDie(ctx, conn,
								  "CREATE OR REPLACE FUNCTION "
								  " binary_upgrade.set_next_pg_type_array_oid(OID) "
								  "RETURNS VOID "
								  "AS '$libdir/pg_upgrade_sysoids' "
								  "LANGUAGE C STRICT;"));
		PQclear(executeQueryOrDie(ctx, conn,
								  "CREATE OR REPLACE FUNCTION "
								  " binary_upgrade.set_next_pg_type_toast_oid(OID) "
								  "RETURNS VOID "
								  "AS '$libdir/pg_upgrade_sysoids' "
								  "LANGUAGE C STRICT;"));

		/* relfilenode setters for heaps, toast tables, and indexes */
		PQclear(executeQueryOrDie(ctx, conn,
								  "CREATE OR REPLACE FUNCTION "
								  " binary_upgrade.set_next_heap_relfilenode(OID) "
								  "RETURNS VOID "
								  "AS '$libdir/pg_upgrade_sysoids' "
								  "LANGUAGE C STRICT;"));
		PQclear(executeQueryOrDie(ctx, conn,
								  "CREATE OR REPLACE FUNCTION "
								  " binary_upgrade.set_next_toast_relfilenode(OID) "
								  "RETURNS VOID "
								  "AS '$libdir/pg_upgrade_sysoids' "
								  "LANGUAGE C STRICT;"));
		PQclear(executeQueryOrDie(ctx, conn,
								  "CREATE OR REPLACE FUNCTION "
								  " binary_upgrade.set_next_index_relfilenode(OID) "
								  "RETURNS VOID "
								  "AS '$libdir/pg_upgrade_sysoids' "
								  "LANGUAGE C STRICT;"));

		/* enum label migration support */
		PQclear(executeQueryOrDie(ctx, conn,
								  "CREATE OR REPLACE FUNCTION "
								  " binary_upgrade.add_pg_enum_label(OID, OID, NAME) "
								  "RETURNS VOID "
								  "AS '$libdir/pg_upgrade_sysoids' "
								  "LANGUAGE C STRICT;"));
		PQfinish(conn);
	}
	check_ok(ctx);
}
/*
 * uninstall_support_functions()
 *
 * Drops the binary_upgrade support schema (and everything in it) from
 * every database of the new cluster, once the upgrade no longer needs it.
 */
void
uninstall_support_functions(migratorContext *ctx)
{
	int			dbnum;

	prep_status(ctx, "Removing support functions from new cluster");

	for (dbnum = 0; dbnum < ctx->new.dbarr.ndbs; dbnum++)
	{
		DbInfo	   *db_info = &ctx->new.dbarr.dbs[dbnum];
		PGconn	   *db_conn = connectToServer(ctx, db_info->db_name, CLUSTER_NEW);

		/* suppress NOTICE of dropped objects */
		PQclear(executeQueryOrDie(ctx, db_conn,
								  "SET client_min_messages = warning;"));
		PQclear(executeQueryOrDie(ctx, db_conn,
								  "DROP SCHEMA binary_upgrade CASCADE;"));
		PQclear(executeQueryOrDie(ctx, db_conn,
								  "RESET client_min_messages;"));

		PQfinish(db_conn);
	}
	check_ok(ctx);
}
/*
 * library_seen()
 *
 * Returns true if "name" already appears in the first nlibs entries of libs.
 */
static bool
library_seen(char **libs, int nlibs, const char *name)
{
	int			i;

	for (i = 0; i < nlibs; i++)
	{
		if (strcmp(libs[i], name) == 0)
			return true;
	}
	return false;
}

/*
 * get_loadable_libraries()
 *
 * Fetch the names of all old libraries containing C-language functions.
 * We will later check that they all exist in the new installation.
 */
void
get_loadable_libraries(migratorContext *ctx)
{
	ClusterInfo *active_cluster = &ctx->old;
	PGresult  **ress;
	int			totaltups;
	int			dbnum;

	ress = (PGresult **)
		pg_malloc(ctx, active_cluster->dbarr.ndbs * sizeof(PGresult *));
	totaltups = 0;

	/* Fetch all library names, removing duplicates within each DB */
	for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
	{
		DbInfo	   *active_db = &active_cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(ctx, active_db->db_name, CLUSTER_OLD);

		/* Fetch all libraries referenced in this DB */
		ress[dbnum] = executeQueryOrDie(ctx, conn,
										"SELECT DISTINCT probin "
										"FROM pg_catalog.pg_proc "
										"WHERE prolang = 13 /* C */ AND "
										" probin IS NOT NULL AND "
										" oid >= %u;",
										FirstNormalObjectId);
		totaltups += PQntuples(ress[dbnum]);

		PQfinish(conn);
	}

	/* Allocate what's certainly enough space */
	if (totaltups > 0)
		ctx->libraries = (char **) pg_malloc(ctx, totaltups * sizeof(char *));
	else
		ctx->libraries = NULL;

	/*
	 * Now remove duplicates across DBs.  This is pretty inefficient code, but
	 * there probably aren't enough entries to matter.
	 */
	totaltups = 0;

	for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res = ress[dbnum];
		int			ntups = PQntuples(res);
		int			rowno;

		for (rowno = 0; rowno < ntups; rowno++)
		{
			char	   *lib = PQgetvalue(res, rowno, 0);

			if (!library_seen(ctx->libraries, totaltups, lib))
				ctx->libraries[totaltups++] = pg_strdup(ctx, lib);
		}
		PQclear(res);
	}

	ctx->num_libraries = totaltups;
	pg_free(ress);
}
/*
 * check_loadable_libraries()
 *
 * Check that the new cluster contains all required libraries.
 * We do this by actually trying to LOAD each one, thereby testing
 * compatibility as well as presence.  Failures are collected into
 * loadable_libraries.txt and then reported as one fatal error.
 */
void
check_loadable_libraries(migratorContext *ctx)
{
	PGconn	   *conn = connectToServer(ctx, "template1", CLUSTER_NEW);
	int			libnum;
	FILE	   *script = NULL;
	bool		found = false;
	char		output_path[MAXPGPATH];

	prep_status(ctx, "Checking for presence of required libraries");

	snprintf(output_path, sizeof(output_path), "%s/loadable_libraries.txt",
			 ctx->output_dir);

	for (libnum = 0; libnum < ctx->num_libraries; libnum++)
	{
		char	   *lib = ctx->libraries[libnum];
		int			llen = strlen(lib);

		/* 6 for "LOAD '", up to 2*llen escaped bytes, closing quote, NUL */
		char	   *cmd = (char *) pg_malloc(ctx, 8 + 2 * llen + 1);
		PGresult   *res;

		strcpy(cmd, "LOAD '");
		PQescapeStringConn(conn, cmd + 6, lib, llen, NULL);
		strcat(cmd, "'");

		res = PQexec(conn, cmd);

		if (PQresultStatus(res) != PGRES_COMMAND_OK)
		{
			found = true;
			/* open the report file lazily, only on the first failure */
			if (script == NULL && (script = fopen(output_path, "w")) == NULL)
				pg_log(ctx, PG_FATAL, "Could not create necessary file: %s\n",
					   output_path);
			fprintf(script, "Failed to load library: %s\n%s\n",
					lib,
					PQerrorMessage(conn));
		}

		PQclear(res);
		pg_free(cmd);
	}

	PQfinish(conn);

	if (found)
	{
		/* script is guaranteed non-NULL here: a failure either opened it
		 * or already aborted with PG_FATAL above */
		fclose(script);
		pg_log(ctx, PG_REPORT, "fatal\n");
		pg_log(ctx, PG_FATAL,
			   "| Your installation uses loadable libraries that are missing\n"
			   "| from the new installation.  You can add these libraries to\n"
			   "| the new installation, or remove the functions using them\n"
			   "| from the old installation.  A list of the problem libraries\n"
			   "| is in the file\n"
			   "| \"%s\".\n\n", output_path);
	}
	else
		check_ok(ctx);
}
This diff is collapsed.
/*
* opt.c
*
* options functions
*/
#include "pg_upgrade.h"
#include "getopt_long.h"
#ifdef WIN32
#include <io.h>
#endif
static void usage(migratorContext *ctx);
static void validateDirectoryOption(migratorContext *ctx, char **dirpath,
char *envVarName, char *cmdLineOption, char *description);
static void get_pkglibdirs(migratorContext *ctx);
static char *get_pkglibdir(migratorContext *ctx, const char *bindir);
/*
 * parseCommandLine()
 *
 * Parses the command line (argc, argv[]) into the given migratorContext object
 * and initializes the rest of the object (ports, user, output paths, log and
 * debug files).  Any invalid option is fatal.
 */
void
parseCommandLine(migratorContext *ctx, int argc, char *argv[])
{
	static struct option long_options[] = {
		{"old-datadir", required_argument, NULL, 'd'},
		{"new-datadir", required_argument, NULL, 'D'},
		{"old-bindir", required_argument, NULL, 'b'},
		{"new-bindir", required_argument, NULL, 'B'},
		{"old-port", required_argument, NULL, 'p'},
		{"new-port", required_argument, NULL, 'P'},
		{"user", required_argument, NULL, 'u'},
		{"check", no_argument, NULL, 'c'},
		{"debug", no_argument, NULL, 'g'},
		{"debugfile", required_argument, NULL, 'G'},
		{"link", no_argument, NULL, 'k'},
		{"logfile", required_argument, NULL, 'l'},
		{"verbose", no_argument, NULL, 'v'},
		{NULL, 0, NULL, 0}
	};

	/*
	 * getopt_long() returns int, and its -1 end-of-options sentinel cannot
	 * be represented reliably in a plain char (on platforms where char is
	 * unsigned, the loop below would never terminate), so this must be int.
	 */
	int			option;			/* Command line option */
	int			optindex = 0;	/* used by getopt_long */

	if (getenv("PGUSER"))
	{
		pg_free(ctx->user);
		ctx->user = pg_strdup(ctx, getenv("PGUSER"));
	}

	ctx->progname = get_progname(argv[0]);
	ctx->old.port = getenv("PGPORT") ? atoi(getenv("PGPORT")) : DEF_PGPORT;
	ctx->new.port = getenv("PGPORT") ? atoi(getenv("PGPORT")) : DEF_PGPORT;
	/* must save value, getenv()'s pointer is not stable */

	ctx->transfer_mode = TRANSFER_MODE_COPY;

	/* --help/--version are only recognized as the first argument */
	if (argc > 1)
	{
		if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-h") == 0 ||
			strcmp(argv[1], "-?") == 0)
		{
			usage(ctx);
			exit_nicely(ctx, false);
		}
		if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
		{
			pg_log(ctx, PG_REPORT, "pg_upgrade " PG_VERSION "\n");
			exit_nicely(ctx, false);
		}
	}

	if ((get_user_info(ctx, &ctx->user)) == 0)
		pg_log(ctx, PG_FATAL, "%s: cannot be run as root\n", ctx->progname);

#ifndef WIN32
	get_home_path(ctx->home_dir);
#else
	{
		char	   *tmppath;

		/* TMP is the best place on Windows, rather than APPDATA */
		if ((tmppath = getenv("TMP")) == NULL)
			pg_log(ctx, PG_FATAL, "TMP environment variable is not set.\n");
		snprintf(ctx->home_dir, MAXPGPATH, "%s", tmppath);
	}
#endif

	snprintf(ctx->output_dir, MAXPGPATH, "%s/" OUTPUT_SUBDIR, ctx->home_dir);

	while ((option = getopt_long(argc, argv, "d:D:b:B:cgG:kl:p:P:u:v",
								 long_options, &optindex)) != -1)
	{
		switch (option)
		{
			case 'd':
				ctx->old.pgdata = pg_strdup(ctx, optarg);
				break;

			case 'D':
				ctx->new.pgdata = pg_strdup(ctx, optarg);
				break;

			case 'b':
				ctx->old.bindir = pg_strdup(ctx, optarg);
				break;

			case 'B':
				ctx->new.bindir = pg_strdup(ctx, optarg);
				break;

			case 'c':
				ctx->check = true;
				break;

			case 'g':
				pg_log(ctx, PG_REPORT, "Running in debug mode\n");
				ctx->debug = true;
				break;

			case 'G':
				if ((ctx->debug_fd = fopen(optarg, "w")) == NULL)
				{
					pg_log(ctx, PG_FATAL, "cannot open debug file\n");
					exit_nicely(ctx, false);
				}
				break;

			case 'k':
				ctx->transfer_mode = TRANSFER_MODE_LINK;
				break;

			case 'l':
				ctx->logfile = pg_strdup(ctx, optarg);
				break;

			case 'p':
				if ((ctx->old.port = atoi(optarg)) <= 0)
				{
					pg_log(ctx, PG_FATAL, "invalid old port number\n");
					exit_nicely(ctx, false);
				}
				break;

			case 'P':
				if ((ctx->new.port = atoi(optarg)) <= 0)
				{
					pg_log(ctx, PG_FATAL, "invalid new port number\n");
					exit_nicely(ctx, false);
				}
				break;

			case 'u':
				pg_free(ctx->user);
				ctx->user = pg_strdup(ctx, optarg);
				break;

			case 'v':
				pg_log(ctx, PG_REPORT, "Running in verbose mode\n");
				ctx->verbose = true;
				break;

			default:
				pg_log(ctx, PG_FATAL,
					   "Try \"%s --help\" for more information.\n",
					   ctx->progname);
				break;
		}
	}

	if (ctx->logfile != NULL)
	{
		/*
		 * We must use append mode so output generated by child processes via
		 * ">>" will not be overwritten, and we want the file truncated on
		 * start.
		 */
		/* truncate */
		ctx->log_fd = fopen(ctx->logfile, "w");
		if (!ctx->log_fd)
			pg_log(ctx, PG_FATAL, "Cannot write to log file %s\n", ctx->logfile);
		fclose(ctx->log_fd);
		ctx->log_fd = fopen(ctx->logfile, "a");
		if (!ctx->log_fd)
			pg_log(ctx, PG_FATAL, "Cannot write to log file %s\n", ctx->logfile);
	}
	else
		ctx->logfile = strdup(DEVNULL);

	/* if no debug file name, output to the terminal */
	if (ctx->debug && !ctx->debug_fd)
	{
		ctx->debug_fd = fopen(DEVTTY, "w");
		if (!ctx->debug_fd)
			pg_log(ctx, PG_FATAL, "Cannot write to terminal\n");
	}

	/* Get values from env if not already set */
	validateDirectoryOption(ctx, &ctx->old.pgdata, "OLDDATADIR", "-d",
							"old cluster data resides");
	validateDirectoryOption(ctx, &ctx->new.pgdata, "NEWDATADIR", "-D",
							"new cluster data resides");
	validateDirectoryOption(ctx, &ctx->old.bindir, "OLDBINDIR", "-b",
							"old cluster binaries reside");
	validateDirectoryOption(ctx, &ctx->new.bindir, "NEWBINDIR", "-B",
							"new cluster binaries reside");

	get_pkglibdirs(ctx);
}
/*
 * usage()
 *
 * Print the --help text, including the current default ports and user.
 */
static void
usage(migratorContext *ctx)
{
	printf(_("\nUsage: pg_upgrade [OPTIONS]...\n\
\n\
Options:\n\
 -d, --old-datadir=OLDDATADIR old cluster data directory\n\
 -D, --new-datadir=NEWDATADIR new cluster data directory\n\
 -b, --old-bindir=OLDBINDIR old cluster executable directory\n\
 -B, --new-bindir=NEWBINDIR new cluster executable directory\n\
 -p, --old-port=portnum old cluster port number (default %d)\n\
 -P, --new-port=portnum new cluster port number (default %d)\n\
\n\
 -u, --user=username clusters superuser (default \"%s\")\n\
 -c, --check check clusters only, don't change any data\n\
 -g, --debug enable debugging\n\
 -G, --debugfile=DEBUGFILENAME output debugging activity to file\n\
 -k, --link link instead of copying files to new cluster\n\
 -l, --logfile=LOGFILENAME log session activity to file\n\
 -v, --verbose enable verbose output\n\
 -V, --version display version information, then exit\n\
 -h, --help show this help, then exit\n\
\n\
Before running pg_upgrade you must:\n\
 create a new database cluster (using the new version of initdb)\n\
 shutdown the postmaster servicing the old cluster\n\
 shutdown the postmaster servicing the new cluster\n\
\n\
When you run pg_upgrade, you must provide the following information:\n\
 the data directory for the old cluster (-d OLDDATADIR)\n\
 the data directory for the new cluster (-D NEWDATADIR)\n\
 the 'bin' directory for the old version (-b OLDBINDIR)\n\
 the 'bin' directory for the new version (-B NEWBINDIR)\n\
\n\
For example:\n\
 pg_upgrade -d oldCluster/data -D newCluster/data -b oldCluster/bin -B newCluster/bin\n\
or\n"), ctx->old.port, ctx->new.port, ctx->user);
	/* show environment-variable style invocation, per platform */
#ifndef WIN32
	printf(_("\
$ export OLDDATADIR=oldCluster/data\n\
$ export NEWDATADIR=newCluster/data\n\
$ export OLDBINDIR=oldCluster/bin\n\
$ export NEWBINDIR=newCluster/bin\n\
$ pg_upgrade\n"));
#else
	printf(_("\
C:\\> set OLDDATADIR=oldCluster/data\n\
C:\\> set NEWDATADIR=newCluster/data\n\
C:\\> set OLDBINDIR=oldCluster/bin\n\
C:\\> set NEWBINDIR=newCluster/bin\n\
C:\\> pg_upgrade\n"));
#endif
	printf(_("\n\
You may find it useful to save the preceding 5 commands in a shell script\n\
\n\
Report bugs to <pg-migrator-general@lists.pgfoundry.org>\n"));
}
/*
 * validateDirectoryOption()
 *
 * Validates a directory option.
 *	dirpath		  - the directory name supplied on the command line
 *	envVarName	  - the name of an environment variable to get if dirpath is NULL
 *	cmdLineOption - the command line option corresponds to this directory (-o, -O, -n, -N)
 *	description	  - a description of this directory option
 *
 * We use the last two arguments to construct a meaningful error message if the
 * user hasn't provided the required directory name.
 */
static void
validateDirectoryOption(migratorContext *ctx, char **dirpath,
					char *envVarName, char *cmdLineOption, char *description)
{
	size_t		len;

	/* no command-line value?  fall back to the environment variable */
	if (*dirpath == NULL || (strlen(*dirpath) == 0))
	{
		const char *envVar = getenv(envVarName);

		if (envVar != NULL && strlen(envVar) != 0)
			*dirpath = pg_strdup(ctx, envVar);
		else
		{
			pg_log(ctx, PG_FATAL, "You must identify the directory where the %s\n"
				   "Please use the %s command-line option or the %s environment variable\n",
				   description, cmdLineOption, envVarName);
		}
	}

	/*
	 * Trim off any trailing path separators
	 */
	len = strlen(*dirpath);
	if ((*dirpath)[len - 1] == pathSeparator)
		(*dirpath)[len - 1] = 0;
}
/*
 * get_pkglibdirs()
 *
 * Determine the package library directory for both clusters by asking
 * each cluster's pg_config binary.
 */
static void
get_pkglibdirs(migratorContext *ctx)
{
	ctx->old.libpath = get_pkglibdir(ctx, ctx->old.bindir);
	ctx->new.libpath = get_pkglibdir(ctx, ctx->new.bindir);
}
/*
 * get_pkglibdir()
 *
 * Runs "<bindir>/pg_config --pkglibdir" and returns its (newline-stripped)
 * output as a freshly pg_strdup'd string.  Any failure to run pg_config or
 * read its output is fatal.
 */
static char *
get_pkglibdir(migratorContext *ctx, const char *bindir)
{
	char		cmd[MAXPGPATH];
	char		bufin[MAX_STRING];
	FILE	   *output;
	int			i;

	snprintf(cmd, sizeof(cmd), "\"%s/pg_config\" --pkglibdir", bindir);

	if ((output = popen(cmd, "r")) == NULL)
		pg_log(ctx, PG_FATAL, "Could not get pkglibdir data: %s\n",
			   getErrorText(errno));

	/*
	 * Check the fgets() result: if pg_config produced no output, bufin
	 * would otherwise be read uninitialized (and strlen()-1 below would
	 * index bufin[-1] for an empty string).
	 */
	if (fgets(bufin, sizeof(bufin), output) == NULL)
	{
		pclose(output);
		pg_log(ctx, PG_FATAL, "Could not get pkglibdir data from: %s\n", cmd);
	}

	/* popen() succeeded, so output is known to be non-NULL here */
	pclose(output);

	/* Remove trailing newline */
	i = strlen(bufin) - 1;
	if (i >= 0 && bufin[i] == '\n')
		bufin[i] = '\0';

	return pg_strdup(ctx, bufin);
}
/*
* page.c
*
* per-page conversion operations
*/
#include "pg_upgrade.h"
#include "dynloader.h"
#include "storage/bufpage.h"
#ifdef PAGE_CONVERSION
static const char *getPageVersion(migratorContext *ctx,
uint16 *version, const char *pathName);
static pageCnvCtx *loadConverterPlugin(migratorContext *ctx,
uint16 newPageVersion, uint16 oldPageVersion);
/*
 * setupPageConverter()
 *
 * This function determines the PageLayoutVersion of the old cluster and
 * the PageLayoutVersion of the new cluster.  If the versions differ, this
 * function loads a converter plugin and returns a pointer to a pageCnvCtx
 * object (in *result) that knows how to convert pages from the old format
 * to the new format.  If the versions are identical, this function just
 * returns a NULL pageCnvCtx pointer to indicate that page-by-page conversion
 * is not required.
 *
 * If successful this function sets *result and returns NULL.  If an error
 * occurs, this function returns an error message in the form of a
 * null-terminated string.
 */
const char *
setupPageConverter(migratorContext *ctx, pageCnvCtx **result)
{
	uint16		oldPageVersion;
	uint16		newPageVersion;
	pageCnvCtx *converter;
	const char *msg;
	char		dstName[MAXPGPATH];
	char		srcName[MAXPGPATH];

	/* probe pg_database's file in each cluster to learn its page layout */
	snprintf(srcName, sizeof(srcName), "%s/global/%u", ctx->old.pgdata,
			 ctx->old.pg_database_oid);
	snprintf(dstName, sizeof(dstName), "%s/global/%u", ctx->new.pgdata,
			 ctx->new.pg_database_oid);

	msg = getPageVersion(ctx, &oldPageVersion, srcName);
	if (msg != NULL)
		return msg;
	msg = getPageVersion(ctx, &newPageVersion, dstName);
	if (msg != NULL)
		return msg;

	/*
	 * If the old cluster and new cluster use the same page layouts, then we
	 * don't need a page converter.
	 */
	if (oldPageVersion == newPageVersion)
	{
		*result = NULL;
		return NULL;
	}

	/*
	 * The clusters use differing page layouts; see if we can find a plugin
	 * that knows how to convert from the old layout to the new one.
	 */
	converter = loadConverterPlugin(ctx, newPageVersion, oldPageVersion);
	if (converter == NULL)
		return "can't find plugin to convert from old page layout to new page layout";

	*result = converter;
	return NULL;
}
/*
 * getPageVersion()
 *
 * Retrieves the PageLayoutVersion for the given relation.
 *
 * Returns NULL on success (and stores the PageLayoutVersion at *version),
 * if an error occurs, this function returns an error message (in the form
 * of a null-terminated string).
 */
static const char *
getPageVersion(migratorContext *ctx, uint16 *version, const char *pathName)
{
	int			relfd;
	PageHeaderData page;
	ssize_t		bytesRead;

	if ((relfd = open(pathName, O_RDONLY, 0)) < 0)
		return "can't open relation";

	if ((bytesRead = read(relfd, &page, sizeof(page))) != sizeof(page))
	{
		/* close the descriptor before bailing out -- it used to be leaked */
		close(relfd);
		return "can't read page header";
	}

	*version = PageGetPageLayoutVersion(&page);

	close(relfd);

	return NULL;
}
/*
 * loadConverterPlugin()
 *
 * This function loads a page-converter plugin library and grabs a
 * pointer to each of the (interesting) functions provided by that
 * plugin.  The name of the plugin library is derived from the given
 * newPageVersion and oldPageVersion.  If a plugin is found, this
 * function returns a pointer to a pageCnvCtx object (which will contain
 * a collection of plugin function pointers).  If the required plugin
 * is not found, this function returns NULL.
 */
static pageCnvCtx *
loadConverterPlugin(migratorContext *ctx, uint16 newPageVersion, uint16 oldPageVersion)
{
	char		pluginName[MAXPGPATH];
	void	   *plugin;

	/*
	 * Try to find a plugin that can convert pages of oldPageVersion into
	 * pages of newPageVersion.  For example, if we oldPageVersion = 3 and
	 * newPageVersion is 4, we search for a plugin named:
	 * plugins/convertLayout_3_to_4.dll
	 */

	/*
	 * FIXME: we are searching for plugins relative to the current directory,
	 * we should really search relative to our own executable instead.
	 */
	snprintf(pluginName, sizeof(pluginName), "./plugins/convertLayout_%d_to_%d%s",
			 oldPageVersion, newPageVersion, DLSUFFIX);

	if ((plugin = pg_dlopen(pluginName)) == NULL)
		return NULL;
	else
	{
		pageCnvCtx *result = (pageCnvCtx *) pg_malloc(ctx, sizeof(*result));

		result->old.PageVersion = oldPageVersion;
		result->new.PageVersion = newPageVersion;

		/* any of these may be NULL if the plugin doesn't export them */
		result->startup = (pluginStartup) pg_dlsym(plugin, "init");
		result->convertFile = (pluginConvertFile) pg_dlsym(plugin, "convertFile");
		result->convertPage = (pluginConvertPage) pg_dlsym(plugin, "convertPage");
		result->shutdown = (pluginShutdown) pg_dlsym(plugin, "fini");
		result->pluginData = NULL;

		/*
		 * If the plugin has exported an initializer, go ahead and invoke it.
		 */
		if (result->startup)
			result->startup(MIGRATOR_API_VERSION, &result->pluginVersion,
							newPageVersion, oldPageVersion, &result->pluginData);

		return result;
	}
}
#endif
This diff is collapsed.
This diff is collapsed.
/*
* pg_upgrade_sysoids.c
*
* server-side functions to set backend global variables
* to control oid and relfilenode assignment
*/
#include "postgres.h"
#include "fmgr.h"
#include "catalog/dependency.h"
#include "catalog/pg_class.h"
/* THIS IS USED ONLY FOR PG >= 9.0 */
/*
* Cannot include "catalog/pg_enum.h" here because we might
* not be compiling against PG 9.0.
*/
extern void EnumValuesCreate(Oid enumTypeOid, List *vals,
Oid binary_upgrade_next_pg_enum_oid);
#ifdef PG_MODULE_MAGIC
PG_MODULE_MAGIC;
#endif
extern PGDLLIMPORT Oid binary_upgrade_next_pg_type_oid;
extern PGDLLIMPORT Oid binary_upgrade_next_pg_type_array_oid;
extern PGDLLIMPORT Oid binary_upgrade_next_pg_type_toast_oid;
extern PGDLLIMPORT Oid binary_upgrade_next_heap_relfilenode;
extern PGDLLIMPORT Oid binary_upgrade_next_toast_relfilenode;
extern PGDLLIMPORT Oid binary_upgrade_next_index_relfilenode;
Datum set_next_pg_type_oid(PG_FUNCTION_ARGS);
Datum set_next_pg_type_array_oid(PG_FUNCTION_ARGS);
Datum set_next_pg_type_toast_oid(PG_FUNCTION_ARGS);
Datum set_next_heap_relfilenode(PG_FUNCTION_ARGS);
Datum set_next_toast_relfilenode(PG_FUNCTION_ARGS);
Datum set_next_index_relfilenode(PG_FUNCTION_ARGS);
Datum add_pg_enum_label(PG_FUNCTION_ARGS);
PG_FUNCTION_INFO_V1(set_next_pg_type_oid);
PG_FUNCTION_INFO_V1(set_next_pg_type_array_oid);
PG_FUNCTION_INFO_V1(set_next_pg_type_toast_oid);
PG_FUNCTION_INFO_V1(set_next_heap_relfilenode);
PG_FUNCTION_INFO_V1(set_next_toast_relfilenode);
PG_FUNCTION_INFO_V1(set_next_index_relfilenode);
PG_FUNCTION_INFO_V1(add_pg_enum_label);
/*
 * set_next_pg_type_oid()
 *
 * Stash the OID that the backend should assign to the next pg_type row.
 */
Datum
set_next_pg_type_oid(PG_FUNCTION_ARGS)
{
	binary_upgrade_next_pg_type_oid = PG_GETARG_OID(0);

	PG_RETURN_VOID();
}
/*
 * set_next_pg_type_array_oid()
 *
 * Stash the OID to assign to the next pg_type row for an array type.
 */
Datum
set_next_pg_type_array_oid(PG_FUNCTION_ARGS)
{
	binary_upgrade_next_pg_type_array_oid = PG_GETARG_OID(0);

	PG_RETURN_VOID();
}
/*
 * set_next_pg_type_toast_oid()
 *
 * Stash the OID to assign to the next pg_type row for a TOAST type.
 */
Datum
set_next_pg_type_toast_oid(PG_FUNCTION_ARGS)
{
	binary_upgrade_next_pg_type_toast_oid = PG_GETARG_OID(0);

	PG_RETURN_VOID();
}
/*
 * set_next_heap_relfilenode()
 *
 * Stash the relfilenode to use for the next heap relation created.
 */
Datum
set_next_heap_relfilenode(PG_FUNCTION_ARGS)
{
	binary_upgrade_next_heap_relfilenode = PG_GETARG_OID(0);

	PG_RETURN_VOID();
}
/*
 * set_next_toast_relfilenode()
 *
 * Stash the relfilenode to use for the next TOAST relation created.
 */
Datum
set_next_toast_relfilenode(PG_FUNCTION_ARGS)
{
	binary_upgrade_next_toast_relfilenode = PG_GETARG_OID(0);

	PG_RETURN_VOID();
}
/*
 * set_next_index_relfilenode()
 *
 * Stash the relfilenode to use for the next index relation created.
 */
Datum
set_next_index_relfilenode(PG_FUNCTION_ARGS)
{
	binary_upgrade_next_index_relfilenode = PG_GETARG_OID(0);

	PG_RETURN_VOID();
}
Datum
add_pg_enum_label(PG_FUNCTION_ARGS)
{
Oid enumoid = PG_GETARG_OID(0);
Oid typoid = PG_GETARG_OID(1);
Name label = PG_GETARG_NAME(2);
EnumValuesCreate(typoid, list_make1(makeString(NameStr(*label))),
enumoid);
PG_RETURN_VOID();
}
/*
* relfilenode.c
*
* relfilenode functions
*/
#include "pg_upgrade.h"
#ifdef EDB_NATIVE_LANG
#include <fcntl.h>
#endif
#include "catalog/pg_class.h"
#include "access/transam.h"
static void transfer_single_new_db(migratorContext *ctx, pageCnvCtx *pageConverter,
FileNameMap *maps, int size);
static void transfer_relfile(migratorContext *ctx, pageCnvCtx *pageConverter,
const char *fromfile, const char *tofile,
const char *oldnspname, const char *oldrelname,
const char *newnspname, const char *newrelname);
/*
 * transfer_all_new_dbs()
 *
 * Responsible for upgrading all databases.  Invokes routines to generate
 * mappings and then physically link the databases.
 */
const char *
transfer_all_new_dbs(migratorContext *ctx, DbInfoArr *olddb_arr,
					 DbInfoArr *newdb_arr, char *old_pgdata, char *new_pgdata)
{
	int			dbnum;
	const char *msg = NULL;

	prep_status(ctx, "Restoring user relation files\n");

	/* Process each database present in the new cluster. */
	for (dbnum = 0; dbnum < newdb_arr->ndbs; dbnum++)
	{
		DbInfo	   *new_db = &newdb_arr->dbs[dbnum];
		/* locate the matching old-cluster database by name */
		DbInfo	   *old_db = dbarr_lookup_db(olddb_arr, new_db->db_name);
		FileNameMap *mappings;
		int			n_maps;
		pageCnvCtx *pageConverter = NULL;

		n_maps = 0;
		/* build old-file -> new-file mappings for this database's relations */
		mappings = gen_db_file_maps(ctx, old_db, new_db, &n_maps, old_pgdata,
									new_pgdata);

		if (n_maps)
		{
			print_maps(ctx, mappings, n_maps, new_db->db_name);

#ifdef PAGE_CONVERSION
			/*
			 * NOTE(review): msg presumably carries a page-converter setup
			 * problem back to the caller; only the last database's result
			 * is returned -- confirm against setupPageConverter().
			 */
			msg = setupPageConverter(ctx, &pageConverter);
#endif
			transfer_single_new_db(ctx, pageConverter, mappings, n_maps);

			pg_free(mappings);
		}
	}

	prep_status(ctx, "");		/* in case nothing printed */
	check_ok(ctx);

	return msg;
}
/*
 * get_pg_database_relfilenode()
 *
 * Retrieves the relfilenode of the pg_database system catalog.  We need
 * this relfilenode later in the upgrade process.
 */
void
get_pg_database_relfilenode(migratorContext *ctx, Cluster whichCluster)
{
	PGconn	   *conn = connectToServer(ctx, "template1", whichCluster);
	PGresult   *res;
	int			i_relfile;

	/* Look up pg_database's relfilenode in pg_class. */
	res = executeQueryOrDie(ctx, conn,
							"SELECT c.relname, c.relfilenode "
							"FROM 	pg_catalog.pg_class c, "
							"		pg_catalog.pg_namespace n "
							"WHERE 	c.relnamespace = n.oid AND "
							"		n.nspname = 'pg_catalog' AND "
							"		c.relname = 'pg_database' "
							"ORDER BY c.relname");

	i_relfile = PQfnumber(res, "relfilenode");

	/*
	 * NOTE(review): despite the field's name, this stores pg_database's
	 * *relfilenode* into pg_database_oid -- confirm callers expect the
	 * relfilenode here (for freshly-initdb'd catalogs they may coincide).
	 */
	if (whichCluster == CLUSTER_OLD)
		ctx->old.pg_database_oid = atol(PQgetvalue(res, 0, i_relfile));
	else
		ctx->new.pg_database_oid = atol(PQgetvalue(res, 0, i_relfile));

	PQclear(res);
	PQfinish(conn);
}
/*
* transfer_single_new_db()
*
* create links for mappings stored in "maps" array.
*/
static void
transfer_single_new_db(migratorContext *ctx, pageCnvCtx *pageConverter,
					   FileNameMap *maps, int size)
{
	int			mapnum;

	for (mapnum = 0; mapnum < size; mapnum++)
	{
		char		old_file[MAXPGPATH];
		char		new_file[MAXPGPATH];
		struct dirent **namelist = NULL;
		int			numFiles;

		/* Copying files might take some time, so give feedback. */

		snprintf(old_file, sizeof(old_file), "%s/%u", maps[mapnum].old_file, maps[mapnum].old);
		snprintf(new_file, sizeof(new_file), "%s/%u", maps[mapnum].new_file, maps[mapnum].new);
		pg_log(ctx, PG_REPORT, OVERWRITE_MESSAGE, old_file);

		/*
		 * Copy/link the relation file to the new cluster
		 */
		/* remove any existing target file before creating the new one */
		unlink(new_file);
		transfer_relfile(ctx, pageConverter, old_file, new_file,
						 maps[mapnum].old_nspname, maps[mapnum].old_relname,
						 maps[mapnum].new_nspname, maps[mapnum].new_relname);

		/* fsm/vm files added in PG 8.4 */
		if (GET_MAJOR_VERSION(ctx->old.major_version) >= 804)
		{
			/*
			 * Now copy/link any fsm and vm files, if they exist
			 */
			/* pattern "<old relfilenode>_" matches the _fsm/_vm forks */
			snprintf(scandir_file_pattern, sizeof(scandir_file_pattern), "%u_", maps[mapnum].old);
			numFiles = pg_scandir(ctx, maps[mapnum].old_file, &namelist, dir_matching_filenames, NULL);

			while (numFiles--)
			{
				snprintf(old_file, sizeof(old_file), "%s/%s", maps[mapnum].old_file,
						 namelist[numFiles]->d_name);
				/* new name: new relfilenode + the old file's "_..." suffix */
				snprintf(new_file, sizeof(new_file), "%s/%u%s", maps[mapnum].new_file,
						 maps[mapnum].new, strchr(namelist[numFiles]->d_name, '_'));

				unlink(new_file);
				transfer_relfile(ctx, pageConverter, old_file, new_file,
								 maps[mapnum].old_nspname, maps[mapnum].old_relname,
								 maps[mapnum].new_nspname, maps[mapnum].new_relname);

				pg_free(namelist[numFiles]);
			}

			pg_free(namelist);
		}

		/*
		 * Now copy/link any related segments as well. Remember, PG breaks
		 * large files into 1GB segments, the first segment has no extension,
		 * subsequent segments are named relfilenode.1, relfilenode.2,
		 * relfilenode.3, ... 'fsm' and 'vm' files use underscores so are not
		 * copied.
		 */
		snprintf(scandir_file_pattern, sizeof(scandir_file_pattern), "%u.", maps[mapnum].old);
		numFiles = pg_scandir(ctx, maps[mapnum].old_file, &namelist, dir_matching_filenames, NULL);

		while (numFiles--)
		{
			snprintf(old_file, sizeof(old_file), "%s/%s", maps[mapnum].old_file,
					 namelist[numFiles]->d_name);
			/* new name: new relfilenode + the old segment's ".N" suffix */
			snprintf(new_file, sizeof(new_file), "%s/%u%s", maps[mapnum].new_file,
					 maps[mapnum].new, strchr(namelist[numFiles]->d_name, '.'));

			unlink(new_file);
			transfer_relfile(ctx, pageConverter, old_file, new_file,
							 maps[mapnum].old_nspname, maps[mapnum].old_relname,
							 maps[mapnum].new_nspname, maps[mapnum].new_relname);

			pg_free(namelist[numFiles]);
		}

		pg_free(namelist);
	}
}
/*
* transfer_relfile()
*
* Copy or link file from old cluster to new one.
*/
/*
 * transfer_relfile()
 *
 * Copy or link file from old cluster to new one.
 */
static void
transfer_relfile(migratorContext *ctx, pageCnvCtx *pageConverter, const char *oldfile,
				 const char *newfile, const char *oldnspname, const char *oldrelname,
				 const char *newnspname, const char *newrelname)
{
	const char *errmsg;

	/* Page-by-page conversion must rewrite the file, so links cannot work. */
	if ((ctx->transfer_mode == TRANSFER_MODE_LINK) && (pageConverter != NULL))
		pg_log(ctx, PG_FATAL, "this migration requires page-by-page conversion, "
			   "you must use copy-mode instead of link-mode\n");

	if (ctx->transfer_mode != TRANSFER_MODE_COPY)
	{
		/* link mode */
		pg_log(ctx, PG_INFO, "linking %s to %s\n", newfile, oldfile);

		errmsg = linkAndUpdateFile(ctx, pageConverter, oldfile, newfile);
		if (errmsg != NULL)
			pg_log(ctx, PG_FATAL,
				   "error while creating link from %s.%s(%s) to %s.%s(%s): %s\n",
				   oldnspname, oldrelname, oldfile, newnspname, newrelname,
				   newfile, errmsg);
	}
	else
	{
		/* copy mode */
		pg_log(ctx, PG_INFO, "copying %s to %s\n", oldfile, newfile);

		errmsg = copyAndUpdateFile(ctx, pageConverter, oldfile, newfile, true);
		if (errmsg != NULL)
			pg_log(ctx, PG_FATAL, "error while copying %s.%s(%s) to %s.%s(%s): %s\n",
				   oldnspname, oldrelname, oldfile, newnspname, newrelname, newfile, errmsg);
	}
}
/*
* server.c
*
* database server functions
*/
#include "pg_upgrade.h"
#define POSTMASTER_UPTIME 20
#define STARTUP_WARNING_TRIES 2
static pgpid_t get_postmaster_pid(migratorContext *ctx, const char *datadir);
static bool test_server_conn(migratorContext *ctx, int timeout,
Cluster whichCluster);
/*
* connectToServer()
*
* Connects to the desired database on the designated server.
* If the connection attempt fails, this function logs an error
* message and calls exit_nicely() to kill the program.
*/
/*
 * connectToServer()
 *
 * Connects to the desired database on the designated server.
 * If the connection attempt fails, this function logs an error
 * message and calls exit_nicely() to kill the program.
 */
PGconn *
connectToServer(migratorContext *ctx, const char *db_name,
				Cluster whichCluster)
{
	char		connectString[MAXPGPATH];
	unsigned short port;
	PGconn	   *conn;

	if (whichCluster == CLUSTER_OLD)
		port = ctx->old.port;
	else
		port = ctx->new.port;

	snprintf(connectString, sizeof(connectString),
			 "dbname = '%s' user = '%s' port = %d", db_name, ctx->user, port);

	conn = PQconnectdb(connectString);

	if (conn != NULL && PQstatus(conn) == CONNECTION_OK)
		return conn;

	/* Report the failure, release the connection object, and bail out. */
	pg_log(ctx, PG_REPORT, "Connection to database failed: %s\n",
		   PQerrorMessage(conn));
	if (conn)
		PQfinish(conn);
	exit_nicely(ctx, true);

	return NULL;				/* not reached; exit_nicely() terminates */
}
/*
* executeQueryOrDie()
*
* Formats a query string from the given arguments and executes the
* resulting query. If the query fails, this function logs an error
* message and calls exit_nicely() to kill the program.
*/
PGresult *
executeQueryOrDie(migratorContext *ctx, PGconn *conn, const char *fmt,...)
{
	/* static buffer: this function is not reentrant or thread-safe */
	static char command[8192];
	va_list		args;
	PGresult   *result;
	ExecStatusType status;

	va_start(args, fmt);
	/* queries longer than the buffer are silently truncated by vsnprintf */
	vsnprintf(command, sizeof(command), fmt, args);
	va_end(args);

	pg_log(ctx, PG_DEBUG, "executing: %s\n", command);
	result = PQexec(conn, command);
	status = PQresultStatus(result);

	/* Accept both row-returning and command-only successes. */
	if ((status != PGRES_TUPLES_OK) && (status != PGRES_COMMAND_OK))
	{
		pg_log(ctx, PG_REPORT, "DB command failed\n%s\n%s\n", command,
			   PQerrorMessage(conn));
		PQclear(result);
		PQfinish(conn);
		exit_nicely(ctx, true);
		return NULL;			/* Never get here, but keeps compiler happy */
	}
	else
		return result;
}
/*
* get_postmaster_pid()
*
* Returns the pid of the postmaster running on datadir. pid is retrieved
* from the postmaster.pid file
*/
/*
 * get_postmaster_pid()
 *
 * Returns the pid recorded in datadir's postmaster.pid file, or 0 if the
 * file cannot be opened.  A pid file with unparsable contents is fatal.
 */
static pgpid_t
get_postmaster_pid(migratorContext *ctx, const char *datadir)
{
	char		pid_file[MAXPGPATH];
	FILE	   *pidf;
	long		pid = 0;

	snprintf(pid_file, sizeof(pid_file), "%s/postmaster.pid", datadir);

	if ((pidf = fopen(pid_file, "r")) == NULL)
		return (pgpid_t) 0;		/* no pid file */

	if (fscanf(pidf, "%ld", &pid) != 1)
	{
		fclose(pidf);
		pg_log(ctx, PG_FATAL, "%s: invalid data in PID file \"%s\"\n",
			   ctx->progname, pid_file);
	}
	fclose(pidf);

	return (pgpid_t) pid;
}
/*
* get_major_server_version()
*
* gets the version (in unsigned int form) for the given "datadir". Assumes
* that datadir is an absolute path to a valid pgdata directory. The version
* is retrieved by reading the PG_VERSION file.
*/
/*
 * get_major_server_version()
 *
 * gets the version (in unsigned int form) for the given "datadir". Assumes
 * that datadir is an absolute path to a valid pgdata directory. The version
 * is retrieved by reading the PG_VERSION file.
 *
 * Returns 0 if PG_VERSION cannot be opened; *verstr is always allocated
 * (64 bytes) and holds the raw version string on success.
 */
uint32
get_major_server_version(migratorContext *ctx, char **verstr, Cluster whichCluster)
{
	const char *datadir = whichCluster == CLUSTER_OLD ?
	ctx->old.pgdata : ctx->new.pgdata;
	FILE	   *version_fd;
	char		ver_file[MAXPGPATH];
	int			integer_version = 0;
	int			fractional_version = 0;

	*verstr = pg_malloc(ctx, 64);

	snprintf(ver_file, sizeof(ver_file), "%s/PG_VERSION", datadir);
	if ((version_fd = fopen(ver_file, "r")) == NULL)
		return 0;

	/*
	 * Check for != 1, not == 0: fscanf() returns EOF (-1) on an empty file,
	 * which the old "== 0" test let through to sscanf() an uninitialized
	 * buffer.
	 */
	if (fscanf(version_fd, "%63s", *verstr) != 1 ||
		sscanf(*verstr, "%d.%d", &integer_version, &fractional_version) != 2)
	{
		/* close before pg_log(PG_FATAL), which does not return */
		fclose(version_fd);
		pg_log(ctx, PG_FATAL, "could not get version from %s\n", datadir);
		return 0;
	}

	/* the original leaked this FILE on the success path */
	fclose(version_fd);

	/* e.g. 8.4 -> 80400 */
	return (100 * integer_version + fractional_version) * 100;
}
/*
 * start_postmaster()
 *
 * Starts the postmaster for the selected cluster with autovacuum disabled,
 * waits until it accepts connections, then records its pid and the running
 * cluster in the context.  Fatal error if the server does not come up.
 */
void
start_postmaster(migratorContext *ctx, Cluster whichCluster, bool quiet)
{
	char		cmd[MAXPGPATH];
	const char *bindir;
	const char *datadir;
	unsigned short port;

	if (whichCluster == CLUSTER_OLD)
	{
		bindir = ctx->old.bindir;
		datadir = ctx->old.pgdata;
		port = ctx->old.port;
	}
	else
	{
		bindir = ctx->new.bindir;
		datadir = ctx->new.pgdata;
		port = ctx->new.port;
	}

	/*
	 * Use snprintf, not sprintf, so long bindir/logfile/datadir paths
	 * cannot overflow cmd.  Use -l for Win32.
	 */
	snprintf(cmd, sizeof(cmd),
			 SYSTEMQUOTE "\"%s/pg_ctl\" -l \"%s\" -D \"%s\" "
			 "-o \"-p %d -c autovacuum=off -c autovacuum_freeze_max_age=2000000000\" "
			 "start >> \"%s\" 2>&1" SYSTEMQUOTE,
			 bindir, ctx->logfile, datadir, port, ctx->logfile);

	exec_prog(ctx, true, "%s", cmd);

	/* wait for the server to start properly */
	if (test_server_conn(ctx, POSTMASTER_UPTIME, whichCluster) == false)
		pg_log(ctx, PG_FATAL, " Unable to start %s postmaster with the command: %s\nPerhaps pg_hba.conf was not set to \"trust\".",
			   CLUSTERNAME(whichCluster), cmd);

	if ((ctx->postmasterPID = get_postmaster_pid(ctx, datadir)) == 0)
		pg_log(ctx, PG_FATAL, " Unable to get postmaster pid\n");

	ctx->running_cluster = whichCluster;
}
/*
 * stop_postmaster()
 *
 * Stops whichever cluster's postmaster is currently running (if any),
 * optionally with a fast shutdown, and clears the running-cluster state.
 */
void
stop_postmaster(migratorContext *ctx, bool fast, bool quiet)
{
	const char *bindir;
	const char *datadir;

	switch (ctx->running_cluster)
	{
		case CLUSTER_OLD:
			bindir = ctx->old.bindir;
			datadir = ctx->old.pgdata;
			break;
		case CLUSTER_NEW:
			bindir = ctx->new.bindir;
			datadir = ctx->new.pgdata;
			break;
		default:
			return;				/* no cluster running */
	}

	/* use -l for Win32; a fast shutdown's failure is not fatal */
	exec_prog(ctx, !fast,
			  SYSTEMQUOTE "\"%s/pg_ctl\" -l \"%s\" -D \"%s\" %s stop >> \"%s\" 2>&1" SYSTEMQUOTE,
			  bindir, ctx->logfile, datadir, fast ? "-m fast" : "", ctx->logfile);

	ctx->postmasterPID = 0;
	ctx->running_cluster = NONE;
}
/*
 * test_server_conn()
 *
 * tests whether postmaster is running or not by trying to connect
 * to it. If connection is unsuccessful we do a sleep of 1 sec and then
 * try the connection again. This process continues "timeout" times.
 *
 * Returns true if the connection attempt was successful, false otherwise.
 */
static bool
test_server_conn(migratorContext *ctx, int timeout, Cluster whichCluster)
{
	PGconn	   *conn = NULL;
	char		con_opts[MAX_STRING];
	int			tries;
	unsigned short port = (whichCluster == CLUSTER_OLD) ?
	ctx->old.port : ctx->new.port;
	bool		ret = false;

	snprintf(con_opts, sizeof(con_opts),
			 "dbname = 'template1' user = '%s' port = %d ", ctx->user, port);

	for (tries = 0; tries < timeout; tries++)
	{
		sleep(1);				/* give the postmaster time to come up */
		if ((conn = PQconnectdb(con_opts)) != NULL &&
			PQstatus(conn) == CONNECTION_OK)
		{
			PQfinish(conn);
			ret = true;
			break;
		}

		/*
		 * A failed PQconnectdb() still returns a PGconn object that must be
		 * freed with PQfinish(); the previous code leaked one per retry.
		 */
		if (conn != NULL)
		{
			PQfinish(conn);
			conn = NULL;
		}

		/* after a few silent attempts, start printing progress output */
		if (tries == STARTUP_WARNING_TRIES)
			prep_status(ctx, "Trying to start %s server ",
						CLUSTERNAME(whichCluster));
		else if (tries > STARTUP_WARNING_TRIES)
			pg_log(ctx, PG_REPORT, ".");
	}

	/* terminate the progress line, if one was started */
	if (tries > STARTUP_WARNING_TRIES)
		check_ok(ctx);

	return ret;
}
/*
* check_for_libpq_envvars()
*
* tests whether any libpq environment variables are set.
* Since pg_upgrade connects to both the old and the new server,
* it is potentially dangerous to have any of these set.
*
* If any are found, will log them and cancel.
*/
/*
 * check_for_libpq_envvars()
 *
 * Warns about every libpq environment variable that is currently set,
 * then aborts if any were found; pg_upgrade talks to two servers, so
 * stray connection settings are dangerous.
 */
void
check_for_libpq_envvars(migratorContext *ctx)
{
	PQconninfoOption *start = PQconndefaults();
	PQconninfoOption *option;
	bool		found = false;

	/* Walk libpq's list of recognized connection options. */
	for (option = start; option->keyword != NULL; option++)
	{
		const char *value;

		if (option->envvar && (value = getenv(option->envvar)) && strlen(value) > 0)
		{
			found = true;
			pg_log(ctx, PG_WARNING,
				   "libpq env var %-20s is currently set to: %s\n", option->envvar, value);
		}
	}

	/* Free the memory that libpq allocated on our behalf */
	PQconninfoFree(start);

	if (found)
		pg_log(ctx, PG_FATAL,
			   "libpq env vars have been found and listed above, please unset them for pg_upgrade\n");
}
/*
* tablespace.c
*
* tablespace functions
*/
#include "pg_upgrade.h"
static void get_tablespace_paths(migratorContext *ctx);
static void set_tablespace_directory_suffix(migratorContext *ctx,
Cluster whichCluster);
/*
 * init_tablespaces()
 *
 * Collects the old cluster's tablespace paths and computes the per-cluster
 * tablespace directory suffixes; aborts if tablespaces exist but the two
 * clusters would share the same suffix (their files would collide).
 */
void
init_tablespaces(migratorContext *ctx)
{
	get_tablespace_paths(ctx);

	set_tablespace_directory_suffix(ctx, CLUSTER_OLD);
	set_tablespace_directory_suffix(ctx, CLUSTER_NEW);

	if (ctx->num_tablespaces > 0)
	{
		if (strcmp(ctx->old.tablespace_suffix, ctx->new.tablespace_suffix) == 0)
			pg_log(ctx, PG_FATAL,
				   "Cannot migrate to/from the same system catalog version when\n"
				   "using tablespaces.\n");
	}
}
/*
 * get_tablespace_paths()
 *
 * Scans pg_tablespace and stores a malloc'ed array of all non-default
 * tablespace paths in the migrator context.  It is the context owner's
 * responsibility to free the array.
 */
static void
get_tablespace_paths(migratorContext *ctx)
{
	PGconn	   *conn = connectToServer(ctx, "template1", CLUSTER_OLD);
	PGresult   *res;
	int			ntups;
	int			tblnum;
	int			i_spclocation;

	/* pg_default and pg_global live inside the data directory; skip them */
	res = executeQueryOrDie(ctx, conn,
							"SELECT	spclocation "
							"FROM	pg_catalog.pg_tablespace "
							"WHERE	spcname != 'pg_default' AND "
							"		spcname != 'pg_global'");

	ctx->num_tablespaces = ntups = PQntuples(res);
	/* NOTE(review): ntups may be 0; pg_malloc(ctx, 0) behavior -- confirm */
	ctx->tablespaces = (char **) pg_malloc(ctx, ntups * sizeof(char *));

	i_spclocation = PQfnumber(res, "spclocation");

	for (tblnum = 0; tblnum < ntups; tblnum++)
		ctx->tablespaces[tblnum] = pg_strdup(ctx,
									 PQgetvalue(res, tblnum, i_spclocation));

	PQclear(res);
	PQfinish(conn);

	return;
}
/*
 * set_tablespace_directory_suffix()
 *
 * Computes the cluster's tablespace subdirectory suffix: empty for
 * pre-9.0 clusters, "/PG_<major version>_<catalog version>" afterward.
 */
static void
set_tablespace_directory_suffix(migratorContext *ctx, Cluster whichCluster)
{
	ClusterInfo *cluster = (whichCluster == CLUSTER_OLD) ? &ctx->old : &ctx->new;

	if (GET_MAJOR_VERSION(cluster->major_version) <= 804)
		cluster->tablespace_suffix = pg_strdup(ctx, "");	/* pre-9.0: no subdirectory */
	else
	{
		/* This cluster has a version-specific subdirectory */

		/* 4 = strlen("/PG_"); 10 = max digits of the catalog version number */
		cluster->tablespace_suffix = pg_malloc(ctx, 4 + strlen(cluster->major_version_str) +
											   10 /* OIDCHARS */ + 1);

		/* The leading slash is needed to start a new directory. */
		sprintf(cluster->tablespace_suffix, "/PG_%s_%d", cluster->major_version_str,
				cluster->controldata.cat_ver);
	}
}
This diff is collapsed.
/*
* version.c
*
* Postgres-version-specific routines
*/
#include "pg_upgrade.h"
#include "access/transam.h"
/*
* new_9_0_populate_pg_largeobject_metadata()
* new >= 9.0, old <= 8.4
* 9.0 has a new pg_largeobject permission table
*/
void
new_9_0_populate_pg_largeobject_metadata(migratorContext *ctx, bool check_mode,
										 Cluster whichCluster)
{
	ClusterInfo *active_cluster = (whichCluster == CLUSTER_OLD) ?
	&ctx->old : &ctx->new;
	int			dbnum;
	FILE	   *script = NULL;
	bool		found = false;
	char		output_path[MAXPGPATH];

	prep_status(ctx, "Checking for large objects");

	snprintf(output_path, sizeof(output_path), "%s/pg_largeobject.sql",
			 ctx->output_dir);

	/* Check every database in the cluster for large objects. */
	for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		int			i_count;
		DbInfo	   *active_db = &active_cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(ctx, active_db->db_name, whichCluster);

		/* find if there are any large objects */
		res = executeQueryOrDie(ctx, conn,
								"SELECT count(*) "
								"FROM pg_catalog.pg_largeobject ");

		i_count = PQfnumber(res, "count");
		if (atoi(PQgetvalue(res, 0, i_count)) != 0)
		{
			found = true;
			if (!check_mode)
			{
				/* open the fix-up script lazily, on first database needing it */
				if (script == NULL && (script = fopen(output_path, "w")) == NULL)
					pg_log(ctx, PG_FATAL, "Could not create necessary file: %s\n", output_path);
				fprintf(script, "\\connect %s\n",
						quote_identifier(ctx, active_db->db_name));
				/* lo_create() populates the 9.0 pg_largeobject permission table */
				fprintf(script,
						"SELECT pg_catalog.lo_create(t.loid)\n"
						"FROM (SELECT DISTINCT loid FROM pg_catalog.pg_largeobject) AS t;\n");
			}
		}

		PQclear(res);
		PQfinish(conn);
	}

	if (found)
	{
		/* script was opened above whenever found && !check_mode */
		if (!check_mode)
			fclose(script);
		report_status(ctx, PG_WARNING, "warning");
		if (check_mode)
			pg_log(ctx, PG_WARNING, "\n"
				   "| Your installation contains large objects.\n"
				   "| The new database has an additional large object\n"
				   "| permission table.  After migration, you will be\n"
				   "| given a command to populate the pg_largeobject\n"
				   "| permission table with default permissions.\n\n");
		else
			pg_log(ctx, PG_WARNING, "\n"
				   "| Your installation contains large objects.\n"
				   "| The new database has an additional large object\n"
				   "| permission table so default permissions must be\n"
				   "| defined for all large objects.  The file:\n"
				   "| \t%s\n"
				   "| when executed by psql by the database super-user\n"
				   "| will define the default permissions.\n\n",
				   output_path);
	}
	else
		check_ok(ctx);
}
This diff is collapsed.
<!-- $PostgreSQL: pgsql/doc/src/sgml/contrib.sgml,v 1.16 2010/01/28 23:59:52 adunstan Exp $ --> <!-- $PostgreSQL: pgsql/doc/src/sgml/contrib.sgml,v 1.17 2010/05/12 02:19:11 momjian Exp $ -->
<appendix id="contrib"> <appendix id="contrib">
<title>Additional Supplied Modules</title> <title>Additional Supplied Modules</title>
...@@ -110,6 +110,7 @@ psql -d dbname -f <replaceable>SHAREDIR</>/contrib/<replaceable>module</>.sql ...@@ -110,6 +110,7 @@ psql -d dbname -f <replaceable>SHAREDIR</>/contrib/<replaceable>module</>.sql
&pgstatstatements; &pgstatstatements;
&pgstattuple; &pgstattuple;
&pgtrgm; &pgtrgm;
&pgupgrade;
&seg; &seg;
&contrib-spi; &contrib-spi;
&sslinfo; &sslinfo;
......
<!-- $PostgreSQL: pgsql/doc/src/sgml/filelist.sgml,v 1.67 2010/02/22 11:47:30 heikki Exp $ --> <!-- $PostgreSQL: pgsql/doc/src/sgml/filelist.sgml,v 1.68 2010/05/12 02:19:11 momjian Exp $ -->
<!entity history SYSTEM "history.sgml"> <!entity history SYSTEM "history.sgml">
<!entity info SYSTEM "info.sgml"> <!entity info SYSTEM "info.sgml">
...@@ -122,6 +122,7 @@ ...@@ -122,6 +122,7 @@
<!entity pgstatstatements SYSTEM "pgstatstatements.sgml"> <!entity pgstatstatements SYSTEM "pgstatstatements.sgml">
<!entity pgstattuple SYSTEM "pgstattuple.sgml"> <!entity pgstattuple SYSTEM "pgstattuple.sgml">
<!entity pgtrgm SYSTEM "pgtrgm.sgml"> <!entity pgtrgm SYSTEM "pgtrgm.sgml">
<!entity pgupgrade SYSTEM "pgupgrade.sgml">
<!entity seg SYSTEM "seg.sgml"> <!entity seg SYSTEM "seg.sgml">
<!entity contrib-spi SYSTEM "contrib-spi.sgml"> <!entity contrib-spi SYSTEM "contrib-spi.sgml">
<!entity sslinfo SYSTEM "sslinfo.sgml"> <!entity sslinfo SYSTEM "sslinfo.sgml">
......
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment