Commit c2e9b2f2 authored by Bruce Momjian's avatar Bruce Momjian

Add pg_upgrade to /contrib; will be in 9.0 beta2.

Add documentation.

Supports migration from PG 8.3 and 8.4.
parent 28e17422
#
# Makefile for pg_upgrade
#
# targets: all, clean, install, uninstall
#
# This Makefile generates an executable and a shared object file
#
PROGRAM = pg_upgrade
# Object files for the pg_upgrade executable; $(WIN32RES) adds the
# Windows resource object when building on that platform.
OBJS = check.o controldata.o dump.o exec.o file.o function.o info.o \
option.o page.o pg_upgrade.o relfilenode.o server.o \
tablespace.o util.o version.o version_old_8_3.o $(WIN32RES)
# -DFRONTEND: compile backend headers in client mode; DLSUFFIX is baked
# in so the binary can name the shared library at runtime.
PG_CPPFLAGS = -DFRONTEND -DDLSUFFIX=\"$(DLSUFFIX)\" -I$(srcdir) -I$(libpq_srcdir)
PG_LIBS = $(libpq_pgport)
PGFILEDESC = "pg_upgrade - In-Place Binary Upgrade Utility"
PGAPPICON = win32
# Server-side support library, loaded into the new cluster during upgrade.
MODULES = pg_upgrade_sysoids
# Standard contrib boilerplate: build against an installed server with
# PGXS, or in-tree otherwise.
ifdef USE_PGXS
PG_CONFIG = pg_config
PGXS := $(shell $(PG_CONFIG) --pgxs)
include $(PGXS)
else
subdir = contrib/pg_upgrade
top_builddir = ../..
include $(top_builddir)/src/Makefile.global
include $(top_srcdir)/contrib/contrib-global.mk
endif
/*
* check.c
*
* server checks and output routines
*/
#include "pg_upgrade.h"
static void set_locale_and_encoding(migratorContext *ctx, Cluster whichCluster);
static void check_new_db_is_empty(migratorContext *ctx);
static void check_locale_and_encoding(migratorContext *ctx, ControlData *oldctrl,
ControlData *newctrl);
/*
 * output_check_banner()
 *
 * Prints the banner for the consistency-check phase and decides whether
 * this is a "live check" (old server already running in --check mode).
 * Sets *live_check accordingly.  Fatal if a live check is attempted with
 * identical old/new port numbers.
 */
void
output_check_banner(migratorContext *ctx, bool *live_check)
{
	if (ctx->check && is_server_running(ctx, ctx->old.pgdata))
	{
		*live_check = true;
		/* the two postmasters must listen on different ports */
		if (ctx->old.port == ctx->new.port)
			pg_log(ctx, PG_FATAL, "When checking a live server, "
				   "the old and new port numbers must be different.\n");
		/* fix: "PerForming" -> "Performing" (matches the banner below) */
		pg_log(ctx, PG_REPORT, "Performing Consistency Checks on Old Live Server\n");
		pg_log(ctx, PG_REPORT, "------------------------------------------------\n");
	}
	else
	{
		pg_log(ctx, PG_REPORT, "Performing Consistency Checks\n");
		pg_log(ctx, PG_REPORT, "-----------------------------\n");
	}
}
/*
 * check_old_cluster()
 *
 * Runs all pre-upgrade checks against the old cluster.  Starts the old
 * postmaster unless it is already running (live_check), gathers database,
 * relation and tablespace information, applies version-specific
 * compatibility checks, and — when not in --check mode — dumps the old
 * cluster's schema.  For a pre-8.4 old cluster in non-check mode,
 * *sequence_script_file_name receives the path of a script that restores
 * sequence values later (see issue_warnings()).
 */
void
check_old_cluster(migratorContext *ctx, bool live_check,
char **sequence_script_file_name)
{
/* -- OLD -- */
if (!live_check)
start_postmaster(ctx, CLUSTER_OLD, false);
set_locale_and_encoding(ctx, CLUSTER_OLD);
get_pg_database_relfilenode(ctx, CLUSTER_OLD);
/* Extract a list of databases and tables from the old cluster */
get_db_and_rel_infos(ctx, &ctx->old.dbarr, CLUSTER_OLD);
init_tablespaces(ctx);
get_loadable_libraries(ctx);
/*
 * Check for various failure cases
 */
old_8_3_check_for_isn_and_int8_passing_mismatch(ctx, CLUSTER_OLD);
/* old = PG 8.3 checks? */
if (GET_MAJOR_VERSION(ctx->old.major_version) <= 803)
{
old_8_3_check_for_name_data_type_usage(ctx, CLUSTER_OLD);
old_8_3_check_for_tsquery_usage(ctx, CLUSTER_OLD);
if (ctx->check)
{
old_8_3_rebuild_tsvector_tables(ctx, true, CLUSTER_OLD);
old_8_3_invalidate_hash_gin_indexes(ctx, true, CLUSTER_OLD);
old_8_3_invalidate_bpchar_pattern_ops_indexes(ctx, true, CLUSTER_OLD);
}
else
/*
 * While we have the old server running, create the script to
 * properly restore its sequence values but we report this at the
 * end.
 */
*sequence_script_file_name =
old_8_3_create_sequence_script(ctx, CLUSTER_OLD);
}
/* Pre-PG 9.0 had no large object permissions */
if (GET_MAJOR_VERSION(ctx->old.major_version) <= 804)
new_9_0_populate_pg_largeobject_metadata(ctx, true, CLUSTER_OLD);
/*
 * While not a check option, we do this now because this is the only time
 * the old server is running.
 */
if (!ctx->check)
{
generate_old_dump(ctx);
split_old_dump(ctx);
}
if (!live_check)
stop_postmaster(ctx, false, false);
}
/*
 * check_new_cluster()
 *
 * Runs checks against the new cluster: locale/encoding lookup, the
 * "must be empty" test, loadable-library availability, and locale
 * compatibility with the old cluster.  Assumes the new postmaster is
 * already running.  All failures are fatal inside the callees.
 */
void
check_new_cluster(migratorContext *ctx)
{
set_locale_and_encoding(ctx, CLUSTER_NEW);
/* the new cluster may contain only system catalogs */
check_new_db_is_empty(ctx);
check_loadable_libraries(ctx);
check_locale_and_encoding(ctx, &ctx->old.controldata, &ctx->new.controldata);
/* link mode requires old and new data dirs on the same filesystem */
if (ctx->transfer_mode == TRANSFER_MODE_LINK)
check_hard_link(ctx);
}
/*
 * report_clusters_compatible()
 *
 * In --check mode: report success, stop the new postmaster, and exit.
 * In a real upgrade: warn the user that this is the point of no return
 * (the new cluster must be re-initdb'ed if the upgrade fails later).
 */
void
report_clusters_compatible(migratorContext *ctx)
{
if (ctx->check)
{
pg_log(ctx, PG_REPORT, "\n*Clusters are compatible*\n");
/* stops new cluster */
stop_postmaster(ctx, false, false);
/* a check-only run ends here; exit_nicely does not return */
exit_nicely(ctx, false);
}
pg_log(ctx, PG_REPORT, "\n"
"| If pg_upgrade fails after this point, you must\n"
"| re-initdb the new cluster before continuing.\n"
"| You will also need to remove the \".old\" suffix\n"
"| from %s/global/pg_control.old.\n", ctx->old.pgdata);
}
/*
 * issue_warnings()
 *
 * Performs post-upgrade fixups that need the new server running:
 * replays the sequence-restore script created from a pre-8.4 old
 * cluster (sequence_script_file_name is unlinked and freed here), and
 * re-runs the 8.3-era rebuild/invalidate steps against the new cluster.
 * Also creates pg_largeobject metadata when upgrading from < 9.0.
 */
void
issue_warnings(migratorContext *ctx, char *sequence_script_file_name)
{
/* old = PG 8.3 warnings? */
if (GET_MAJOR_VERSION(ctx->old.major_version) <= 803)
{
start_postmaster(ctx, CLUSTER_NEW, true);
/* restore proper sequence values using file created from old server */
if (sequence_script_file_name)
{
prep_status(ctx, "Adjusting sequences");
exec_prog(ctx, true,
SYSTEMQUOTE "\"%s/%s\" --set ON_ERROR_STOP=on --port %d "
"-f \"%s\" --dbname template1 >> \"%s\"" SYSTEMQUOTE,
ctx->new.bindir, ctx->new.psql_exe, ctx->new.port,
sequence_script_file_name, ctx->logfile);
/* the script is one-shot: remove the file and release the name */
unlink(sequence_script_file_name);
pg_free(sequence_script_file_name);
check_ok(ctx);
}
old_8_3_rebuild_tsvector_tables(ctx, false, CLUSTER_NEW);
old_8_3_invalidate_hash_gin_indexes(ctx, false, CLUSTER_NEW);
old_8_3_invalidate_bpchar_pattern_ops_indexes(ctx, false, CLUSTER_NEW);
stop_postmaster(ctx, false, true);
}
/* Create dummy large object permissions for old < PG 9.0? */
if (GET_MAJOR_VERSION(ctx->old.major_version) <= 804)
{
start_postmaster(ctx, CLUSTER_NEW, true);
new_9_0_populate_pg_largeobject_metadata(ctx, false, CLUSTER_NEW);
stop_postmaster(ctx, false, true);
}
}
/*
 * output_completion_banner()
 *
 * Final user-facing advice after a successful upgrade: which vacuumdb
 * invocation to run (analyze-only for >= 8.4 old clusters, full analyze
 * otherwise — presumably because pre-8.4 free space info is not carried
 * over; confirm against the FSM migration code) and where the
 * old-cluster deletion script was written.
 */
void
output_completion_banner(migratorContext *ctx, char *deletion_script_file_name)
{
/* Did we migrate the free space files? */
if (GET_MAJOR_VERSION(ctx->old.major_version) >= 804)
pg_log(ctx, PG_REPORT,
"| Optimizer statistics is not transferred by pg_upgrade\n"
"| so consider running:\n"
"| \tvacuumdb --all --analyze-only\n"
"| on the newly-upgraded cluster.\n\n");
else
pg_log(ctx, PG_REPORT,
"| Optimizer statistics and free space information\n"
"| are not transferred by pg_upgrade so consider\n"
"| running:\n"
"| \tvacuumdb --all --analyze\n"
"| on the newly-upgraded cluster.\n\n");
pg_log(ctx, PG_REPORT,
"| Running this script will delete the old cluster's data files:\n"
"| \t%s\n",
deletion_script_file_name);
}
/*
 * check_cluster_versions()
 *
 * Fetches both clusters' major versions and enforces the supported
 * upgrade matrix: old must be >= 8.3, new must match the version this
 * pg_upgrade was built against, and downgrades are rejected.
 */
void
check_cluster_versions(migratorContext *ctx)
{
/* get old and new cluster versions */
ctx->old.major_version = get_major_server_version(ctx, &ctx->old.major_version_str, CLUSTER_OLD);
ctx->new.major_version = get_major_server_version(ctx, &ctx->new.major_version_str, CLUSTER_NEW);
/* We allow migration from/to the same major version for beta upgrades */
if (GET_MAJOR_VERSION(ctx->old.major_version) < 803)
pg_log(ctx, PG_FATAL, "This utility can only upgrade from PostgreSQL version 8.3 and later.\n");
/* Only current PG version is supported as a target */
if (GET_MAJOR_VERSION(ctx->new.major_version) != GET_MAJOR_VERSION(PG_VERSION_NUM))
pg_log(ctx, PG_FATAL, "This utility can only upgrade to PostgreSQL version %s.\n",
PG_MAJORVERSION);
/*
 * We can't allow downgrading because we use the target pg_dumpall, and
 * pg_dumpall cannot operate on new database versions, only older versions.
 */
if (ctx->old.major_version > ctx->new.major_version)
pg_log(ctx, PG_FATAL, "This utility cannot be used to downgrade to older major PostgreSQL versions.\n");
}
/*
 * check_cluster_compatibility()
 *
 * Verifies that the pg_upgrade support library is installed in the new
 * cluster's library directory, fetches pg_control data from both
 * clusters, and checks on-disk format compatibility.  Fatal on any
 * mismatch.
 */
void
check_cluster_compatibility(migratorContext *ctx, bool live_check)
{
	char		libfile[MAXPGPATH];
	FILE	   *lib_test;

	/*
	 * Test pg_upgrade_sysoids.so is in the proper place. We cannot copy it
	 * ourselves because install directories are typically root-owned.
	 */
	snprintf(libfile, sizeof(libfile), "%s/pg_upgrade_sysoids%s", ctx->new.libpath,
			 DLSUFFIX);

	if ((lib_test = fopen(libfile, "r")) == NULL)
		/*
		 * Fix: the original message said "pg_upgrade%s", but the file we
		 * actually require is pg_upgrade_sysoids<DLSUFFIX> (see snprintf
		 * above) — name the right library in the error.
		 */
		pg_log(ctx, PG_FATAL,
			   "\npg_upgrade_sysoids%s must be created and installed in %s\n",
			   DLSUFFIX, libfile);
	else
		fclose(lib_test);

	/* get/check pg_control data of servers */
	get_control_data(ctx, &ctx->old, live_check);
	get_control_data(ctx, &ctx->new, false);
	check_control_data(ctx, &ctx->old.controldata, &ctx->new.controldata);

	/* Is it 9.0 but without tablespace directories? */
	if (GET_MAJOR_VERSION(ctx->new.major_version) == 900 &&
		ctx->new.controldata.cat_ver < TABLE_SPACE_SUBDIRS)
		pg_log(ctx, PG_FATAL, "This utility can only upgrade to PostgreSQL version 9.0 after 2010-01-11\n"
			   "because of backend API changes made during development.\n");
}
/*
 * set_locale_and_encoding()
 *
 * query the database to get the template0 locale
 *
 * Fills lc_collate/lc_ctype (8.4+ only; for older servers those were
 * already captured from pg_controldata output in get_control_data())
 * and the encoding name into the cluster's ControlData.  All strings
 * are pg_strdup'ed.
 */
static void
set_locale_and_encoding(migratorContext *ctx, Cluster whichCluster)
{
PGconn *conn;
PGresult *res;
int i_encoding;
ControlData *ctrl = (whichCluster == CLUSTER_OLD) ?
&ctx->old.controldata : &ctx->new.controldata;
int cluster_version = (whichCluster == CLUSTER_OLD) ?
ctx->old.major_version : ctx->new.major_version;
conn = connectToServer(ctx, "template1", whichCluster);
/* for pg < 80400, we got the values from pg_controldata */
if (cluster_version >= 80400)
{
int i_datcollate;
int i_datctype;
/* per-database collate/ctype columns exist only in 8.4+ */
res = executeQueryOrDie(ctx, conn,
"SELECT datcollate, datctype "
"FROM pg_catalog.pg_database "
"WHERE datname = 'template0' ");
assert(PQntuples(res) == 1);
i_datcollate = PQfnumber(res, "datcollate");
i_datctype = PQfnumber(res, "datctype");
ctrl->lc_collate = pg_strdup(ctx, PQgetvalue(res, 0, i_datcollate));
ctrl->lc_ctype = pg_strdup(ctx, PQgetvalue(res, 0, i_datctype));
PQclear(res);
}
/* encoding is available in all supported versions */
res = executeQueryOrDie(ctx, conn,
"SELECT pg_catalog.pg_encoding_to_char(encoding) "
"FROM pg_catalog.pg_database "
"WHERE datname = 'template0' ");
assert(PQntuples(res) == 1);
i_encoding = PQfnumber(res, "pg_encoding_to_char");
ctrl->encoding = pg_strdup(ctx, PQgetvalue(res, 0, i_encoding));
PQclear(res);
PQfinish(conn);
}
/*
 * check_locale_and_encoding()
 *
 * Compares the old and new clusters' lc_collate, lc_ctype and encoding
 * settings; any mismatch is fatal.  (Locale is not in pg_controldata in
 * 8.4 and later, so these values may have come from a database query —
 * see set_locale_and_encoding().)
 */
static void
check_locale_and_encoding(migratorContext *ctx, ControlData *oldctrl,
						  ControlData *newctrl)
{
	struct
	{
		const char *oldval;
		const char *newval;
		const char *failmsg;
	}			locale_checks[] = {
		{oldctrl->lc_collate, newctrl->lc_collate,
		"old and new cluster lc_collate values do not match\n"},
		{oldctrl->lc_ctype, newctrl->lc_ctype,
		"old and new cluster lc_ctype values do not match\n"},
		{oldctrl->encoding, newctrl->encoding,
		"old and new cluster encoding values do not match\n"}
	};
	int			i;

	/* first mismatch is fatal, so at most one message is emitted */
	for (i = 0; i < 3; i++)
	{
		if (strcmp(locale_checks[i].oldval, locale_checks[i].newval) != 0)
			pg_log(ctx, PG_FATAL, "%s", locale_checks[i].failmsg);
	}
}
/*
 * check_new_db_is_empty()
 *
 * Scans every database in the new cluster and fails fatally if any
 * relation outside pg_catalog exists — the new cluster must contain
 * only system catalogs.  The relation list is fetched fresh and freed
 * before returning.
 */
static void
check_new_db_is_empty(migratorContext *ctx)
{
	int			dbnum;
	bool		found = false;

	get_db_and_rel_infos(ctx, &ctx->new.dbarr, CLUSTER_NEW);

	/*
	 * Fix: also stop the outer loop once a user relation is found; the
	 * original inner "break" left the scan running over all remaining
	 * databases for no reason.
	 */
	for (dbnum = 0; dbnum < ctx->new.dbarr.ndbs && !found; dbnum++)
	{
		int			relnum;
		RelInfoArr *rel_arr = &ctx->new.dbarr.dbs[dbnum].rel_arr;

		for (relnum = 0; relnum < rel_arr->nrels; relnum++)
		{
			/* pg_largeobject and its index should be skipped */
			if (strcmp(rel_arr->rels[relnum].nspname, "pg_catalog") != 0)
			{
				found = true;
				break;
			}
		}
	}

	dbarr_free(&ctx->new.dbarr);

	if (found)
		pg_log(ctx, PG_FATAL, "New cluster is not empty; exiting\n");
}
/*
 * create_script_for_old_cluster_deletion()
 *
 * Writes a shell (or batch) script that removes the old cluster's data
 * directory and tablespace directories.  This is particularly useful
 * for tablespace deletion.  The script path is returned via
 * *deletion_script_file_name (pg_malloc'ed; caller owns it).
 */
void
create_script_for_old_cluster_deletion(migratorContext *ctx,
									   char **deletion_script_file_name)
{
	FILE	   *script = NULL;
	int			tblnum;

	*deletion_script_file_name = pg_malloc(ctx, MAXPGPATH);

	prep_status(ctx, "Creating script to delete old cluster");

	snprintf(*deletion_script_file_name, MAXPGPATH, "%s/delete_old_cluster.%s",
			 ctx->output_dir, EXEC_EXT);

	if ((script = fopen(*deletion_script_file_name, "w")) == NULL)
		pg_log(ctx, PG_FATAL, "Could not create necessary file: %s\n",
			   *deletion_script_file_name);

#ifndef WIN32
	/* add shebang header */
	fprintf(script, "#!/bin/sh\n\n");
#endif

	/* delete old cluster's default tablespace */
	fprintf(script, RMDIR_CMD " %s\n", ctx->old.pgdata);

	/* delete old cluster's alternate tablespaces */
	for (tblnum = 0; tblnum < ctx->num_tablespaces; tblnum++)
	{
		/*
		 * Do the old cluster's per-database directories share a directory
		 * with a new version-specific tablespace?
		 */
		if (strlen(ctx->old.tablespace_suffix) == 0)
		{
			/* delete per-database directories */
			int			dbnum;

			fprintf(script, "\n");

			/*
			 * Fix: bound the loop by the OLD cluster's database count —
			 * the body indexes ctx->old.dbarr.dbs[dbnum], but the original
			 * used ctx->new.dbarr.ndbs (whose array has already been freed
			 * by check_new_db_is_empty), an apparent copy-paste slip.
			 */
			for (dbnum = 0; dbnum < ctx->old.dbarr.ndbs; dbnum++)
			{
				fprintf(script, RMDIR_CMD " %s%s/%d\n",
						ctx->tablespaces[tblnum], ctx->old.tablespace_suffix,
						ctx->old.dbarr.dbs[dbnum].db_oid);
			}
		}
		else
			/*
			 * Simply delete the tablespace directory, which might be ".old"
			 * or a version-specific subdirectory.
			 */
			fprintf(script, RMDIR_CMD " %s%s\n",
					ctx->tablespaces[tblnum], ctx->old.tablespace_suffix);
	}

	fclose(script);

	if (chmod(*deletion_script_file_name, S_IRWXU) != 0)
		pg_log(ctx, PG_FATAL, "Could not add execute permission to file: %s\n",
			   *deletion_script_file_name);

	check_ok(ctx);
}
/*
* controldata.c
*
* controldata functions
*/
#include "pg_upgrade.h"
#include <ctype.h>
#include <stdlib.h>
#ifdef EDB_NATIVE_LANG
#include "access/tuptoaster.h"
#endif
/*
 * get_control_data()
 *
 * gets pg_control information in "ctrl". Assumes that bindir and
 * datadir are valid absolute paths to postgresql bin and pgdata
 * directories respectively *and* pg_resetxlog is version compatible
 * with datadir. The main purpose of this function is to get pg_control
 * data in a version independent manner.
 *
 * The approach taken here is to invoke pg_resetxlog with -n option
 * and then pipe its output. With little string parsing we get the
 * pg_control data. pg_resetxlog cannot be run while the server is running
 * so we use pg_controldata; pg_controldata doesn't provide all the fields
 * we need to actually perform the migration, but it provides enough for
 * check mode. We do not implement pg_resetxlog -n because it is hard to
 * return valid xid data for a running server.
 */
void
get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check)
{
	char		cmd[MAXPGPATH];
	char		bufin[MAX_STRING];
	FILE	   *output;
	char	   *p;
	bool		got_xid = false;
	bool		got_oid = false;
	bool		got_log_id = false;
	bool		got_log_seg = false;
	bool		got_tli = false;
	bool		got_align = false;
	bool		got_blocksz = false;
	bool		got_largesz = false;
	bool		got_walsz = false;
	bool		got_walseg = false;
	bool		got_ident = false;
	bool		got_index = false;
	bool		got_toast = false;
	bool		got_date_is_int = false;
	bool		got_float8_pass_by_value = false;
	char	   *lang = NULL;

	/*
	 * Because we test the pg_resetxlog output strings, it has to be in
	 * English.  Save the current LANG so it can be restored afterward.
	 */
	if (getenv("LANG"))
		lang = pg_strdup(ctx, getenv("LANG"));
#ifndef WIN32
	/* putenv() keeps the string; the pg_strdup is intentionally not freed */
	putenv(pg_strdup(ctx, "LANG=C"));
#else
	SetEnvironmentVariableA("LANG", "C");
#endif

	/*
	 * Fix: use snprintf instead of unbounded sprintf so oversized
	 * bindir/pgdata paths cannot overflow cmd.
	 */
	snprintf(cmd, sizeof(cmd), SYSTEMQUOTE "\"%s/%s \"%s\"" SYSTEMQUOTE,
			 cluster->bindir,
			 live_check ? "pg_controldata\"" : "pg_resetxlog\" -n",
			 cluster->pgdata);
	fflush(stdout);
	fflush(stderr);

	if ((output = popen(cmd, "r")) == NULL)
		pg_log(ctx, PG_FATAL, "Could not get control data: %s\n",
			   getErrorText(errno));

	/* Only pre-8.4 has these so if they are not set below we will check later */
	cluster->controldata.lc_collate = NULL;
	cluster->controldata.lc_ctype = NULL;

	/* Only in <= 8.3 */
	if (GET_MAJOR_VERSION(cluster->major_version) <= 803)
	{
		cluster->controldata.float8_pass_by_value = false;
		got_float8_pass_by_value = true;
	}

#ifdef EDB_NATIVE_LANG
	/* EDB AS 8.3 is an 8.2 code base */
	if (cluster->is_edb_as && GET_MAJOR_VERSION(cluster->major_version) <= 803)
	{
		cluster->controldata.toast = TOAST_MAX_CHUNK_SIZE;
		got_toast = true;
	}
#endif

	/* we have the result of cmd in "output". so parse it line by line now */
	while (fgets(bufin, sizeof(bufin), output))
	{
		/*
		 * Fix: write the raw line with fputs().  The original passed bufin
		 * as the fprintf *format* string, so any '%' in the tool's output
		 * would be treated as a conversion specifier — undefined behavior
		 * and a garbled debug log.
		 */
		if (ctx->debug)
			fputs(bufin, ctx->debug_fd);

#ifdef WIN32
		/*
		 * Due to an installer bug, LANG=C doesn't work for PG 8.3.3, but does
		 * work 8.2.6 and 8.3.7, so check for non-ASCII output and suggest a
		 * minor upgrade.
		 */
		if (GET_MAJOR_VERSION(cluster->major_version) <= 803)
		{
			for (p = bufin; *p; p++)
				if (!isascii(*p))
					pg_log(ctx, PG_FATAL,
						   "The 8.3 cluster's pg_controldata is incapable of outputting ASCII, even\n"
						   "with LANG=C. You must upgrade this cluster to a newer version of Postgres\n"
						   "8.3 to fix this bug. Postgres 8.3.7 and later are known to work properly.\n");
		}
#endif

		if ((p = strstr(bufin, "pg_control version number:")) != NULL)
		{
			p = strchr(p, ':');
			if (p == NULL || strlen(p) <= 1)
				pg_log(ctx, PG_FATAL, "%d: pg_resetxlog problem\n", __LINE__);
			p++;				/* removing ':' char */
			cluster->controldata.ctrl_ver = (uint32) atol(p);
		}
		else if ((p = strstr(bufin, "Catalog version number:")) != NULL)
		{
			p = strchr(p, ':');
			if (p == NULL || strlen(p) <= 1)
				pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
			p++;				/* removing ':' char */
			cluster->controldata.cat_ver = (uint32) atol(p);
		}
		else if ((p = strstr(bufin, "First log file ID after reset:")) != NULL ||
				 (cluster->is_edb_as && GET_MAJOR_VERSION(cluster->major_version) <= 803 &&
				  (p = strstr(bufin, "Current log file ID:")) != NULL))
		{
			p = strchr(p, ':');
			if (p == NULL || strlen(p) <= 1)
				pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
			p++;				/* removing ':' char */
			cluster->controldata.logid = (uint32) atol(p);
			got_log_id = true;
		}
		else if ((p = strstr(bufin, "First log file segment after reset:")) != NULL ||
				 (cluster->is_edb_as && GET_MAJOR_VERSION(cluster->major_version) <= 803 &&
				  (p = strstr(bufin, "Next log file segment:")) != NULL))
		{
			p = strchr(p, ':');
			if (p == NULL || strlen(p) <= 1)
				pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
			p++;				/* removing ':' char */
			cluster->controldata.nxtlogseg = (uint32) atol(p);
			got_log_seg = true;
		}
		else if ((p = strstr(bufin, "Latest checkpoint's TimeLineID:")) != NULL)
		{
			p = strchr(p, ':');
			if (p == NULL || strlen(p) <= 1)
				pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
			p++;				/* removing ':' char */
			cluster->controldata.chkpnt_tli = (uint32) atol(p);
			got_tli = true;
		}
		else if ((p = strstr(bufin, "Latest checkpoint's NextXID:")) != NULL)
		{
			/* NextXID is printed as "epoch/xid" in newer versions */
			char	   *op = strchr(p, '/');

			if (op == NULL)
				op = strchr(p, ':');
			if (op == NULL || strlen(op) <= 1)
				pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
			op++;				/* removing ':' char */
			cluster->controldata.chkpnt_nxtxid = (uint32) atol(op);
			got_xid = true;
		}
		else if ((p = strstr(bufin, "Latest checkpoint's NextOID:")) != NULL)
		{
			p = strchr(p, ':');
			if (p == NULL || strlen(p) <= 1)
				pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
			p++;				/* removing ':' char */
			cluster->controldata.chkpnt_nxtoid = (uint32) atol(p);
			got_oid = true;
		}
		else if ((p = strstr(bufin, "Maximum data alignment:")) != NULL)
		{
			p = strchr(p, ':');
			if (p == NULL || strlen(p) <= 1)
				pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
			p++;				/* removing ':' char */
			cluster->controldata.align = (uint32) atol(p);
			got_align = true;
		}
		else if ((p = strstr(bufin, "Database block size:")) != NULL)
		{
			p = strchr(p, ':');
			if (p == NULL || strlen(p) <= 1)
				pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
			p++;				/* removing ':' char */
			cluster->controldata.blocksz = (uint32) atol(p);
			got_blocksz = true;
		}
		else if ((p = strstr(bufin, "Blocks per segment of large relation:")) != NULL)
		{
			p = strchr(p, ':');
			if (p == NULL || strlen(p) <= 1)
				pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
			p++;				/* removing ':' char */
			cluster->controldata.largesz = (uint32) atol(p);
			got_largesz = true;
		}
		else if ((p = strstr(bufin, "WAL block size:")) != NULL)
		{
			p = strchr(p, ':');
			if (p == NULL || strlen(p) <= 1)
				pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
			p++;				/* removing ':' char */
			cluster->controldata.walsz = (uint32) atol(p);
			got_walsz = true;
		}
		else if ((p = strstr(bufin, "Bytes per WAL segment:")) != NULL)
		{
			p = strchr(p, ':');
			if (p == NULL || strlen(p) <= 1)
				pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
			p++;				/* removing ':' char */
			cluster->controldata.walseg = (uint32) atol(p);
			got_walseg = true;
		}
		else if ((p = strstr(bufin, "Maximum length of identifiers:")) != NULL)
		{
			p = strchr(p, ':');
			if (p == NULL || strlen(p) <= 1)
				pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
			p++;				/* removing ':' char */
			cluster->controldata.ident = (uint32) atol(p);
			got_ident = true;
		}
		else if ((p = strstr(bufin, "Maximum columns in an index:")) != NULL)
		{
			p = strchr(p, ':');
			if (p == NULL || strlen(p) <= 1)
				pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
			p++;				/* removing ':' char */
			cluster->controldata.index = (uint32) atol(p);
			got_index = true;
		}
		else if ((p = strstr(bufin, "Maximum size of a TOAST chunk:")) != NULL)
		{
			p = strchr(p, ':');
			if (p == NULL || strlen(p) <= 1)
				pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
			p++;				/* removing ':' char */
			cluster->controldata.toast = (uint32) atol(p);
			got_toast = true;
		}
		else if ((p = strstr(bufin, "Date/time type storage:")) != NULL)
		{
			p = strchr(p, ':');
			if (p == NULL || strlen(p) <= 1)
				pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
			p++;				/* removing ':' char */
			cluster->controldata.date_is_int = strstr(p, "64-bit integers") != NULL;
			got_date_is_int = true;
		}
		else if ((p = strstr(bufin, "Float8 argument passing:")) != NULL)
		{
			p = strchr(p, ':');
			if (p == NULL || strlen(p) <= 1)
				pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
			p++;				/* removing ':' char */
			/* used later for /contrib check */
			cluster->controldata.float8_pass_by_value = strstr(p, "by value") != NULL;
			got_float8_pass_by_value = true;
		}
		/* In pre-8.4 only */
		else if ((p = strstr(bufin, "LC_COLLATE:")) != NULL)
		{
			p = strchr(p, ':');
			if (p == NULL || strlen(p) <= 1)
				pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
			p++;				/* removing ':' char */
			/* skip leading spaces and remove trailing newline */
			p += strspn(p, " ");
			if (strlen(p) > 0 && *(p + strlen(p) - 1) == '\n')
				*(p + strlen(p) - 1) = '\0';
			cluster->controldata.lc_collate = pg_strdup(ctx, p);
		}
		/* In pre-8.4 only */
		else if ((p = strstr(bufin, "LC_CTYPE:")) != NULL)
		{
			p = strchr(p, ':');
			if (p == NULL || strlen(p) <= 1)
				pg_log(ctx, PG_FATAL, "%d: controldata retrieval problem\n", __LINE__);
			p++;				/* removing ':' char */
			/* skip leading spaces and remove trailing newline */
			p += strspn(p, " ");
			if (strlen(p) > 0 && *(p + strlen(p) - 1) == '\n')
				*(p + strlen(p) - 1) = '\0';
			cluster->controldata.lc_ctype = pg_strdup(ctx, p);
		}
	}

	if (output)
		pclose(output);

	/* restore LANG */
	if (lang)
	{
#ifndef WIN32
		/* buffer becomes part of the environment; intentionally not freed */
		char	   *envstr = (char *) pg_malloc(ctx, strlen(lang) + 6);

		sprintf(envstr, "LANG=%s", lang);
		putenv(envstr);
#else
		SetEnvironmentVariableA("LANG", lang);
#endif
		pg_free(lang);
	}
	else
	{
#ifndef WIN32
		unsetenv("LANG");
#else
		SetEnvironmentVariableA("LANG", "");
#endif
	}

	/* verify that we got all the mandatory pg_control data */
	if (!got_xid || !got_oid ||
		(!live_check && !got_log_id) ||
		(!live_check && !got_log_seg) ||
		!got_tli ||
		!got_align || !got_blocksz || !got_largesz || !got_walsz ||
		!got_walseg || !got_ident || !got_index || !got_toast ||
		!got_date_is_int || !got_float8_pass_by_value)
	{
		pg_log(ctx, PG_REPORT,
			   "Some required control information is missing; cannot find:\n");

		if (!got_xid)
			pg_log(ctx, PG_REPORT, " checkpoint next XID\n");
		if (!got_oid)
			pg_log(ctx, PG_REPORT, " latest checkpoint next OID\n");
		if (!live_check && !got_log_id)
			pg_log(ctx, PG_REPORT, " first log file ID after reset\n");
		if (!live_check && !got_log_seg)
			pg_log(ctx, PG_REPORT, " first log file segment after reset\n");
		if (!got_tli)
			pg_log(ctx, PG_REPORT, " latest checkpoint timeline ID\n");
		if (!got_align)
			pg_log(ctx, PG_REPORT, " maximum alignment\n");
		if (!got_blocksz)
			pg_log(ctx, PG_REPORT, " block size\n");
		if (!got_largesz)
			pg_log(ctx, PG_REPORT, " large relation segment size\n");
		if (!got_walsz)
			pg_log(ctx, PG_REPORT, " WAL block size\n");
		if (!got_walseg)
			pg_log(ctx, PG_REPORT, " WAL segment size\n");
		if (!got_ident)
			pg_log(ctx, PG_REPORT, " maximum identifier length\n");
		if (!got_index)
			pg_log(ctx, PG_REPORT, " maximum number of indexed columns\n");
		if (!got_toast)
			pg_log(ctx, PG_REPORT, " maximum TOAST chunk size\n");
		if (!got_date_is_int)
			pg_log(ctx, PG_REPORT, " dates/times are integers?\n");

		/* value added in Postgres 8.4 */
		if (!got_float8_pass_by_value)
			pg_log(ctx, PG_REPORT, " float8 argument passing method\n");

		pg_log(ctx, PG_FATAL,
			   "Unable to continue without required control information, terminating\n");
	}
}
/*
 * check_control_data()
 *
 * check to make sure the control data settings are compatible
 *
 * A zero value means the field was never parsed from pg_control output,
 * so it is treated the same as a mismatch.  Every failure is fatal:
 * the two clusters' on-disk formats are incompatible.
 */
void
check_control_data(migratorContext *ctx, ControlData *oldctrl,
				   ControlData *newctrl)
{
	if (oldctrl->align == 0 || oldctrl->align != newctrl->align)
		pg_log(ctx, PG_FATAL,
			   "old and new pg_controldata alignments are invalid or do not match\n");

	if (oldctrl->blocksz == 0 || oldctrl->blocksz != newctrl->blocksz)
		pg_log(ctx, PG_FATAL,
			   "old and new pg_controldata block sizes are invalid or do not match\n");

	/* fix: "segement" -> "segment" in the user-facing message */
	if (oldctrl->largesz == 0 || oldctrl->largesz != newctrl->largesz)
		pg_log(ctx, PG_FATAL,
			   "old and new pg_controldata maximum relation segment sizes are invalid or do not match\n");

	if (oldctrl->walsz == 0 || oldctrl->walsz != newctrl->walsz)
		pg_log(ctx, PG_FATAL,
			   "old and new pg_controldata WAL block sizes are invalid or do not match\n");

	if (oldctrl->walseg == 0 || oldctrl->walseg != newctrl->walseg)
		pg_log(ctx, PG_FATAL,
			   "old and new pg_controldata WAL segment sizes are invalid or do not match\n");

	if (oldctrl->ident == 0 || oldctrl->ident != newctrl->ident)
		pg_log(ctx, PG_FATAL,
			   "old and new pg_controldata maximum identifier lengths are invalid or do not match\n");

	if (oldctrl->index == 0 || oldctrl->index != newctrl->index)
		pg_log(ctx, PG_FATAL,
			   "old and new pg_controldata maximum indexed columns are invalid or do not match\n");

	if (oldctrl->toast == 0 || oldctrl->toast != newctrl->toast)
		pg_log(ctx, PG_FATAL,
			   "old and new pg_controldata maximum TOAST chunk sizes are invalid or do not match\n");

	if (oldctrl->date_is_int != newctrl->date_is_int)
	{
		pg_log(ctx, PG_WARNING,
			   "\nOld and new pg_controldata date/time storage types do not match.\n");

		/*
		 * This is a common 8.3 -> 8.4 migration problem, so we are more
		 * verbose
		 */
		pg_log(ctx, PG_FATAL,
			   "You will need to rebuild the new server with configure\n"
			   "--disable-integer-datetimes or get server binaries built\n"
			   "with those options.\n");
	}
}
/*
 * rename_old_pg_control()
 *
 * Renames the old cluster's global/pg_control to pg_control.old so the
 * old server cannot be started accidentally after the upgrade begins.
 * Fatal if the rename fails.
 */
void
rename_old_pg_control(migratorContext *ctx)
{
	char		oldfile[MAXPGPATH];
	char		newfile[MAXPGPATH];

	prep_status(ctx, "Adding \".old\" suffix to old global/pg_control");

	snprintf(oldfile, sizeof(oldfile), "%s/global/pg_control", ctx->old.pgdata);
	snprintf(newfile, sizeof(newfile), "%s/global/pg_control.old", ctx->old.pgdata);

	if (pg_mv_file(oldfile, newfile) != 0)
		pg_log(ctx, PG_FATAL, "Unable to rename %s to %s.\n", oldfile, newfile);

	check_ok(ctx);
}
/*
* dump.c
*
* dump functions
*/
#include "pg_upgrade.h"
/*
 * generate_old_dump()
 *
 * Runs the NEW cluster's pg_dumpall against the OLD server (via the old
 * port) to produce a schema-only dump in ALL_DUMP_FILE under output_dir.
 * exec_prog() with throw_error=true makes any failure fatal.
 */
void
generate_old_dump(migratorContext *ctx)
{
/* run new pg_dumpall binary */
prep_status(ctx, "Creating catalog dump");
/*
 * --binary-upgrade records the width of dropped columns in pg_class, and
 * restores the frozenid's for databases and relations.
 */
exec_prog(ctx, true,
SYSTEMQUOTE "\"%s/pg_dumpall\" --port %d --schema-only "
"--binary-upgrade > \"%s/" ALL_DUMP_FILE "\"" SYSTEMQUOTE,
ctx->new.bindir, ctx->old.port, ctx->output_dir);
check_ok(ctx);
}
/*
 * split_old_dump
 *
 * This function splits pg_dumpall output into global values and
 * database creation, and per-db schemas. This allows us to create
 * the toast place holders between restoring these two parts of the
 * dump. We split on the first "\connect " after a CREATE ROLE
 * username match; this is where the per-db restore starts.
 *
 * We suppress recreation of our own username so we don't generate
 * an error during restore
 */
void
split_old_dump(migratorContext *ctx)
{
FILE *all_dump,
*globals_dump,
*db_dump;
FILE *current_output;
char line[LINE_ALLOC];
/* true whenever the previous fgets() consumed a full line (saw '\n') */
bool start_of_line = true;
char create_role_str[MAX_STRING];
char create_role_str_quote[MAX_STRING];
char filename[MAXPGPATH];
/* set once our own CREATE ROLE has been seen and skipped */
bool suppressed_username = false;
snprintf(filename, sizeof(filename), "%s/%s", ctx->output_dir, ALL_DUMP_FILE);
if ((all_dump = fopen(filename, "r")) == NULL)
pg_log(ctx, PG_FATAL, "Cannot open dump file %s\n", filename);
snprintf(filename, sizeof(filename), "%s/%s", ctx->output_dir, GLOBALS_DUMP_FILE);
if ((globals_dump = fopen(filename, "w")) == NULL)
pg_log(ctx, PG_FATAL, "Cannot write to dump file %s\n", filename);
snprintf(filename, sizeof(filename), "%s/%s", ctx->output_dir, DB_DUMP_FILE);
if ((db_dump = fopen(filename, "w")) == NULL)
pg_log(ctx, PG_FATAL, "Cannot write to dump file %s\n", filename);
current_output = globals_dump;
/* patterns used to prevent our own username from being recreated */
snprintf(create_role_str, sizeof(create_role_str),
"CREATE ROLE %s;", ctx->user);
snprintf(create_role_str_quote, sizeof(create_role_str_quote),
"CREATE ROLE %s;", quote_identifier(ctx, ctx->user));
while (fgets(line, sizeof(line), all_dump) != NULL)
{
/* switch to db_dump file output? */
if (current_output == globals_dump && start_of_line &&
suppressed_username &&
strncmp(line, "\\connect ", strlen("\\connect ")) == 0)
current_output = db_dump;
/* output unless we are recreating our own username */
if (current_output != globals_dump || !start_of_line ||
(strncmp(line, create_role_str, strlen(create_role_str)) != 0 &&
strncmp(line, create_role_str_quote, strlen(create_role_str_quote)) != 0))
fputs(line, current_output);
else
suppressed_username = true;
/* a buffer-sized line without '\n' means the next fgets continues it */
if (strlen(line) > 0 && line[strlen(line) - 1] == '\n')
start_of_line = true;
else
start_of_line = false;
}
fclose(all_dump);
fclose(globals_dump);
fclose(db_dump);
}
/*
* exec.c
*
* execution functions
*/
#include "pg_upgrade.h"
#include <fcntl.h>
#include <grp.h>
static void checkBinDir(migratorContext *ctx, ClusterInfo *cluster);
static int check_exec(migratorContext *ctx, const char *dir, const char *cmdName,
const char *alternative);
static const char *validate_exec(const char *path);
static int check_data_dir(migratorContext *ctx, const char *pg_data);
/*
 * exec_prog()
 *
 * Formats a command from the given printf-style argument list and
 * executes it via system().  Returns 0 if the command succeeds (exit
 * status zero); otherwise logs the failing command and returns 1.
 * (NOTE: the original header stated the opposite return convention;
 * the code below has always returned 0 on success, 1 on failure.)
 *
 * If throw_error is TRUE, this function will throw a PG_FATAL error
 * instead of returning should an error occur.
 */
int
exec_prog(migratorContext *ctx, bool throw_error, const char *fmt,...)
{
va_list args;
int result;
char cmd[MAXPGPATH];
va_start(args, fmt);
vsnprintf(cmd, MAXPGPATH, fmt, args);
va_end(args);
pg_log(ctx, PG_INFO, "%s\n", cmd);
result = system(cmd);
if (result != 0)
{
/* with throw_error, pg_log(PG_FATAL) exits and we never return */
pg_log(ctx, throw_error ? PG_FATAL : PG_INFO,
"\nThere were problems executing %s\n", cmd);
return 1;
}
return 0;
}
/*
* verify_directories()
*
* does all the hectic work of verifying directories and executables
* of old and new server.
*
* NOTE: May update the values of all parameters
*/
void
verify_directories(migratorContext *ctx)
{
prep_status(ctx, "Checking old data directory (%s)", ctx->old.pgdata);
if (check_data_dir(ctx, ctx->old.pgdata) != 0)
pg_log(ctx, PG_FATAL, "Failed\n");
checkBinDir(ctx, &ctx->old);
check_ok(ctx);
prep_status(ctx, "Checking new data directory (%s)", ctx->new.pgdata);
if (check_data_dir(ctx, ctx->new.pgdata) != 0)
pg_log(ctx, PG_FATAL, "Failed\n");
checkBinDir(ctx, &ctx->new);
check_ok(ctx);
}
/*
 * checkBinDir()
 *
 * This function searches for the executables that we expect to find
 * in the binaries directory. If we find that a required executable
 * is missing (or secured against us), we display an error message and
 * exit().
 *
 * Also determines which psql binary to use: in EDB builds, finding
 * edb-psql marks the cluster as EDB Advanced Server.
 */
static void
checkBinDir(migratorContext *ctx, ClusterInfo *cluster)
{
check_exec(ctx, cluster->bindir, "postgres", "edb-postgres");
check_exec(ctx, cluster->bindir, "pg_ctl", NULL);
check_exec(ctx, cluster->bindir, "pg_dumpall", NULL);
#ifdef EDB_NATIVE_LANG
/* check for edb-psql first because we need to detect EDB AS */
if (check_exec(ctx, cluster->bindir, "edb-psql", "psql") == 1)
{
cluster->psql_exe = "edb-psql";
cluster->is_edb_as = true;
}
else
#else
if (check_exec(ctx, cluster->bindir, "psql", NULL) == 1)
#endif
/* reached via the EDB "else" above, or the non-EDB "if" succeeding */
cluster->psql_exe = "psql";
}
/*
 * is_server_running()
 *
 * checks whether postmaster on the given data directory is running or not.
 * The check is performed by looking for the existence of postmaster.pid file.
 * Fatal if the file exists but cannot be opened for some other reason.
 */
bool
is_server_running(migratorContext *ctx, const char *datadir)
{
	int			fd;
	char		pidfile[MAXPGPATH];

	snprintf(pidfile, sizeof(pidfile), "%s/postmaster.pid", datadir);

	fd = open(pidfile, O_RDONLY);
	if (fd >= 0)
	{
		/* pid file present -> a postmaster is (probably) running */
		close(fd);
		return true;
	}

	/* ENOENT simply means "not running"; anything else is suspicious */
	if (errno != ENOENT)
		pg_log(ctx, PG_FATAL, "\ncould not open file \"%s\" for reading\n",
			   pidfile);

	return false;
}
/*
* check_exec()
*
* Checks whether either of the two command names (cmdName and alternative)
* appears to be an executable (in the given directory). If dir/cmdName is
* an executable, this function returns 1. If dir/alternative is an
* executable, this function returns 2. If neither of the given names is
* a valid executable, this function returns 0 to indicated failure.
*/
static int
check_exec(migratorContext *ctx, const char *dir, const char *cmdName,
		   const char *alternative)
{
	/*
	 * Probe dir/cmdName for executability.  On success return 1.  Otherwise,
	 * if an alternative name was given, warn and probe it (returning 2 on
	 * success); with no alternative the failure is fatal.  Returns 0 when
	 * neither name is acceptable.
	 */
	char		cmd_path[MAXPGPATH];
	const char *err;

	snprintf(cmd_path, sizeof(cmd_path), "%s%c%s", dir, pathSeparator, cmdName);

	err = validate_exec(cmd_path);
	if (err == NULL)
		return 1;				/* 1 -> first alternative OK */

	if (alternative == NULL)
		pg_log(ctx, PG_FATAL, "check for %s failed - %s\n", cmdName, err);
	else
	{
		report_status(ctx, PG_WARNING, "check for %s warning: %s",
					  cmdName, err);
		/* recurse once, with no further fallback */
		if (check_exec(ctx, dir, alternative, NULL) == 1)
			return 2;			/* 2 -> second alternative OK */
	}

	return 0;					/* 0 -> neither alternative is acceptable */
}
/*
* validate_exec()
*
* validate "path" as an executable file
* returns 0 if the file is found and no error is encountered.
* -1 if the regular file "path" does not exist or cannot be executed.
* -2 if the file is otherwise valid but cannot be read.
*/
static const char *
validate_exec(const char *path)
{
	/*
	 * Returns NULL if "path" is a readable, executable regular file for the
	 * effective user; otherwise returns a static string describing the
	 * problem.  (The header comment's 0/-1/-2 codes describe an older
	 * interface; this version returns NULL / message strings.)
	 */
	struct stat buf;
#ifndef WIN32
	uid_t euid;
	struct group *gp;
	struct passwd *pwp;
	int in_grp = 0;
#else
	/* room for the name plus the implicit ".exe" suffix */
	char path_exe[MAXPGPATH + sizeof(EXE_EXT) - 1];
#endif

#ifdef WIN32
	/* Win32 requires a .exe suffix for stat() */
	if (strlen(path) >= strlen(EXE_EXT) &&
		pg_strcasecmp(path + strlen(path) - strlen(EXE_EXT), EXE_EXT) != 0)
	{
		/* append EXE_EXT and point "path" at the local buffer */
		strcpy(path_exe, path);
		strcat(path_exe, EXE_EXT);
		path = path_exe;
	}
#endif

	/*
	 * Ensure that the file exists and is a regular file.
	 */
	if (stat(path, &buf) < 0)
		return getErrorText(errno);

	if ((buf.st_mode & S_IFMT) != S_IFREG)
		return "not an executable file";

	/*
	 * Ensure that we are using an authorized executable.
	 */

	/*
	 * Ensure that the file is both executable and readable (required for
	 * dynamic loading).
	 */
#ifndef WIN32
	euid = geteuid();

	/* If owned by us, just check owner bits */
	if (euid == buf.st_uid)
	{
		if ((buf.st_mode & S_IRUSR) == 0)
			return "can't read file (permission denied)";
		if ((buf.st_mode & S_IXUSR) == 0)
			return "can't execute (permission denied)";
		return NULL;
	}

	/* OK, check group bits */
	pwp = getpwuid(euid);		/* not thread-safe */
	if (pwp)
	{
		if (pwp->pw_gid == buf.st_gid)	/* my primary group? */
			++in_grp;
		else if (pwp->pw_name &&
				 (gp = getgrgid(buf.st_gid)) != NULL &&
				 /* not thread-safe */ gp->gr_mem != NULL)
		{
			/* try list of member groups */
			int i;

			for (i = 0; gp->gr_mem[i]; ++i)
			{
				if (!strcmp(gp->gr_mem[i], pwp->pw_name))
				{
					++in_grp;
					break;
				}
			}
		}
		/* if we are in the file's group, group bits decide */
		if (in_grp)
		{
			if ((buf.st_mode & S_IRGRP) == 0)
				return "can't read file (permission denied)";
			if ((buf.st_mode & S_IXGRP) == 0)
				return "can't execute (permission denied)";
			return NULL;
		}
	}

	/* Check "other" bits */
	if ((buf.st_mode & S_IROTH) == 0)
		return "can't read file (permission denied)";
	if ((buf.st_mode & S_IXOTH) == 0)
		return "can't execute (permission denied)";
	return NULL;
#else
	/* on Win32 only the "user" permission bits are meaningful */
	if ((buf.st_mode & S_IRUSR) == 0)
		return "can't read file (permission denied)";
	if ((buf.st_mode & S_IXUSR) == 0)
		return "can't execute (permission denied)";
	return NULL;
#endif
}
/*
* check_data_dir()
*
* This function validates the given cluster directory - we search for a
* small set of subdirectories that we expect to find in a valid $PGDATA
* directory. If any of the subdirectories are missing (or secured against
* us) we display an error message and exit()
*
*/
static int
check_data_dir(migratorContext *ctx, const char *pg_data)
{
	/*
	 * Verify that each subdirectory a valid $PGDATA must contain exists and
	 * is actually a directory.  All problems are reported as warnings; the
	 * return value is 0 on success, -1 if anything was wrong.
	 */
	const char *requiredSubdirs[] = {"base", "global", "pg_clog",
		"pg_multixact", "pg_subtrans",
		"pg_tblspc", "pg_twophase", "pg_xlog"};
	bool		fail = false;
	int			subdirnum;

	for (subdirnum = 0;
		 subdirnum < sizeof(requiredSubdirs) / sizeof(requiredSubdirs[0]);
		 ++subdirnum)
	{
		char		subDirName[MAXPGPATH];
		struct stat statBuf;

		snprintf(subDirName, sizeof(subDirName), "%s%c%s", pg_data,
				 pathSeparator, requiredSubdirs[subdirnum]);

		if (stat(subDirName, &statBuf) != 0)
		{
			/* missing, or stat() was denied */
			report_status(ctx, PG_WARNING, "check for %s warning: %s",
						  requiredSubdirs[subdirnum], getErrorText(errno));
			fail = true;
		}
		else if (!S_ISDIR(statBuf.st_mode))
		{
			report_status(ctx, PG_WARNING, "%s is not a directory",
						  requiredSubdirs[subdirnum]);
			fail = true;
		}
	}

	return fail ? -1 : 0;
}
/*
* file.c
*
* file system operations
*/
#include "pg_upgrade.h"
#include <sys/types.h>
#include <fcntl.h>
#ifdef EDB_NATIVE_LANG
#include <fcntl.h>
#endif
#ifdef WIN32
#include <windows.h>
#endif
#ifndef WIN32
char pathSeparator = '/';
#else
char pathSeparator = '\\';
#endif
static int copy_file(const char *fromfile, const char *tofile, bool force);
#ifdef WIN32
static int win32_pghardlink(const char *src, const char *dst);
#endif
#ifdef NOT_USED
static int copy_dir(const char *from, const char *to, bool force);
#endif
#if defined(sun) || defined(WIN32)
static int pg_scandir_internal(migratorContext *ctx, const char *dirname,
struct dirent *** namelist,
int (*selector) (const struct dirent *));
#endif
/*
* copyAndUpdateFile()
*
* Copies a relation file from src to dst. If pageConverter is non-NULL, this function
* uses that pageConverter to do a page-by-page conversion.
*/
const char *
copyAndUpdateFile(migratorContext *ctx, pageCnvCtx *pageConverter,
				  const char *src, const char *dst, bool force)
{
	/*
	 * Copies a relation file from src to dst.  If pageConverter is non-NULL,
	 * a page-by-page conversion is performed instead of a raw copy.
	 * Returns NULL on success, or a static error string on failure.
	 */
	if (pageConverter == NULL)
	{
		/* same page layout in both clusters: straight file copy */
		if (pg_copy_file(src, dst, force) == -1)
			return getErrorText(errno);
		else
			return NULL;
	}
	else
	{
		/*
		 * We have a pageConverter object - that implies that the
		 * PageLayoutVersion differs between the two clusters so we have to
		 * perform a page-by-page conversion.
		 *
		 * If the pageConverter can convert the entire file at once, invoke
		 * that plugin function, otherwise, read each page in the relation
		 * file and call the convertPage plugin function.
		 */
#ifdef PAGE_CONVERSION
		if (pageConverter->convertFile)
			return pageConverter->convertFile(pageConverter->pluginData,
											  dst, src);
		else
#endif
		{
			int			src_fd;
			int			dstfd;
			char		buf[BLCKSZ];
			ssize_t		bytesRead;
			const char *msg = NULL;

			if ((src_fd = open(src, O_RDONLY, 0)) < 0)
				return "can't open source file";

			if ((dstfd = open(dst, O_RDWR | O_CREAT | O_EXCL, S_IRUSR | S_IWUSR)) < 0)
			{
				close(src_fd);	/* BUGFIX: was leaked on this error path */
				return "can't create destination file";
			}

			/* convert and copy the file one full page at a time */
			while ((bytesRead = read(src_fd, buf, BLCKSZ)) == BLCKSZ)
			{
#ifdef PAGE_CONVERSION
				if ((msg = pageConverter->convertPage(pageConverter->pluginData, buf, buf)) != NULL)
					break;
#endif
				if (write(dstfd, buf, BLCKSZ) != BLCKSZ)
				{
					msg = "can't write new page to destination";
					break;
				}
			}

			close(src_fd);
			close(dstfd);

			if (msg)
				return msg;
			else if (bytesRead < 0)
				/* BUGFIX: a read() error was formerly reported as a partial page */
				return "can't read source file";
			else if (bytesRead != 0)
				return "found partial page in source file";
			else
				return NULL;	/* clean EOF on a page boundary */
		}
	}
}
/*
* linkAndUpdateFile()
*
* Creates a symbolic link between the given relation files. We use
* this function to perform a true in-place update. If the on-disk
* format of the new cluster is bit-for-bit compatible with the on-disk
* format of the old cluster, we can simply symlink each relation
* instead of copying the data from the old cluster to the new cluster.
*/
const char *
linkAndUpdateFile(migratorContext *ctx, pageCnvCtx *pageConverter,
				  const char *src, const char *dst)
{
	/*
	 * Links the old relation file to the new name via pg_link_file().  This
	 * only works when the on-disk format of both clusters is bit-for-bit
	 * compatible, so any request for page conversion is rejected up front.
	 * Returns NULL on success, or an error string.
	 */
	if (pageConverter != NULL)
		return "Can't in-place update this cluster, page-by-page conversion is required";

	return (pg_link_file(src, dst) == -1) ? getErrorText(errno) : NULL;
}
/*
 * copy_file()
 *
 * Copies srcfile to dstfile in COPY_BUF_SIZE chunks.  When "force" is set
 * an existing destination is overwritten, otherwise creation is exclusive.
 * Returns 1 on success, -1 on any failure (errno is left describing the
 * problem; a short write with no errno is mapped to ENOSPC).
 */
static int
copy_file(const char *srcfile, const char *dstfile, bool force)
{
#define COPY_BUF_SIZE (50 * BLCKSZ)

	int			src_fd = -1;
	int			dest_fd = -1;
	char	   *buffer = NULL;
	int			rc = -1;

	if (srcfile == NULL || dstfile == NULL)
		return -1;

	if ((src_fd = open(srcfile, O_RDONLY, 0)) < 0)
		return -1;

	/*
	 * BUGFIX: the old cleanup guards tested "fd != 0", which would both leak
	 * a legitimately-zero descriptor and never skip anything useful; the
	 * goto-based cleanup below tests "fd >= 0" instead.
	 */
	if ((dest_fd = open(dstfile, O_RDWR | O_CREAT | (force ? 0 : O_EXCL),
						S_IRUSR | S_IWUSR)) < 0)
		goto cleanup;

	if ((buffer = malloc(COPY_BUF_SIZE)) == NULL)
		goto cleanup;

	/* perform data copying i.e read src source, write to destination */
	for (;;)
	{
		ssize_t		nbytes = read(src_fd, buffer, COPY_BUF_SIZE);

		if (nbytes < 0)
			goto cleanup;
		if (nbytes == 0)
			break;				/* EOF */

		errno = 0;
		if (write(dest_fd, buffer, nbytes) != nbytes)
		{
			/* if write didn't set errno, assume problem is no disk space */
			if (errno == 0)
				errno = ENOSPC;
			goto cleanup;
		}
	}

	rc = 1;

cleanup:
	free(buffer);				/* free(NULL) is a no-op */
	if (src_fd >= 0)
		close(src_fd);
	if (dest_fd >= 0)
		close(dest_fd);
	return rc;
}
/*
* pg_scandir()
*
* Wrapper for portable scandir functionality
*
*/
int
pg_scandir(migratorContext *ctx, const char *dirname,
		   struct dirent *** namelist, int (*selector) (const struct dirent *),
		   int (*cmp) (const void *, const void *))
{
#if defined(sun) || defined(WIN32)
	/*
	 * These platforms lack a usable scandir(); use our own implementation.
	 * Note it ignores "cmp", so results are NOT sorted on these platforms.
	 */
	return pg_scandir_internal(ctx, dirname, namelist, selector);

	/*
	 * Here we try to guess which libc's need const, and which don't. The net
	 * goal here is to try to supress a compiler warning due to a prototype
	 * mismatch of const usage. Ideally we would do this via autoconf, but
	 * Postgres's autoconf doesn't test for this and it is overkill to add
	 * autoconf just for this. scandir() is from BSD 4.3, which had the third
	 * argument as non-const. Linux and other C libraries have updated it to
	 * use a const.
	 * http://unix.derkeiler.com/Mailing-Lists/FreeBSD/questions/2005-12/msg002
	 * 14.html
	 */
#elif defined(freebsd) || defined(bsdi) || defined(darwin) || defined(openbsd)
	/* no const */
	return scandir(dirname, namelist, (int (*) (struct dirent *)) selector, cmp);
#else
	/* use const */
	return scandir(dirname, namelist, selector, cmp);
#endif
}
#if defined(sun) || defined(WIN32)
/*
* pg_scandir_internal()
*
* We'll provide our own scandir function for sun, since it is not
* part of the standard system library.
*
* Returns count of files that meet the selection criteria coded in
* the function pointed to by selector. Creates an array of pointers
* to dirent structures. Address of array returned in namelist.
*
* Note that the number of dirent structures needed is dynamically
* allocated using realloc. Realloc can be inneficient if invoked a
* large number of times. Its use in pg_upgrade is to find filesystem
* filenames that have extended beyond the initial segment (file.1,
* .2, etc.) and should therefore be invoked a small number of times.
*/
/*
 * pg_scandir_internal()
 *
 * scandir() replacement for platforms that lack one.  Returns the count of
 * entries accepted by "selector" (allocated into *namelist), or -1 on
 * out-of-memory.  Entries are returned in readdir() order (unsorted).
 */
static int
pg_scandir_internal(migratorContext *ctx, const char *dirname,
					struct dirent *** namelist, int (*selector) (const struct dirent *))
{
	DIR		   *dirdesc;
	struct dirent *direntry;
	int			count = 0;

	if ((dirdesc = opendir(dirname)) == NULL)
		pg_log(ctx, PG_FATAL, "Could not open directory \"%s\": %m\n", dirname);

	*namelist = NULL;

	while ((direntry = readdir(dirdesc)) != NULL)
	{
		struct dirent **newlist;
		struct dirent *entry;
		size_t		entrysize;

		/* Invoke the selector function to see if the direntry matches */
		if (!(*selector) (direntry))
			continue;

		/*
		 * BUGFIX: grow via a temporary so the old array survives a failed
		 * realloc, and close the directory on every error path (both were
		 * formerly leaked).
		 */
		newlist = (struct dirent **) realloc((void *) (*namelist),
					 (size_t) ((count + 1) * sizeof(struct dirent *)));
		if (newlist == NULL)
		{
			closedir(dirdesc);
			return -1;
		}
		*namelist = newlist;

		/* allocate just enough to hold the fixed part plus the name */
		entrysize = sizeof(struct dirent) - sizeof(direntry->d_name) +
			strlen(direntry->d_name) + 1;
		entry = (struct dirent *) malloc(entrysize);
		if (entry == NULL)
		{
			closedir(dirdesc);
			return -1;
		}
		memcpy(entry, direntry, entrysize);

		(*namelist)[count++] = entry;
	}

	closedir(dirdesc);
	return count;
}
#endif
/*
* dir_matching_filenames
*
* Return only matching file names during directory scan
*/
int
dir_matching_filenames(const struct dirent * scan_ent)
{
	/*
	 * Selector for pg_scandir(): accept a directory entry when its name
	 * begins with the global scandir_file_pattern.  Only a prefix match is
	 * used because the numeric segment suffix (.1, .2, ...) varies.
	 */
	return strncmp(scandir_file_pattern, scan_ent->d_name,
				   strlen(scandir_file_pattern)) == 0 ? 1 : 0;
}
void
check_hard_link(migratorContext *ctx)
{
	/*
	 * Prove that a hard link can span the old and new data directories by
	 * linking the old PG_VERSION file (guaranteed to exist) to a scratch
	 * name in the new directory.  Fatal if the link cannot be made, since
	 * link mode requires both directories on the same file system volume.
	 */
	char		old_version_file[MAXPGPATH];
	char		probe_link[MAXPGPATH];

	snprintf(old_version_file, sizeof(old_version_file),
			 "%s/PG_VERSION", ctx->old.pgdata);
	snprintf(probe_link, sizeof(probe_link),
			 "%s/PG_VERSION.linktest", ctx->new.pgdata);

	/* remove any leftover from a previous run; failure is harmless */
	unlink(probe_link);

	if (pg_link_file(old_version_file, probe_link) == -1)
		pg_log(ctx, PG_FATAL,
			   "Could not create hard link between old and new data directories: %s\n"
			   "In link mode the old and new data directories must be on the same file system volume.\n",
			   getErrorText(errno));

	unlink(probe_link);
}
#ifdef WIN32
static int
win32_pghardlink(const char *src, const char *dst)
{
	/*
	 * Thin wrapper giving CreateHardLinkA Unix-link() semantics:
	 * 0 on success, -1 on failure.
	 *
	 * CreateHardLinkA returns zero for failure
	 * http://msdn.microsoft.com/en-us/library/aa363860(VS.85).aspx
	 */
	return (CreateHardLinkA(dst, src, NULL) == 0) ? -1 : 0;
}
#ifdef NOT_USED
/*
* copy_dir()
*
* Copies either a directory or a single file within a directory. If the
* source argument names a directory, we recursively copy that directory,
* otherwise we copy a single file.
*/
/*
 * copy_dir()
 *
 * Copies either a directory or a single file within a directory. If the
 * source argument names a directory, we recursively copy that directory,
 * otherwise we copy a single file.  Returns 1 on success, -1 on failure.
 */
static int
copy_dir(const char *src, const char *dst, bool force)
{
	DIR		   *srcdir;
	struct dirent *de;
	int			rc = -1;

	if (src == NULL || dst == NULL)
		return -1;

	/*
	 * Try to open the source directory - if it turns out not to be a
	 * directory, assume that it's a file and copy that instead.
	 */
	if ((srcdir = opendir(src)) == NULL)
	{
		if (errno == ENOTDIR)
			return copy_file(src, dst, true);
		return -1;
	}

	/* create the destination; ignore "directory already exists" */
	if (mkdir(dst, S_IRWXU) != 0 && errno != EEXIST)
		goto cleanup;			/* BUGFIX: srcdir was leaked here */

	while ((de = readdir(srcdir)) != NULL)
	{
		char		src_file[MAXPGPATH];
		char		dest_file[MAXPGPATH];
		struct stat fst;

		if (strcmp(de->d_name, ".") == 0 || strcmp(de->d_name, "..") == 0)
			continue;

		snprintf(src_file, sizeof(src_file), "%s/%s", src, de->d_name);
		snprintf(dest_file, sizeof(dest_file), "%s/%s", dst, de->d_name);

		if (stat(src_file, &fst) < 0)
			goto cleanup;

		/*
		 * BUGFIX: use S_ISDIR/S_ISREG instead of "st_mode & S_IFDIR";
		 * S_IFMT is a multi-bit field, so e.g. a socket's mode has both the
		 * S_IFDIR and S_IFREG bits set and matched the old tests.
		 */
		if (S_ISDIR(fst.st_mode))
		{
			/* recurse to handle subdirectories */
			if (force)
				copy_dir(src_file, dest_file, true);
		}
		else if (S_ISREG(fst.st_mode))
		{
			if (copy_file(src_file, dest_file, true) == -1)
				goto cleanup;
		}
	}

	rc = 1;

cleanup:
	closedir(srcdir);
	return rc;
}
#endif
/*
* function.c
*
* server-side function support
*/
#include "pg_upgrade.h"
#include "access/transam.h"
/*
* install_support_functions()
*
* pg_upgrade requires some support functions that enable it to modify
* backend behavior.
*/
void
install_support_functions(migratorContext *ctx)
{
	/*
	 * Install the binary_upgrade schema and its C-language support functions
	 * (from pg_upgrade_sysoids) into every database of the new cluster.
	 * These let pg_upgrade force specific OIDs/relfilenodes during restore.
	 * The statements run in order; none of them contains a '%'.
	 */
	static const char *const support_sql[] = {
		/* suppress NOTICE of dropped objects */
		"SET client_min_messages = warning;",
		"DROP SCHEMA IF EXISTS binary_upgrade CASCADE;",
		"RESET client_min_messages;",
		"CREATE SCHEMA binary_upgrade;",
		"CREATE OR REPLACE FUNCTION "
		" binary_upgrade.set_next_pg_type_oid(OID) "
		"RETURNS VOID "
		"AS '$libdir/pg_upgrade_sysoids' "
		"LANGUAGE C STRICT;",
		"CREATE OR REPLACE FUNCTION "
		" binary_upgrade.set_next_pg_type_array_oid(OID) "
		"RETURNS VOID "
		"AS '$libdir/pg_upgrade_sysoids' "
		"LANGUAGE C STRICT;",
		"CREATE OR REPLACE FUNCTION "
		" binary_upgrade.set_next_pg_type_toast_oid(OID) "
		"RETURNS VOID "
		"AS '$libdir/pg_upgrade_sysoids' "
		"LANGUAGE C STRICT;",
		"CREATE OR REPLACE FUNCTION "
		" binary_upgrade.set_next_heap_relfilenode(OID) "
		"RETURNS VOID "
		"AS '$libdir/pg_upgrade_sysoids' "
		"LANGUAGE C STRICT;",
		"CREATE OR REPLACE FUNCTION "
		" binary_upgrade.set_next_toast_relfilenode(OID) "
		"RETURNS VOID "
		"AS '$libdir/pg_upgrade_sysoids' "
		"LANGUAGE C STRICT;",
		"CREATE OR REPLACE FUNCTION "
		" binary_upgrade.set_next_index_relfilenode(OID) "
		"RETURNS VOID "
		"AS '$libdir/pg_upgrade_sysoids' "
		"LANGUAGE C STRICT;",
		"CREATE OR REPLACE FUNCTION "
		" binary_upgrade.add_pg_enum_label(OID, OID, NAME) "
		"RETURNS VOID "
		"AS '$libdir/pg_upgrade_sysoids' "
		"LANGUAGE C STRICT;"
	};
	int			dbnum;

	prep_status(ctx, "Adding support functions to new cluster");

	for (dbnum = 0; dbnum < ctx->new.dbarr.ndbs; dbnum++)
	{
		DbInfo	   *newdb = &ctx->new.dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(ctx, newdb->db_name, CLUSTER_NEW);
		int			sqlnum;

		for (sqlnum = 0;
			 sqlnum < sizeof(support_sql) / sizeof(support_sql[0]);
			 sqlnum++)
			PQclear(executeQueryOrDie(ctx, conn, support_sql[sqlnum]));

		PQfinish(conn);
	}
	check_ok(ctx);
}
void
uninstall_support_functions(migratorContext *ctx)
{
	/*
	 * Drop the binary_upgrade schema (and everything in it) from every
	 * database of the new cluster, undoing install_support_functions().
	 */
	int			dbnum;

	prep_status(ctx, "Removing support functions from new cluster");

	for (dbnum = 0; dbnum < ctx->new.dbarr.ndbs; dbnum++)
	{
		PGconn	   *conn;

		conn = connectToServer(ctx, ctx->new.dbarr.dbs[dbnum].db_name,
							   CLUSTER_NEW);

		/* suppress NOTICE of dropped objects */
		PQclear(executeQueryOrDie(ctx, conn,
								  "SET client_min_messages = warning;"));
		PQclear(executeQueryOrDie(ctx, conn,
								  "DROP SCHEMA binary_upgrade CASCADE;"));
		PQclear(executeQueryOrDie(ctx, conn,
								  "RESET client_min_messages;"));

		PQfinish(conn);
	}
	check_ok(ctx);
}
/*
* get_loadable_libraries()
*
* Fetch the names of all old libraries containing C-language functions.
* We will later check that they all exist in the new installation.
*/
void
get_loadable_libraries(migratorContext *ctx)
{
	/*
	 * Collect, from every database of the OLD cluster, the distinct probin
	 * values of user-defined C-language functions into ctx->libraries /
	 * ctx->num_libraries.  Pass 1 queries each DB (DISTINCT removes in-DB
	 * duplicates); pass 2 removes duplicates across DBs.
	 */
	ClusterInfo *active_cluster = &ctx->old;
	PGresult  **ress;
	int			totaltups;
	int			dbnum;

	ress = (PGresult **)
		pg_malloc(ctx, active_cluster->dbarr.ndbs * sizeof(PGresult *));
	totaltups = 0;

	/* Fetch all library names, removing duplicates within each DB */
	for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
	{
		DbInfo	   *active_db = &active_cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(ctx, active_db->db_name, CLUSTER_OLD);

		/*
		 * Fetch all libraries referenced in this DB.  prolang = 13 is the
		 * C language's hardwired OID; oid >= FirstNormalObjectId restricts
		 * the scan to user-defined functions.
		 */
		ress[dbnum] = executeQueryOrDie(ctx, conn,
										"SELECT DISTINCT probin "
										"FROM	pg_catalog.pg_proc "
										"WHERE	prolang = 13 /* C */ AND "
										"		probin IS NOT NULL AND "
										"		oid >= %u;",
										FirstNormalObjectId);
		totaltups += PQntuples(ress[dbnum]);

		PQfinish(conn);
	}

	/* Allocate what's certainly enough space */
	if (totaltups > 0)
		ctx->libraries = (char **) pg_malloc(ctx, totaltups * sizeof(char *));
	else
		ctx->libraries = NULL;

	/*
	 * Now remove duplicates across DBs. This is pretty inefficient code, but
	 * there probably aren't enough entries to matter.
	 */
	totaltups = 0;				/* reused as the deduplicated count */

	for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res = ress[dbnum];
		int			ntups;
		int			rowno;

		ntups = PQntuples(res);
		for (rowno = 0; rowno < ntups; rowno++)
		{
			char	   *lib = PQgetvalue(res, rowno, 0);
			bool		dup = false;
			int			n;

			/* linear scan of what we've kept so far */
			for (n = 0; n < totaltups; n++)
			{
				if (strcmp(lib, ctx->libraries[n]) == 0)
				{
					dup = true;
					break;
				}
			}
			if (!dup)
				ctx->libraries[totaltups++] = pg_strdup(ctx, lib);
		}

		PQclear(res);
	}

	ctx->num_libraries = totaltups;

	pg_free(ress);
}
/*
* check_loadable_libraries()
*
* Check that the new cluster contains all required libraries.
* We do this by actually trying to LOAD each one, thereby testing
* compatibility as well as presence.
*/
void
check_loadable_libraries(migratorContext *ctx)
{
	/*
	 * Try to LOAD each library recorded by get_loadable_libraries() in the
	 * new cluster, verifying both presence and binary compatibility.
	 * Failures are accumulated into loadable_libraries.txt and reported as
	 * one fatal error at the end.
	 */
	PGconn	   *conn = connectToServer(ctx, "template1", CLUSTER_NEW);
	int			libnum;
	FILE	   *script = NULL;
	bool		found = false;
	char		output_path[MAXPGPATH];

	prep_status(ctx, "Checking for presence of required libraries");

	snprintf(output_path, sizeof(output_path), "%s/loadable_libraries.txt",
			 ctx->output_dir);

	for (libnum = 0; libnum < ctx->num_libraries; libnum++)
	{
		char	   *lib = ctx->libraries[libnum];
		int			llen = strlen(lib);

		/* "LOAD '" (6) + worst-case escaping (2*llen) + "'" + NUL */
		char	   *cmd = (char *) pg_malloc(ctx, 8 + 2 * llen + 1);
		PGresult   *res;

		strcpy(cmd, "LOAD '");
		PQescapeStringConn(conn, cmd + 6, lib, llen, NULL);
		strcat(cmd, "'");

		res = PQexec(conn, cmd);

		if (PQresultStatus(res) != PGRES_COMMAND_OK)
		{
			found = true;
			/* open the report file lazily, on the first failure */
			if (script == NULL && (script = fopen(output_path, "w")) == NULL)
				pg_log(ctx, PG_FATAL, "Could not create necessary file:  %s\n",
					   output_path);
			fprintf(script, "Failed to load library: %s\n%s\n",
					lib,
					PQerrorMessage(conn));
		}

		PQclear(res);
		pg_free(cmd);
	}

	PQfinish(conn);

	if (found)
	{
		/* script is non-NULL here: fopen either succeeded or was fatal */
		fclose(script);
		pg_log(ctx, PG_REPORT, "fatal\n");
		pg_log(ctx, PG_FATAL,
			   "| Your installation uses loadable libraries that are missing\n"
			   "| from the new installation.  You can add these libraries to\n"
			   "| the new installation, or remove the functions using them\n"
			   "| from the old installation.  A list of the problem libraries\n"
			   "| is in the file\n"
			   "| \"%s\".\n\n", output_path);
	}
	else
		check_ok(ctx);
}
/*
* info.c
*
* information support functions
*/
#include "pg_upgrade.h"
#include "access/transam.h"
static void get_db_infos(migratorContext *ctx, DbInfoArr *dbinfos,
Cluster whichCluster);
static void dbarr_print(migratorContext *ctx, DbInfoArr *arr,
Cluster whichCluster);
static void relarr_print(migratorContext *ctx, RelInfoArr *arr);
static void get_rel_infos(migratorContext *ctx, const DbInfo *dbinfo,
RelInfoArr *relarr, Cluster whichCluster);
static void relarr_free(RelInfoArr *rel_arr);
static void map_rel(migratorContext *ctx, const RelInfo *oldrel,
const RelInfo *newrel, const DbInfo *old_db,
const DbInfo *new_db, const char *olddata,
const char *newdata, FileNameMap *map);
static void map_rel_by_id(migratorContext *ctx, Oid oldid, Oid newid,
const char *old_nspname, const char *old_relname,
const char *new_nspname, const char *new_relname,
const char *old_tablespace, const DbInfo *old_db,
const DbInfo *new_db, const char *olddata,
const char *newdata, FileNameMap *map);
static RelInfo *relarr_lookup_reloid(migratorContext *ctx,
RelInfoArr *rel_arr, Oid oid, Cluster whichCluster);
static RelInfo *relarr_lookup_rel(migratorContext *ctx, RelInfoArr *rel_arr,
const char *nspname, const char *relname,
Cluster whichCluster);
/*
* gen_db_file_maps()
*
* generates database mappings for "old_db" and "new_db". Returns a malloc'ed
* array of mappings. nmaps is a return parameter which refers to the number
* mappings.
*
* NOTE: Its the Caller's responsibility to free the returned array.
*/
FileNameMap *
gen_db_file_maps(migratorContext *ctx, DbInfo *old_db, DbInfo *new_db,
				 int *nmaps, const char *old_pgdata, const char *new_pgdata)
{
	FileNameMap *maps;
	int			relnum;
	int			num_maps = 0;

	/*
	 * One map slot per entry in the new rel array is enough: toast tables
	 * and toast indexes are themselves entries in rel_arr (they are skipped
	 * below and mapped via their parent), so the rel + toast + toast-index
	 * maps emitted per parent never exceed nrels in total.
	 */
	maps = (FileNameMap *) pg_malloc(ctx, sizeof(FileNameMap) *
									 new_db->rel_arr.nrels);

	for (relnum = 0; relnum < new_db->rel_arr.nrels; relnum++)
	{
		RelInfo    *newrel = &new_db->rel_arr.rels[relnum];
		RelInfo    *oldrel;

		/* toast tables are handled by their parent */
		if (strcmp(newrel->nspname, "pg_toast") == 0)
			continue;

		/* relarr_lookup_rel() is fatal if the old cluster lacks the rel */
		oldrel = relarr_lookup_rel(ctx, &(old_db->rel_arr), newrel->nspname,
								   newrel->relname, CLUSTER_OLD);

		map_rel(ctx, oldrel, newrel, old_db, new_db, old_pgdata, new_pgdata,
				maps + num_maps);
		num_maps++;

		/*
		 * so much for the mapping of this relation. Now we need a mapping for
		 * its corresponding toast relation if any.
		 */
		if (oldrel->toastrelid > 0)
		{
			RelInfo    *new_toast;
			RelInfo    *old_toast;
			char		new_name[MAXPGPATH];
			char		old_name[MAXPGPATH];

			/* construct the new and old relnames for the toast relation */
			snprintf(old_name, sizeof(old_name), "pg_toast_%u",
					 oldrel->reloid);
			snprintf(new_name, sizeof(new_name), "pg_toast_%u",
					 newrel->reloid);

			/*
			 * look them up in their respective arrays.  NOTE(review): the
			 * old toast rel is found by its OID, the new one by name --
			 * presumably because the new toast name is derived from the
			 * new parent OID; confirm against the catalogs.
			 */
			old_toast = relarr_lookup_reloid(ctx, &old_db->rel_arr,
											 oldrel->toastrelid, CLUSTER_OLD);
			new_toast = relarr_lookup_rel(ctx, &new_db->rel_arr,
										  "pg_toast", new_name, CLUSTER_NEW);

			/* finally create a mapping for them */
			map_rel(ctx, old_toast, new_toast, old_db, new_db, old_pgdata, new_pgdata,
					maps + num_maps);
			num_maps++;

			/*
			 * also need to provide a mapping for the index of this toast
			 * relation. The procedure is similar to what we did above for
			 * toast relation itself, the only difference being that the
			 * relnames need to be appended with _index.
			 */

			/*
			 * construct the new and old relnames for the toast index
			 * relations
			 */
			snprintf(old_name, sizeof(old_name), "%s_index", old_toast->relname);
			snprintf(new_name, sizeof(new_name), "pg_toast_%u_index",
					 newrel->reloid);

			/* look them up in their respective arrays */
			old_toast = relarr_lookup_rel(ctx, &old_db->rel_arr,
										  "pg_toast", old_name, CLUSTER_OLD);
			new_toast = relarr_lookup_rel(ctx, &new_db->rel_arr,
										  "pg_toast", new_name, CLUSTER_NEW);

			/* finally create a mapping for them */
			map_rel(ctx, old_toast, new_toast, old_db, new_db, old_pgdata,
					new_pgdata, maps + num_maps);
			num_maps++;
		}
	}

	*nmaps = num_maps;
	return maps;
}
static void
map_rel(migratorContext *ctx, const RelInfo *oldrel, const RelInfo *newrel,
		const DbInfo *old_db, const DbInfo *new_db, const char *olddata,
		const char *newdata, FileNameMap *map)
{
	/* convenience wrapper: delegate using the two rels' relfilenodes */
	map_rel_by_id(ctx, oldrel->relfilenode, newrel->relfilenode,
				  oldrel->nspname, oldrel->relname,
				  newrel->nspname, newrel->relname,
				  oldrel->tablespace, old_db, new_db,
				  olddata, newdata, map);
}
/*
* map_rel_by_id()
*
* fills a file node map structure and returns it in "map".
*/
static void
map_rel_by_id(migratorContext *ctx, Oid oldid, Oid newid,
			  const char *old_nspname, const char *old_relname,
			  const char *new_nspname, const char *new_relname,
			  const char *old_tablespace, const DbInfo *old_db,
			  const DbInfo *new_db, const char *olddata,
			  const char *newdata, FileNameMap *map)
{
	/* record the relfilenodes and qualified names on both sides */
	map->new = newid;
	map->old = oldid;

	snprintf(map->old_nspname, sizeof(map->old_nspname), "%s", old_nspname);
	snprintf(map->old_relname, sizeof(map->old_relname), "%s", old_relname);
	snprintf(map->new_nspname, sizeof(map->new_nspname), "%s", new_nspname);
	snprintf(map->new_relname, sizeof(map->new_relname), "%s", new_relname);

	if (strlen(old_tablespace) == 0)
	{
		/*
		 * relation belongs to the default tablespace, hence relfiles would
		 * exist in the data directories.
		 */
		snprintf(map->old_file, sizeof(map->old_file), "%s/base/%u", olddata, old_db->db_oid);
		snprintf(map->new_file, sizeof(map->new_file), "%s/base/%u", newdata, new_db->db_oid);
	}
	else
	{
		/*
		 * relation belongs to some tablespace, hence copy its physical
		 * location
		 */
		snprintf(map->old_file, sizeof(map->old_file), "%s%s/%u", old_tablespace,
				 ctx->old.tablespace_suffix, old_db->db_oid);
		/*
		 * NOTE(review): new_file is also built from old_tablespace --
		 * presumably intentional because both clusters share the tablespace
		 * location and are distinguished only by tablespace_suffix; confirm.
		 */
		snprintf(map->new_file, sizeof(map->new_file), "%s%s/%u", old_tablespace,
				 ctx->new.tablespace_suffix, new_db->db_oid);
	}
}
void
print_maps(migratorContext *ctx, FileNameMap *maps, int n, const char *dbName)
{
	/* dump all file mappings for one database, but only in debug mode */
	int			mapnum;

	if (!ctx->debug)
		return;

	pg_log(ctx, PG_DEBUG, "mappings for db %s:\n", dbName);

	for (mapnum = 0; mapnum < n; mapnum++)
	{
		FileNameMap *m = &maps[mapnum];

		pg_log(ctx, PG_DEBUG, "%s.%s:%u ==> %s.%s:%u\n",
			   m->old_nspname, m->old_relname, m->old,
			   m->new_nspname, m->new_relname, m->new);
	}

	pg_log(ctx, PG_DEBUG, "\n\n");
}
/*
* get_db_infos()
*
* Scans pg_database system catalog and returns (in dbinfs_arr) all user
* databases.
*/
static void
get_db_infos(migratorContext *ctx, DbInfoArr *dbinfs_arr, Cluster whichCluster)
{
	/*
	 * Load OID, name, and tablespace location of every connectable database
	 * (template0 is excluded) into dbinfs_arr.
	 */
	PGconn	   *conn = connectToServer(ctx, "template1", whichCluster);
	PGresult   *res;
	DbInfo	   *dbinfos;
	int			ntups;
	int			tupnum;
	int			i_oid;
	int			i_datname;
	int			i_spclocation;

	res = executeQueryOrDie(ctx, conn,
							"SELECT d.oid, d.datname, t.spclocation "
							"FROM pg_catalog.pg_database d "
							" LEFT OUTER JOIN pg_catalog.pg_tablespace t "
							" ON d.dattablespace = t.oid "
							"WHERE d.datname != 'template0'");

	ntups = PQntuples(res);
	dbinfos = (DbInfo *) pg_malloc(ctx, sizeof(DbInfo) * ntups);

	i_oid = PQfnumber(res, "oid");
	i_datname = PQfnumber(res, "datname");
	i_spclocation = PQfnumber(res, "spclocation");

	for (tupnum = 0; tupnum < ntups; tupnum++)
	{
		DbInfo	   *db = &dbinfos[tupnum];

		db->db_oid = atol(PQgetvalue(res, tupnum, i_oid));
		snprintf(db->db_name, sizeof(db->db_name), "%s",
				 PQgetvalue(res, tupnum, i_datname));
		/* empty when the database lives in the default tablespace */
		snprintf(db->db_tblspace, sizeof(db->db_tblspace), "%s",
				 PQgetvalue(res, tupnum, i_spclocation));
	}

	PQclear(res);
	PQfinish(conn);

	dbinfs_arr->dbs = dbinfos;
	dbinfs_arr->ndbs = ntups;
}
/*
* get_db_and_rel_infos()
*
* higher level routine to generate dbinfos for the database running
* on the given "port". Assumes that server is already running.
*/
void
get_db_and_rel_infos(migratorContext *ctx, DbInfoArr *db_arr, Cluster whichCluster)
{
	/*
	 * Populate db_arr for the given (already running) cluster: first the
	 * database list, then the relation list of each database.
	 */
	int			dbnum;

	get_db_infos(ctx, db_arr, whichCluster);

	for (dbnum = 0; dbnum < db_arr->ndbs; dbnum++)
	{
		DbInfo	   *db = &db_arr->dbs[dbnum];

		get_rel_infos(ctx, db, &db->rel_arr, whichCluster);
	}

	if (ctx->debug)
		dbarr_print(ctx, db_arr, whichCluster);
}
/*
* get_rel_infos()
*
* gets the relinfos for all the user tables of the database refered
* by "db".
*
* NOTE: we assume that relations/entities with oids greater than
* FirstNormalObjectId belongs to the user
*/
static void
get_rel_infos(migratorContext *ctx, const DbInfo *dbinfo,
			  RelInfoArr *relarr, Cluster whichCluster)
{
	/*
	 * Load into "relarr" every user relation (and the pg_largeobject heap
	 * and index) of database "dbinfo", with its OID, relfilenode, toast
	 * relation, and tablespace.  Relations with OIDs >= FirstNormalObjectId
	 * are assumed to belong to the user.
	 */
	PGconn	   *conn = connectToServer(ctx, dbinfo->db_name, whichCluster);
	bool		is_edb_as = (whichCluster == CLUSTER_OLD) ?
	ctx->old.is_edb_as : ctx->new.is_edb_as;
	PGresult   *res;
	RelInfo    *relinfos;
	int			ntups;
	int			relnum;
	int			num_rels = 0;
	char	   *nspname = NULL;
	char	   *relname = NULL;
	int			i_spclocation = -1;
	int			i_nspname = -1;
	int			i_relname = -1;
	int			i_oid = -1;
	int			i_relfilenode = -1;
	int			i_reltoastrelid = -1;
	char		query[QUERY_ALLOC];

	/*
	 * pg_largeobject contains user data that does not appear the pg_dumpall
	 * --schema-only output, so we have to migrate that system table heap and
	 * index. Ideally we could just get the relfilenode from template1 but
	 * pg_largeobject_loid_pn_index's relfilenode can change if the table was
	 * reindexed so we get the relfilenode for each database and migrate it as
	 * a normal user table.
	 */
	snprintf(query, sizeof(query),
			 "SELECT DISTINCT c.oid, n.nspname, c.relname, "
			 " c.relfilenode, c.reltoastrelid, t.spclocation "
			 "FROM pg_catalog.pg_class c JOIN "
			 " pg_catalog.pg_namespace n "
			 " ON c.relnamespace = n.oid "
			 " LEFT OUTER JOIN pg_catalog.pg_tablespace t "
			 " ON c.reltablespace = t.oid "
			 "WHERE (( n.nspname NOT IN ('pg_catalog', 'information_schema') "
			 " AND c.oid >= %u "
			 " ) OR ( "
			 " n.nspname = 'pg_catalog' "
			 " AND (relname = 'pg_largeobject' OR "
			 " relname = 'pg_largeobject_loid_pn_index') )) "
			 " AND "
			 " (relkind = 'r' OR relkind = 't' OR "
			 " relkind = 'i'%s)%s"
			 "GROUP BY c.oid, n.nspname, c.relname, c.relfilenode,"
			 " c.reltoastrelid, t.spclocation, "
			 " n.nspname "
			 "ORDER BY n.nspname, c.relname;",
			 FirstNormalObjectId,
	/* see the comment at the top of v8_3_create_sequence_script() */
			 (GET_MAJOR_VERSION(ctx->old.major_version) <= 803) ?
			 "" : " OR relkind = 'S'",

	/*
	 * EDB AS installs pgagent by default via initdb. We have to ignore it,
	 * and not migrate any old table contents.
	 */
			 (is_edb_as && strcmp(dbinfo->db_name, "edb") == 0) ?
			 " AND "
			 " n.nspname != 'pgagent' AND "
	/* skip pgagent TOAST tables */
			 " c.oid NOT IN "
			 " ( "
			 " SELECT c2.reltoastrelid "
			 " FROM pg_catalog.pg_class c2 JOIN "
			 " pg_catalog.pg_namespace n2 "
			 " ON c2.relnamespace = n2.oid "
			 " WHERE n2.nspname = 'pgagent' AND "
			 " c2.reltoastrelid != 0 "
			 " ) AND "
	/* skip pgagent TOAST table indexes */
			 " c.oid NOT IN "
			 " ( "
			 " SELECT c3.reltoastidxid "
			 " FROM pg_catalog.pg_class c2 JOIN "
			 " pg_catalog.pg_namespace n2 "
			 " ON c2.relnamespace = n2.oid JOIN "
			 " pg_catalog.pg_class c3 "
			 " ON c2.reltoastrelid = c3.oid "
			 " WHERE n2.nspname = 'pgagent' AND "
			 " c2.reltoastrelid != 0 AND "
			 " c3.reltoastidxid != 0 "
			 " ) " : "");

	res = executeQueryOrDie(ctx, conn, query);

	ntups = PQntuples(res);

	relinfos = (RelInfo *) pg_malloc(ctx, sizeof(RelInfo) * ntups);

	i_oid = PQfnumber(res, "oid");
	i_nspname = PQfnumber(res, "nspname");
	i_relname = PQfnumber(res, "relname");
	i_relfilenode = PQfnumber(res, "relfilenode");
	i_reltoastrelid = PQfnumber(res, "reltoastrelid");
	i_spclocation = PQfnumber(res, "spclocation");

	for (relnum = 0; relnum < ntups; relnum++)
	{
		RelInfo    *curr = &relinfos[num_rels++];
		const char *tblspace;

		curr->reloid = atol(PQgetvalue(res, relnum, i_oid));

		nspname = PQgetvalue(res, relnum, i_nspname);
		/*
		 * BUGFIX: the names were formerly passed as the snprintf FORMAT
		 * string, so a '%' in a schema or table name invoked undefined
		 * behavior; copy them through "%s" instead.
		 */
		snprintf(curr->nspname, sizeof(curr->nspname), "%s", nspname);

		relname = PQgetvalue(res, relnum, i_relname);
		snprintf(curr->relname, sizeof(curr->relname), "%s", relname);

		curr->relfilenode = atol(PQgetvalue(res, relnum, i_relfilenode));
		curr->toastrelid = atol(PQgetvalue(res, relnum, i_reltoastrelid));

		tblspace = PQgetvalue(res, relnum, i_spclocation);
		/* if no table tablespace, use the database tablespace */
		if (strlen(tblspace) == 0)
			tblspace = dbinfo->db_tblspace;
		snprintf(curr->tablespace, sizeof(curr->tablespace), "%s", tblspace);
	}
	PQclear(res);
	PQfinish(conn);

	relarr->rels = relinfos;
	relarr->nrels = num_rels;
}
/*
 * dbarr_lookup_db()
 *
 * Scan the database array for an entry whose name equals db_name.
 * Returns a pointer into the array, or NULL when the array/name is
 * missing or no entry matches.
 */
DbInfo *
dbarr_lookup_db(DbInfoArr *db_arr, const char *db_name)
{
	int			i;

	if (db_arr == NULL || db_name == NULL)
		return NULL;

	for (i = 0; i < db_arr->ndbs; i++)
	{
		DbInfo	   *db = &db_arr->dbs[i];

		if (strcmp(db->db_name, db_name) == 0)
			return db;
	}

	return NULL;
}
/*
 * relarr_lookup_rel()
 *
 * Locate the RelInfo entry matching nspname/relname and return a
 * pointer into the array.  A miss is reported via pg_log(PG_FATAL);
 * the trailing NULL return is presumably unreached if PG_FATAL exits.
 */
static RelInfo *
relarr_lookup_rel(migratorContext *ctx, RelInfoArr *rel_arr,
				  const char *nspname, const char *relname,
				  Cluster whichCluster)
{
	int			i;

	if (rel_arr == NULL || relname == NULL)
		return NULL;

	for (i = 0; i < rel_arr->nrels; i++)
	{
		RelInfo    *rel = &rel_arr->rels[i];

		if (strcmp(rel->nspname, nspname) == 0 &&
			strcmp(rel->relname, relname) == 0)
			return rel;
	}

	pg_log(ctx, PG_FATAL, "Could not find %s.%s in %s cluster\n",
		   nspname, relname, CLUSTERNAME(whichCluster));
	return NULL;
}
/*
 * relarr_lookup_reloid()
 *
 *	Returns a pointer to the RelInfo structure for the
 *	given oid.  Returns NULL only for a NULL array or InvalidOid;
 *	an OID that is simply absent is reported via pg_log(PG_FATAL).
 */
static RelInfo *
relarr_lookup_reloid(migratorContext *ctx, RelInfoArr *rel_arr, Oid oid,
					 Cluster whichCluster)
{
	int			relnum;

	if (!rel_arr || !oid)
		return NULL;

	for (relnum = 0; relnum < rel_arr->nrels; relnum++)
	{
		if (rel_arr->rels[relnum].reloid == oid)
			return &rel_arr->rels[relnum];
	}

	/*
	 * Use %u, not %d: Oid is unsigned, and %d would misprint OIDs above
	 * INT_MAX (and mismatched format/argument types are undefined behavior).
	 */
	pg_log(ctx, PG_FATAL, "Could not find %u in %s cluster\n",
		   oid, CLUSTERNAME(whichCluster));
	return NULL;
}
/*
 * relarr_free()
 *
 * Release the relation array and reset it to an empty state.
 */
static void
relarr_free(RelInfoArr *rel_arr)
{
	pg_free(rel_arr->rels);
	rel_arr->rels = NULL;		/* guard against use-after-free / double free */
	rel_arr->nrels = 0;
}
/*
 * dbarr_free()
 *
 * Release every per-database relation array, then the database array
 * itself.  Previously the dbs array was never freed, leaking it each
 * time the array is regenerated (see create_new_objects()).
 *
 * NOTE(review): this assumes db_arr->dbs was heap-allocated by the
 * info-gathering code (as the rels arrays visibly are, via pg_malloc)
 * -- confirm against get_db_and_rel_infos().
 */
void
dbarr_free(DbInfoArr *db_arr)
{
	int			dbnum;

	for (dbnum = 0; dbnum < db_arr->ndbs; dbnum++)
		relarr_free(&db_arr->dbs[dbnum].rel_arr);

	pg_free(db_arr->dbs);
	db_arr->dbs = NULL;
	db_arr->ndbs = 0;
}
/*
 * dbarr_print()
 *
 * Debug-log every database in the array together with its relations.
 */
static void
dbarr_print(migratorContext *ctx, DbInfoArr *arr, Cluster whichCluster)
{
	int			i;

	pg_log(ctx, PG_DEBUG, "%s databases\n", CLUSTERNAME(whichCluster));

	for (i = 0; i < arr->ndbs; i++)
	{
		DbInfo	   *db = &arr->dbs[i];

		pg_log(ctx, PG_DEBUG, "Database: %s\n", db->db_name);
		relarr_print(ctx, &db->rel_arr);
		pg_log(ctx, PG_DEBUG, "\n\n");
	}
}
/*
 * relarr_print()
 *
 * Debug-log one line per relation in the array.
 */
static void
relarr_print(migratorContext *ctx, RelInfoArr *arr)
{
	int			i;

	for (i = 0; i < arr->nrels; i++)
	{
		RelInfo    *rel = &arr->rels[i];

		pg_log(ctx, PG_DEBUG, "relname: %s.%s: reloid: %u reltblspace: %s\n",
			   rel->nspname, rel->relname, rel->reloid, rel->tablespace);
	}
}
/*
* opt.c
*
* options functions
*/
#include "pg_upgrade.h"
#include "getopt_long.h"
#ifdef WIN32
#include <io.h>
#endif
static void usage(migratorContext *ctx);
static void validateDirectoryOption(migratorContext *ctx, char **dirpath,
char *envVarName, char *cmdLineOption, char *description);
static void get_pkglibdirs(migratorContext *ctx);
static char *get_pkglibdir(migratorContext *ctx, const char *bindir);
/*
 * parseCommandLine()
 *
 * Parses the command line (argc, argv[]) into the given migratorContext object
 * and initializes the rest of the object: ports (from PGPORT), user (from
 * PGUSER or -u), home/output directories, and the log/debug streams.
 */
void
parseCommandLine(migratorContext *ctx, int argc, char *argv[])
{
	static struct option long_options[] = {
		{"old-datadir", required_argument, NULL, 'd'},
		{"new-datadir", required_argument, NULL, 'D'},
		{"old-bindir", required_argument, NULL, 'b'},
		{"new-bindir", required_argument, NULL, 'B'},
		{"old-port", required_argument, NULL, 'p'},
		{"new-port", required_argument, NULL, 'P'},
		{"user", required_argument, NULL, 'u'},
		{"check", no_argument, NULL, 'c'},
		{"debug", no_argument, NULL, 'g'},
		{"debugfile", required_argument, NULL, 'G'},
		{"link", no_argument, NULL, 'k'},
		{"logfile", required_argument, NULL, 'l'},
		{"verbose", no_argument, NULL, 'v'},
		{NULL, 0, NULL, 0}
	};

	/*
	 * getopt_long() returns an int; storing it in a plain "char" breaks the
	 * end-of-options test ("!= -1") on platforms where char is unsigned
	 * (the comparison can never be true), so this must be an int.
	 */
	int			option;			/* Command line option */
	int			optindex = 0;	/* used by getopt_long */

	if (getenv("PGUSER"))
	{
		pg_free(ctx->user);
		ctx->user = pg_strdup(ctx, getenv("PGUSER"));
	}

	ctx->progname = get_progname(argv[0]);
	ctx->old.port = getenv("PGPORT") ? atoi(getenv("PGPORT")) : DEF_PGPORT;
	ctx->new.port = getenv("PGPORT") ? atoi(getenv("PGPORT")) : DEF_PGPORT;
	/* must save value, getenv()'s pointer is not stable */

	ctx->transfer_mode = TRANSFER_MODE_COPY;

	/* Handle --help/--version before regular option processing */
	if (argc > 1)
	{
		if (strcmp(argv[1], "--help") == 0 || strcmp(argv[1], "-h") == 0 ||
			strcmp(argv[1], "-?") == 0)
		{
			usage(ctx);
			exit_nicely(ctx, false);
		}
		if (strcmp(argv[1], "--version") == 0 || strcmp(argv[1], "-V") == 0)
		{
			pg_log(ctx, PG_REPORT, "pg_upgrade " PG_VERSION "\n");
			exit_nicely(ctx, false);
		}
	}

	if ((get_user_info(ctx, &ctx->user)) == 0)
		pg_log(ctx, PG_FATAL, "%s: cannot be run as root\n", ctx->progname);

#ifndef WIN32
	get_home_path(ctx->home_dir);
#else
	{
		char	   *tmppath;

		/* TMP is the best place on Windows, rather than APPDATA */
		if ((tmppath = getenv("TMP")) == NULL)
			pg_log(ctx, PG_FATAL, "TMP environment variable is not set.\n");
		snprintf(ctx->home_dir, MAXPGPATH, "%s", tmppath);
	}
#endif

	snprintf(ctx->output_dir, MAXPGPATH, "%s/" OUTPUT_SUBDIR, ctx->home_dir);

	while ((option = getopt_long(argc, argv, "d:D:b:B:cgG:kl:p:P:u:v",
								 long_options, &optindex)) != -1)
	{
		switch (option)
		{
			case 'd':
				ctx->old.pgdata = pg_strdup(ctx, optarg);
				break;

			case 'D':
				ctx->new.pgdata = pg_strdup(ctx, optarg);
				break;

			case 'b':
				ctx->old.bindir = pg_strdup(ctx, optarg);
				break;

			case 'B':
				ctx->new.bindir = pg_strdup(ctx, optarg);
				break;

			case 'c':
				ctx->check = true;
				break;

			case 'g':
				pg_log(ctx, PG_REPORT, "Running in debug mode\n");
				ctx->debug = true;
				break;

			case 'G':
				if ((ctx->debug_fd = fopen(optarg, "w")) == NULL)
				{
					pg_log(ctx, PG_FATAL, "cannot open debug file\n");
					exit_nicely(ctx, false);
				}
				break;

			case 'k':
				ctx->transfer_mode = TRANSFER_MODE_LINK;
				break;

			case 'l':
				ctx->logfile = pg_strdup(ctx, optarg);
				break;

			case 'p':
				if ((ctx->old.port = atoi(optarg)) <= 0)
				{
					pg_log(ctx, PG_FATAL, "invalid old port number\n");
					exit_nicely(ctx, false);
				}
				break;

			case 'P':
				if ((ctx->new.port = atoi(optarg)) <= 0)
				{
					pg_log(ctx, PG_FATAL, "invalid new port number\n");
					exit_nicely(ctx, false);
				}
				break;

			case 'u':
				pg_free(ctx->user);
				ctx->user = pg_strdup(ctx, optarg);
				break;

			case 'v':
				pg_log(ctx, PG_REPORT, "Running in verbose mode\n");
				ctx->verbose = true;
				break;

			default:
				pg_log(ctx, PG_FATAL,
					   "Try \"%s --help\" for more information.\n",
					   ctx->progname);
				break;
		}
	}

	if (ctx->logfile != NULL)
	{
		/*
		 * We must use append mode so output generated by child processes via
		 * ">>" will not be overwritten, and we want the file truncated on
		 * start.
		 */
		/* truncate */
		ctx->log_fd = fopen(ctx->logfile, "w");
		if (!ctx->log_fd)
			pg_log(ctx, PG_FATAL, "Cannot write to log file %s\n", ctx->logfile);
		fclose(ctx->log_fd);
		ctx->log_fd = fopen(ctx->logfile, "a");
		if (!ctx->log_fd)
			pg_log(ctx, PG_FATAL, "Cannot write to log file %s\n", ctx->logfile);
	}
	else
		ctx->logfile = strdup(DEVNULL);

	/* if no debug file name, output to the terminal */
	if (ctx->debug && !ctx->debug_fd)
	{
		ctx->debug_fd = fopen(DEVTTY, "w");
		if (!ctx->debug_fd)
			pg_log(ctx, PG_FATAL, "Cannot write to terminal\n");
	}

	/* Get values from env if not already set */
	validateDirectoryOption(ctx, &ctx->old.pgdata, "OLDDATADIR", "-d",
							"old cluster data resides");
	validateDirectoryOption(ctx, &ctx->new.pgdata, "NEWDATADIR", "-D",
							"new cluster data resides");
	validateDirectoryOption(ctx, &ctx->old.bindir, "OLDBINDIR", "-b",
							"old cluster binaries reside");
	validateDirectoryOption(ctx, &ctx->new.bindir, "NEWBINDIR", "-B",
							"new cluster binaries reside");

	get_pkglibdirs(ctx);
}
/*
 * usage()
 *
 * Print the command-line help text.  The old/new port defaults and the
 * cluster superuser name are interpolated from ctx, so this is only
 * meaningful after the corresponding context fields are initialized.
 * The environment-variable example differs between Unix and Windows.
 */
static void
usage(migratorContext *ctx)
{
	printf(_("\nUsage: pg_upgrade [OPTIONS]...\n\
\n\
Options:\n\
 -d, --old-datadir=OLDDATADIR old cluster data directory\n\
 -D, --new-datadir=NEWDATADIR new cluster data directory\n\
 -b, --old-bindir=OLDBINDIR old cluster executable directory\n\
 -B, --new-bindir=NEWBINDIR new cluster executable directory\n\
 -p, --old-port=portnum old cluster port number (default %d)\n\
 -P, --new-port=portnum new cluster port number (default %d)\n\
\n\
 -u, --user=username clusters superuser (default \"%s\")\n\
 -c, --check check clusters only, don't change any data\n\
 -g, --debug enable debugging\n\
 -G, --debugfile=DEBUGFILENAME output debugging activity to file\n\
 -k, --link link instead of copying files to new cluster\n\
 -l, --logfile=LOGFILENAME log session activity to file\n\
 -v, --verbose enable verbose output\n\
 -V, --version display version information, then exit\n\
 -h, --help show this help, then exit\n\
\n\
Before running pg_upgrade you must:\n\
 create a new database cluster (using the new version of initdb)\n\
 shutdown the postmaster servicing the old cluster\n\
 shutdown the postmaster servicing the new cluster\n\
\n\
When you run pg_upgrade, you must provide the following information:\n\
 the data directory for the old cluster (-d OLDDATADIR)\n\
 the data directory for the new cluster (-D NEWDATADIR)\n\
 the 'bin' directory for the old version (-b OLDBINDIR)\n\
 the 'bin' directory for the new version (-B NEWBINDIR)\n\
\n\
For example:\n\
 pg_upgrade -d oldCluster/data -D newCluster/data -b oldCluster/bin -B newCluster/bin\n\
or\n"), ctx->old.port, ctx->new.port, ctx->user);
#ifndef WIN32
	printf(_("\
 $ export OLDDATADIR=oldCluster/data\n\
 $ export NEWDATADIR=newCluster/data\n\
 $ export OLDBINDIR=oldCluster/bin\n\
 $ export NEWBINDIR=newCluster/bin\n\
 $ pg_upgrade\n"));
#else
	printf(_("\
 C:\\> set OLDDATADIR=oldCluster/data\n\
 C:\\> set NEWDATADIR=newCluster/data\n\
 C:\\> set OLDBINDIR=oldCluster/bin\n\
 C:\\> set NEWBINDIR=newCluster/bin\n\
 C:\\> pg_upgrade\n"));
#endif
	printf(_("\n\
You may find it useful to save the preceding 5 commands in a shell script\n\
\n\
Report bugs to <pg-migrator-general@lists.pgfoundry.org>\n"));
}
/*
 * validateDirectoryOption()
 *
 * Validates a directory option.
 *	dirpath		  - the directory name supplied on the command line
 *	envVarName	  - the name of an environment variable to get if dirpath is NULL
 *	cmdLineOption - the command line option corresponds to this directory
 *					(-d, -D, -b, -B)
 *	description   - a description of this directory option
 *
 * We use the last two arguments to construct a meaningful error message if the
 * user hasn't provided the required directory name.  On success, *dirpath is
 * guaranteed non-empty with no trailing path separator.
 */
static void
validateDirectoryOption(migratorContext *ctx, char **dirpath,
					char *envVarName, char *cmdLineOption, char *description)
{
	if (*dirpath == NULL || (strlen(*dirpath) == 0))
	{
		const char *envVar;

		if ((envVar = getenv(envVarName)) && strlen(envVar))
			*dirpath = pg_strdup(ctx, envVar);
		else
		{
			pg_log(ctx, PG_FATAL, "You must identify the directory where the %s\n"
				   "Please use the %s command-line option or the %s environment variable\n",
				   description, cmdLineOption, envVarName);
		}
	}

	/*
	 * Trim off any trailing path separator (only one).  Guard against an
	 * empty string: indexing strlen()-1 on "" would read out of bounds if
	 * control ever reaches here without a value (e.g. if PG_FATAL did not
	 * terminate the process).
	 */
	if (strlen(*dirpath) > 0 &&
		(*dirpath)[strlen(*dirpath) - 1] == pathSeparator)
		(*dirpath)[strlen(*dirpath) - 1] = 0;
}
/*
 * get_pkglibdirs()
 *
 * Determine each cluster's loadable-library directory by running that
 * cluster's own pg_config (see get_pkglibdir()).
 */
static void
get_pkglibdirs(migratorContext *ctx)
{
	ctx->old.libpath = get_pkglibdir(ctx, ctx->old.bindir);
	ctx->new.libpath = get_pkglibdir(ctx, ctx->new.bindir);
}
/*
 * get_pkglibdir()
 *
 * Run "<bindir>/pg_config --pkglibdir" and return its first output line
 * (newline stripped) as a freshly pg_strdup'd string.  Fatal error if the
 * command cannot be started or produces no output.
 */
static char *
get_pkglibdir(migratorContext *ctx, const char *bindir)
{
	char		cmd[MAXPGPATH];
	char		bufin[MAX_STRING];
	FILE	   *output;
	int			len;

	snprintf(cmd, sizeof(cmd), "\"%s/pg_config\" --pkglibdir", bindir);

	if ((output = popen(cmd, "r")) == NULL)
		pg_log(ctx, PG_FATAL, "Could not get pkglibdir data: %s\n",
			   getErrorText(errno));

	/*
	 * Check the fgets() result: on failure bufin would be uninitialized and
	 * reading it below would be undefined behavior.
	 */
	if (fgets(bufin, sizeof(bufin), output) == NULL)
	{
		pclose(output);
		pg_log(ctx, PG_FATAL, "Could not get pkglibdir data: %s\n",
			   getErrorText(errno));
	}

	/* output is known non-NULL here; the old "if (output)" was redundant */
	pclose(output);

	/* Remove trailing newline (guard against an empty line) */
	len = strlen(bufin);
	if (len > 0 && bufin[len - 1] == '\n')
		bufin[len - 1] = '\0';

	return pg_strdup(ctx, bufin);
}
/*
* page.c
*
* per-page conversion operations
*/
#include "pg_upgrade.h"
#include "dynloader.h"
#include "storage/bufpage.h"
#ifdef PAGE_CONVERSION
static const char *getPageVersion(migratorContext *ctx,
uint16 *version, const char *pathName);
static pageCnvCtx *loadConverterPlugin(migratorContext *ctx,
uint16 newPageVersion, uint16 oldPageVersion);
/*
 * setupPageConverter()
 *
 * Compare the PageLayoutVersion of the old and new clusters (read from each
 * cluster's pg_database relation file).  If they match, page-by-page
 * conversion is unnecessary and *result is set to NULL.  If they differ, a
 * converter plugin is loaded and *result points at its pageCnvCtx.
 *
 * Returns NULL on success; otherwise returns a null-terminated error
 * message and leaves *result untouched.
 */
const char *
setupPageConverter(migratorContext *ctx, pageCnvCtx **result)
{
	uint16		oldPageVersion;
	uint16		newPageVersion;
	pageCnvCtx *converter;
	const char *msg;
	char		dstName[MAXPGPATH];
	char		srcName[MAXPGPATH];

	snprintf(srcName, sizeof(srcName), "%s/global/%u", ctx->old.pgdata,
			 ctx->old.pg_database_oid);
	snprintf(dstName, sizeof(dstName), "%s/global/%u", ctx->new.pgdata,
			 ctx->new.pg_database_oid);

	if ((msg = getPageVersion(ctx, &oldPageVersion, srcName)) != NULL)
		return msg;
	if ((msg = getPageVersion(ctx, &newPageVersion, dstName)) != NULL)
		return msg;

	/* Identical layouts need no converter. */
	if (oldPageVersion == newPageVersion)
	{
		*result = NULL;
		return NULL;
	}

	/* Layouts differ: look for a plugin that can translate between them. */
	converter = loadConverterPlugin(ctx, newPageVersion, oldPageVersion);
	if (converter == NULL)
		return "can't find plugin to convert from old page layout to new page layout";

	*result = converter;
	return NULL;
}
/*
 * getPageVersion()
 *
 * Retrieves the PageLayoutVersion for the given relation file.
 *
 * Returns NULL on success (and stores the PageLayoutVersion at *version);
 * if an error occurs, this function returns an error message (in the form
 * of a null-terminated string).
 */
static const char *
getPageVersion(migratorContext *ctx, uint16 *version, const char *pathName)
{
	int			relfd;
	PageHeaderData page;
	ssize_t		bytesRead;

	if ((relfd = open(pathName, O_RDONLY, 0)) < 0)
		return "can't open relation";

	if ((bytesRead = read(relfd, &page, sizeof(page))) != sizeof(page))
	{
		/* previously the descriptor leaked on this early-error path */
		close(relfd);
		return "can't read page header";
	}

	*version = PageGetPageLayoutVersion(&page);

	close(relfd);
	return NULL;
}
/*
 * loadConverterPlugin()
 *
 * This function loads a page-converter plugin library and grabs a
 * pointer to each of the (interesting) functions provided by that
 * plugin.  The name of the plugin library is derived from the given
 * newPageVersion and oldPageVersion.  If a plugin is found, this
 * function returns a pointer to a pageCnvCtx object (which will contain
 * a collection of plugin function pointers).  If the required plugin
 * is not found, this function returns NULL.
 */
static pageCnvCtx *
loadConverterPlugin(migratorContext *ctx, uint16 newPageVersion, uint16 oldPageVersion)
{
	char		pluginName[MAXPGPATH];
	void	   *plugin;

	/*
	 * Try to find a plugin that can convert pages of oldPageVersion into
	 * pages of newPageVersion.  For example, if we oldPageVersion = 3 and
	 * newPageVersion is 4, we search for a plugin named:
	 * plugins/convertLayout_3_to_4.dll
	 */

	/*
	 * FIXME: we are searching for plugins relative to the current directory,
	 * we should really search relative to our own executable instead.
	 */
	snprintf(pluginName, sizeof(pluginName), "./plugins/convertLayout_%d_to_%d%s",
			 oldPageVersion, newPageVersion, DLSUFFIX);

	if ((plugin = pg_dlopen(pluginName)) == NULL)
		return NULL;
	else
	{
		pageCnvCtx *result = (pageCnvCtx *) pg_malloc(ctx, sizeof(*result));

		result->old.PageVersion = oldPageVersion;
		result->new.PageVersion = newPageVersion;

		/* Resolve the plugin entry points; any of these may be NULL. */
		result->startup = (pluginStartup) pg_dlsym(plugin, "init");
		result->convertFile = (pluginConvertFile) pg_dlsym(plugin, "convertFile");
		result->convertPage = (pluginConvertPage) pg_dlsym(plugin, "convertPage");
		result->shutdown = (pluginShutdown) pg_dlsym(plugin, "fini");
		result->pluginData = NULL;

		/*
		 * If the plugin has exported an initializer, go ahead and invoke it.
		 */
		if (result->startup)
			result->startup(MIGRATOR_API_VERSION, &result->pluginVersion,
							newPageVersion, oldPageVersion, &result->pluginData);

		return result;
	}
}
#endif
/*
* pg_upgrade.c
*
* main source file
*/
#include "pg_upgrade.h"
#ifdef HAVE_LANGINFO_H
#include <langinfo.h>
#endif
static void disable_old_cluster(migratorContext *ctx);
static void prepare_new_cluster(migratorContext *ctx);
static void prepare_new_databases(migratorContext *ctx);
static void create_new_objects(migratorContext *ctx);
static void copy_clog_xlog_xid(migratorContext *ctx);
static void set_frozenxids(migratorContext *ctx);
static void setup(migratorContext *ctx, char *argv0, bool live_check);
static void cleanup(migratorContext *ctx);
static void create_empty_output_directory(migratorContext *ctx);
/*
 * main()
 *
 * Top-level driver: validate both clusters, then perform the migration
 * (disable the old cluster, copy clog/xid state, recreate schemas, and
 * transfer relation files), finishing with warnings and cleanup.  The
 * strict ordering of these steps matters -- see the inline comments.
 */
int
main(int argc, char **argv)
{
	migratorContext ctx;
	char	   *sequence_script_file_name = NULL;
	char	   *deletion_script_file_name = NULL;
	bool		live_check = false;

	memset(&ctx, 0, sizeof(ctx));

	parseCommandLine(&ctx, argc, argv);

	output_check_banner(&ctx, &live_check);

	setup(&ctx, argv[0], live_check);

	create_empty_output_directory(&ctx);

	check_cluster_versions(&ctx);
	check_cluster_compatibility(&ctx, live_check);

	check_old_cluster(&ctx, live_check, &sequence_script_file_name);

	/* -- NEW -- */
	start_postmaster(&ctx, CLUSTER_NEW, false);

	check_new_cluster(&ctx);
	report_clusters_compatible(&ctx);

	pg_log(&ctx, PG_REPORT, "\nPerforming Migration\n");
	pg_log(&ctx, PG_REPORT, "--------------------\n");

	disable_old_cluster(&ctx);
	prepare_new_cluster(&ctx);

	stop_postmaster(&ctx, false, false);

	/*
	 * Destructive Changes to New Cluster
	 */

	copy_clog_xlog_xid(&ctx);

	/* New now using xids of the old system */

	prepare_new_databases(&ctx);

	create_new_objects(&ctx);

	transfer_all_new_dbs(&ctx, &ctx.old.dbarr, &ctx.new.dbarr,
						 ctx.old.pgdata, ctx.new.pgdata);

	/*
	 * Assuming OIDs are only used in system tables, there is no need to
	 * restore the OID counter because we have not transferred any OIDs from
	 * the old system, but we do it anyway just in case.  We do it late here
	 * because there is no need to have the schema load use new oids.
	 */
	prep_status(&ctx, "Setting next oid for new cluster");
	exec_prog(&ctx, true, SYSTEMQUOTE "\"%s/pg_resetxlog\" -o %u \"%s\" > " DEVNULL SYSTEMQUOTE,
			  ctx.new.bindir, ctx.old.controldata.chkpnt_nxtoid, ctx.new.pgdata);
	check_ok(&ctx);

	create_script_for_old_cluster_deletion(&ctx, &deletion_script_file_name);

	issue_warnings(&ctx, sequence_script_file_name);

	pg_log(&ctx, PG_REPORT, "\nUpgrade complete\n");
	pg_log(&ctx, PG_REPORT, "----------------\n");

	output_completion_banner(&ctx, deletion_script_file_name);

	pg_free(deletion_script_file_name);
	pg_free(sequence_script_file_name);

	cleanup(&ctx);

	return 0;
}
/*
 * setup()
 *
 * Sanity-check the environment before any work starts: scrub libpq-related
 * environment variables, verify the configured directories, make sure no
 * postmaster is running on either cluster (the old one may run during a
 * live check), and record the directory containing the pg_upgrade binary.
 */
static void
setup(migratorContext *ctx, char *argv0, bool live_check)
{
	char		exec_path[MAXPGPATH];	/* full path to my executable */

	/*
	 * make sure the user has a clean environment, otherwise, we may confuse
	 * libpq when we connect to one (or both) of the servers.
	 */
	check_for_libpq_envvars(ctx);

	verify_directories(ctx);

	/* no postmasters should be running, except during a live check */
	if (!live_check && is_server_running(ctx, ctx->old.pgdata))
	{
		pg_log(ctx, PG_FATAL, "There seems to be a postmaster servicing the old cluster.\n"
			   "Please shutdown that postmaster and try again.\n");
	}

	/* same goes for the new postmaster */
	if (is_server_running(ctx, ctx->new.pgdata))
	{
		pg_log(ctx, PG_FATAL, "There seems to be a postmaster servicing the new cluster.\n"
			   "Please shutdown that postmaster and try again.\n");
	}

	/* get path to pg_upgrade executable */
	if (find_my_exec(argv0, exec_path) < 0)
		pg_log(ctx, PG_FATAL, "Could not get pathname to pg_upgrade: %s\n", getErrorText(errno));

	/* Trim off program name and keep just path */
	*last_dir_separator(exec_path) = '\0';
	canonicalize_path(exec_path);
	ctx->exec_path = pg_strdup(ctx, exec_path);
}
/*
 * disable_old_cluster()
 *
 * Once the migration starts modifying the new cluster, the old cluster
 * must not be started again; renaming its pg_control enforces that.
 */
static void
disable_old_cluster(migratorContext *ctx)
{
	/* rename pg_control so old server cannot be accidentally started */
	rename_old_pg_control(ctx);
}
/*
 * prepare_new_cluster()
 *
 * Analyze and freeze all rows in the (still pristine) new cluster, and
 * record the relfilenode of its pg_database relation for later use.
 * Requires the new cluster's postmaster to be running.
 */
static void
prepare_new_cluster(migratorContext *ctx)
{
	/*
	 * It would make more sense to freeze after loading the schema, but that
	 * would cause us to lose the frozenids restored by the load. We use
	 * --analyze so autovacuum doesn't update statistics later
	 */
	prep_status(ctx, "Analyzing all rows in the new cluster");
	exec_prog(ctx, true,
			  SYSTEMQUOTE "\"%s/vacuumdb\" --port %d --all --analyze >> %s 2>&1" SYSTEMQUOTE,
			  ctx->new.bindir, ctx->new.port, ctx->logfile);
	check_ok(ctx);

	/*
	 * We do freeze after analyze so pg_statistic is also frozen
	 */
	prep_status(ctx, "Freezing all rows on the new cluster");
	exec_prog(ctx, true,
			  SYSTEMQUOTE "\"%s/vacuumdb\" --port %d --all --freeze >> %s 2>&1" SYSTEMQUOTE,
			  ctx->new.bindir, ctx->new.port, ctx->logfile);
	check_ok(ctx);

	/* cache pg_database's relfilenode for later file operations */
	get_pg_database_relfilenode(ctx, CLUSTER_NEW);
}
/*
 * prepare_new_databases()
 *
 * Set frozenxid counters, then create all databases in the new cluster by
 * replaying the globals dump through psql.  Refreshes the new cluster's
 * db/rel info afterwards.  Starts and stops the new postmaster itself.
 */
static void
prepare_new_databases(migratorContext *ctx)
{
	/* -- NEW -- */
	start_postmaster(ctx, CLUSTER_NEW, false);

	/*
	 * We set autovacuum_freeze_max_age to its maximum value so autovacuum
	 * does not launch here and delete clog files, before the frozen xids are
	 * set.
	 */
	set_frozenxids(ctx);

	/*
	 * We have to create the databases first so we can create the toast table
	 * placeholder relfiles.
	 */
	prep_status(ctx, "Creating databases in the new cluster");
	exec_prog(ctx, true,
			  SYSTEMQUOTE "\"%s/%s\" --set ON_ERROR_STOP=on --port %d "
			  "-f \"%s/%s\" --dbname template1 >> \"%s\"" SYSTEMQUOTE,
			  ctx->new.bindir, ctx->new.psql_exe, ctx->new.port,
			  ctx->output_dir, GLOBALS_DUMP_FILE, ctx->logfile);
	check_ok(ctx);

	get_db_and_rel_infos(ctx, &ctx->new.dbarr, CLUSTER_NEW);

	stop_postmaster(ctx, false, false);
}
/*
 * create_new_objects()
 *
 * Restore the database schema into the new cluster from the schema dump,
 * with the binary-upgrade support functions temporarily installed.  The
 * new cluster's db/rel info is regenerated afterwards since restoring the
 * schema creates new relations.
 */
static void
create_new_objects(migratorContext *ctx)
{
	/* -- NEW -- */
	start_postmaster(ctx, CLUSTER_NEW, false);

	install_support_functions(ctx);

	prep_status(ctx, "Restoring database schema to new cluster");
	exec_prog(ctx, true,
			  SYSTEMQUOTE "\"%s/%s\" --set ON_ERROR_STOP=on --port %d "
			  "-f \"%s/%s\" --dbname template1 >> \"%s\"" SYSTEMQUOTE,
			  ctx->new.bindir, ctx->new.psql_exe, ctx->new.port,
			  ctx->output_dir, DB_DUMP_FILE, ctx->logfile);
	check_ok(ctx);

	/* regenerate now that we have db schemas */
	dbarr_free(&ctx->new.dbarr);
	get_db_and_rel_infos(ctx, &ctx->new.dbarr, CLUSTER_NEW);

	uninstall_support_functions(ctx);

	stop_postmaster(ctx, false, false);
}
/*
 * copy_clog_xlog_xid()
 *
 * Replace the new cluster's commit logs with the old cluster's, then use
 * pg_resetxlog to carry the old cluster's next-xid and WAL position over
 * to the new cluster.  This is the first destructive step on the new
 * cluster.  Note the #ifdef below splits a single exec_prog() call: only
 * the copy command and quoting differ between Unix and Windows.
 */
static void
copy_clog_xlog_xid(migratorContext *ctx)
{
	char		old_clog_path[MAXPGPATH];
	char		new_clog_path[MAXPGPATH];

	/* copy old commit logs to new data dir */
	prep_status(ctx, "Deleting new commit clogs");

	snprintf(old_clog_path, sizeof(old_clog_path), "%s/pg_clog", ctx->old.pgdata);
	snprintf(new_clog_path, sizeof(new_clog_path), "%s/pg_clog", ctx->new.pgdata);
	if (rmtree(new_clog_path, true) != true)
		pg_log(ctx, PG_FATAL, "Unable to delete directory %s\n", new_clog_path);
	check_ok(ctx);

	prep_status(ctx, "Copying old commit clogs to new server");
	/* libpgport's copydir() doesn't work in FRONTEND code */
#ifndef WIN32
	exec_prog(ctx, true, SYSTEMQUOTE "%s \"%s\" \"%s\"" SYSTEMQUOTE,
			  "cp -Rf",
#else
	/* flags: everything, no confirm, quiet, overwrite read-only */
	exec_prog(ctx, true, SYSTEMQUOTE "%s \"%s\" \"%s\\\"" SYSTEMQUOTE,
			  "xcopy /e /y /q /r",
#endif
			  old_clog_path, new_clog_path);
	check_ok(ctx);

	/* set the next transaction id of the new cluster */
	prep_status(ctx, "Setting next transaction id for new cluster");
	exec_prog(ctx, true, SYSTEMQUOTE "\"%s/pg_resetxlog\" -f -x %u \"%s\" > " DEVNULL SYSTEMQUOTE,
			  ctx->new.bindir, ctx->old.controldata.chkpnt_nxtxid, ctx->new.pgdata);
	check_ok(ctx);

	/* now reset the wal archives in the new cluster */
	prep_status(ctx, "Resetting WAL archives");
	exec_prog(ctx, true, SYSTEMQUOTE "\"%s/pg_resetxlog\" -l %u,%u,%u \"%s\" >> \"%s\" 2>&1" SYSTEMQUOTE,
			  ctx->new.bindir, ctx->old.controldata.chkpnt_tli,
			  ctx->old.controldata.logid, ctx->old.controldata.nxtlogseg,
			  ctx->new.pgdata, ctx->logfile);
	check_ok(ctx);
}
/*
 * set_frozenxids()
 *
 * We have frozen all xids, so set relfrozenxid and datfrozenxid
 * to be the old cluster's xid counter, which we just set in the new
 * cluster.  User-table frozenxid values will be set by pg_dumpall
 * --binary-upgrade, but objects not set by the pg_dump must have
 * proper frozen counters.
 */
static
void
set_frozenxids(migratorContext *ctx)
{
	int			dbnum;
	PGconn	   *conn;
	PGresult   *dbres;
	int			ntups;

	prep_status(ctx, "Setting frozenxid counters in new cluster");

	conn = connectToServer(ctx, "template1", CLUSTER_NEW);

	/* set pg_database.datfrozenxid */
	PQclear(executeQueryOrDie(ctx, conn,
							  "UPDATE pg_catalog.pg_database "
							  "SET	datfrozenxid = '%u' "
	/* cannot connect to 'template0', so ignore */
							  "WHERE	datname != 'template0'",
							  ctx->old.controldata.chkpnt_nxtxid));

	/* get database names */
	dbres = executeQueryOrDie(ctx, conn,
							  "SELECT	datname "
							  "FROM	pg_catalog.pg_database "
							  "WHERE	datname != 'template0'");

	/* free dbres below */
	PQfinish(conn);

	/* per-database pass: set pg_class.relfrozenxid in each database */
	ntups = PQntuples(dbres);
	for (dbnum = 0; dbnum < ntups; dbnum++)
	{
		conn = connectToServer(ctx, PQgetvalue(dbres, dbnum, 0), CLUSTER_NEW);

		/* set pg_class.relfrozenxid */
		PQclear(executeQueryOrDie(ctx, conn,
								  "UPDATE	pg_catalog.pg_class "
								  "SET	relfrozenxid = '%u' "
		/* only heap and TOAST are vacuumed */
								  "WHERE	relkind = 'r' OR "
								  "		relkind = 't'",
								  ctx->old.controldata.chkpnt_nxtxid));
		PQfinish(conn);
	}

	PQclear(dbres);

	check_ok(ctx);
}
/*
 * cleanup()
 *
 * Final teardown: release all context-owned memory, close the log and
 * debug streams, and remove the intermediate dump files from the output
 * directory.
 */
static void
cleanup(migratorContext *ctx)
{
	int			tblnum;
	char		filename[MAXPGPATH];

	for (tblnum = 0; tblnum < ctx->num_tablespaces; tblnum++)
		pg_free(ctx->tablespaces[tblnum]);
	pg_free(ctx->tablespaces);

	dbarr_free(&ctx->old.dbarr);
	dbarr_free(&ctx->new.dbarr);
	pg_free(ctx->logfile);
	pg_free(ctx->user);
	pg_free(ctx->old.major_version_str);
	pg_free(ctx->new.major_version_str);
	pg_free(ctx->old.controldata.lc_collate);
	pg_free(ctx->new.controldata.lc_collate);
	pg_free(ctx->old.controldata.lc_ctype);
	pg_free(ctx->new.controldata.lc_ctype);
	pg_free(ctx->old.controldata.encoding);
	pg_free(ctx->new.controldata.encoding);

	if (ctx->log_fd != NULL)
	{
		fclose(ctx->log_fd);
		ctx->log_fd = NULL;
	}

	if (ctx->debug_fd)
		fclose(ctx->debug_fd);

	/* remove the intermediate SQL dump files; unlink failures are ignored */
	snprintf(filename, sizeof(filename), "%s/%s", ctx->output_dir, ALL_DUMP_FILE);
	unlink(filename);
	snprintf(filename, sizeof(filename), "%s/%s", ctx->output_dir, GLOBALS_DUMP_FILE);
	unlink(filename);
	snprintf(filename, sizeof(filename), "%s/%s", ctx->output_dir, DB_DUMP_FILE);
	unlink(filename);
}
/*
 * create_empty_output_directory
 *
 *	Create empty directory for output files
 */
static void
create_empty_output_directory(migratorContext *ctx)
{
	/*
	 * rmtree() outputs a warning if the directory does not exist,
	 * so we try to create the directory first.
	 */
	if (mkdir(ctx->output_dir, S_IRWXU) != 0)
	{
		if (errno == EEXIST)
			/*
			 * Directory already exists: empty it (but keep the directory
			 * itself).  NOTE(review): rmtree()'s result is ignored here;
			 * a failure would surface later when output files are written.
			 */
			rmtree(ctx->output_dir, false);
		else
			pg_log(ctx, PG_FATAL, "Cannot create subdirectory %s: %s\n",
				   ctx->output_dir, getErrorText(errno));
	}
}
/*
* pg_upgrade.h
*/
#include "postgres.h"
#include <unistd.h>
#include <string.h>
#include <stdio.h>
#include <assert.h>
#include <dirent.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/types.h>
#ifdef WIN32
#include <shlobj.h>
#endif
#include "libpq-fe.h"
/* Allocate for null byte */
#define NAMEDATASIZE (NAMEDATALEN + 1)
#define USER_NAME_SIZE 128
#define MAX_STRING 1024
#define LINE_ALLOC 4096
#define QUERY_ALLOC 8192
#define MIGRATOR_API_VERSION 1
#define MESSAGE_WIDTH "60"
#define OVERWRITE_MESSAGE " %-" MESSAGE_WIDTH "." MESSAGE_WIDTH "s\r"
#define GET_MAJOR_VERSION(v) ((v) / 100)
#define OUTPUT_SUBDIR "pg_upgrade_output"
#define ALL_DUMP_FILE "pg_upgrade_dump_all.sql"
/* contains both global db information and CREATE DATABASE commands */
#define GLOBALS_DUMP_FILE "pg_upgrade_dump_globals.sql"
#define DB_DUMP_FILE "pg_upgrade_dump_db.sql"
#ifndef WIN32
#define pg_copy_file copy_file
#define pg_mv_file rename
#define pg_link_file link
#define DEVNULL "/dev/null"
#define DEVTTY "/dev/tty"
#define RMDIR_CMD "rm -rf"
#define EXEC_EXT "sh"
#else
#define pg_copy_file CopyFile
#define pg_mv_file pgrename
#define pg_link_file win32_pghardlink
#define EXE_EXT ".exe"
#define sleep(x) Sleep(x * 1000)
#define DEVNULL "nul"
/* "con" does not work from the Msys 1.0.10 console (part of MinGW). */
#define DEVTTY "con"
/* from pgport */
extern int pgrename(const char *from, const char *to);
extern int pgunlink(const char *path);
#define rename(from, to) pgrename(from, to)
#define unlink(path) pgunlink(path)
#define RMDIR_CMD "RMDIR /s/q"
#define EXEC_EXT "bat"
#endif
#define CLUSTERNAME(cluster) ((cluster) == CLUSTER_OLD ? "old" : "new")
/* OID system catalog preservation added during PG 9.0 development */
#define TABLE_SPACE_SUBDIRS 201001111
/* from pgport */
extern void copydir(char *fromdir, char *todir, bool recurse);
extern bool rmtree(const char *path, bool rmtopdir);
extern char pathSeparator;
/*
 * Each relation is represented by a relinfo structure.
 */
typedef struct
{
	char		nspname[NAMEDATASIZE];	/* namespace name */
	char		relname[NAMEDATASIZE];	/* relation name */
	Oid			reloid;			/* relation oid */
	Oid			relfilenode;	/* relation relfile node */
	Oid			toastrelid;		/* oid of the toast relation */
	char		tablespace[MAXPGPATH];	/* relations tablespace path */
} RelInfo;

/* Array of RelInfo; rels is heap-allocated and released by relarr_free() */
typedef struct
{
	RelInfo    *rels;			/* array of relation infos */
	int			nrels;			/* number of valid entries */
} RelInfoArr;

/*
 * The following structure represents a relation mapping
 * (old-cluster file <-> new-cluster file).
 */
typedef struct
{
	Oid			old;			/* Relfilenode of the old relation */
	Oid			new;			/* Relfilenode of the new relation */
	char		old_file[MAXPGPATH];	/* path of the old relation file */
	char		new_file[MAXPGPATH];	/* path of the new relation file */
	char		old_nspname[NAMEDATASIZE];	/* old name of the namespace */
	char		old_relname[NAMEDATASIZE];	/* old name of the relation */
	char		new_nspname[NAMEDATASIZE];	/* new name of the namespace */
	char		new_relname[NAMEDATASIZE];	/* new name of the relation */
} FileNameMap;

/*
 * Structure to store database information
 */
typedef struct
{
	Oid			db_oid;			/* oid of the database */
	char		db_name[NAMEDATASIZE];	/* database name */
	char		db_tblspace[MAXPGPATH]; /* database default tablespace path */
	RelInfoArr	rel_arr;		/* array of all user relinfos */
} DbInfo;

/* Array of databases in a cluster; released by dbarr_free() */
typedef struct
{
	DbInfo	   *dbs;			/* array of db infos */
	int			ndbs;			/* number of db infos */
} DbInfoArr;
/*
 * The following structure is used to hold pg_control information.
 * Rather than using the backend's control structure we use our own
 * structure to avoid pg_control version issues between releases.
 * (Values are gathered from the old/new clusters; see controldata.c.)
 */
typedef struct
{
	uint32		ctrl_ver;
	uint32		cat_ver;
	uint32		logid;			/* used with pg_resetxlog -l (WAL position) */
	uint32		nxtlogseg;		/* used with pg_resetxlog -l (WAL position) */
	uint32		chkpnt_tli;		/* timeline, used with pg_resetxlog -l */
	uint32		chkpnt_nxtxid;	/* next xid; applied to the new cluster */
	uint32		chkpnt_nxtoid;	/* next OID; applied to the new cluster */
	uint32		align;
	uint32		blocksz;
	uint32		largesz;
	uint32		walsz;
	uint32		walseg;
	uint32		ident;
	uint32		index;
	uint32		toast;
	bool		date_is_int;
	bool		float8_pass_by_value;
	char	   *lc_collate;		/* heap-allocated; freed in cleanup() */
	char	   *lc_ctype;		/* heap-allocated; freed in cleanup() */
	char	   *encoding;		/* heap-allocated; freed in cleanup() */
} ControlData;

/*
 * Enumeration to denote link modes (how relation files reach the new
 * cluster; see the -k/--link option)
 */
typedef enum
{
	TRANSFER_MODE_COPY,			/* physically copy files (default) */
	TRANSFER_MODE_LINK			/* hard-link files (pg_link_file) */
} transferMode;

/*
 * Enumeration to denote pg_log modes (message severity)
 */
typedef enum
{
	PG_INFO,
	PG_REPORT,
	PG_WARNING,
	PG_FATAL,					/* callers treat this as terminating */
	PG_DEBUG
} eLogType;

/*
 * Enumeration to distinguish between old cluster and new cluster
 */
typedef enum
{
	NONE = 0,					/* used for no running servers */
	CLUSTER_OLD,
	CLUSTER_NEW
} Cluster;
typedef long pgpid_t;
/*
 * cluster
 *
 * information about each cluster (one instance each for the old and the
 * new cluster lives inside migratorContext).
 */
typedef struct
{
	ControlData controldata;	/* pg_control information */
	DbInfoArr	dbarr;			/* dbinfos array */
	char	   *pgdata;			/* pathname for cluster's $PGDATA directory */
	char	   *bindir;			/* pathname for cluster's executable directory */
	const char *psql_exe;		/* name of the psql command to execute
								 * in the cluster */
	unsigned short port;		/* port number where postmaster is waiting */
	uint32		major_version;	/* PG_VERSION of cluster */
	char	   *major_version_str;	/* string PG_VERSION of cluster */
	Oid			pg_database_oid;	/* OID of pg_database relation */
	char	   *libpath;		/* pathname for cluster's pkglibdir */
	/* EDB AS is PG 8.2 with 8.3 enhancements backpatched. */
	bool		is_edb_as;		/* EnterpriseDB's Postgres Plus Advanced Server? */
	char	   *tablespace_suffix;	/* directory specification appended to
									 * tablespace paths (see tablespace.c) */
} ClusterInfo;
/*
 * migratorContext
 *
 * We create a migratorContext object to store all of the information
 * that we need to migrate a single cluster.  One instance is passed to
 * essentially every function in the program.
 */
typedef struct
{
	ClusterInfo old, new;		/* old and new cluster information */
	const char *progname;		/* complete pathname for this program */
	char	   *exec_path;		/* full path to my executable */
	char	   *user;			/* username for clusters */
	char		home_dir[MAXPGPATH];	/* name of user's home directory */
	char		output_dir[MAXPGPATH];	/* directory for pg_upgrade output */
	char	  **tablespaces;	/* tablespaces (old cluster paths) */
	int			num_tablespaces;
	char	  **libraries;		/* loadable libraries */
	int			num_libraries;
	pgpid_t		postmasterPID;	/* PID of currently running postmaster */
	Cluster		running_cluster;	/* which cluster that postmaster belongs to */
	char	   *logfile;		/* name of log file (may be /dev/null) */
	FILE	   *log_fd;			/* log FILE */
	FILE	   *debug_fd;		/* debug-level log FILE */
	bool		check;			/* TRUE -> ask user for permission to make
								 * changes */
	bool		verbose;		/* TRUE -> be verbose in messages */
	bool		debug;			/* TRUE -> log more information */
	transferMode transfer_mode; /* copy files or link them? */
} migratorContext;
/*
 * Global variables
 */

/*
 * Filename prefix used by dir_matching_filenames(); callers (see
 * transfer_single_new_db() in relfilenode.c) fill this in immediately
 * before each pg_scandir() call.
 */
char scandir_file_pattern[MAXPGPATH];

/* check.c */
void output_check_banner(migratorContext *ctx, bool *live_check);
void check_old_cluster(migratorContext *ctx, bool live_check,
				  char **sequence_script_file_name);
void check_new_cluster(migratorContext *ctx);
void report_clusters_compatible(migratorContext *ctx);
void issue_warnings(migratorContext *ctx,
			   char *sequence_script_file_name);
void output_completion_banner(migratorContext *ctx,
						 char *deletion_script_file_name);
void check_cluster_versions(migratorContext *ctx);
void check_cluster_compatibility(migratorContext *ctx, bool live_check);
void create_script_for_old_cluster_deletion(migratorContext *ctx,
									   char **deletion_script_file_name);

/* controldata.c */
void get_control_data(migratorContext *ctx, ClusterInfo *cluster, bool live_check);
void check_control_data(migratorContext *ctx, ControlData *oldctrl,
				   ControlData *newctrl);

/* dump.c */
void generate_old_dump(migratorContext *ctx);
void split_old_dump(migratorContext *ctx);

/* exec.c */
int exec_prog(migratorContext *ctx, bool throw_error,
		  const char *cmd,...);
void verify_directories(migratorContext *ctx);
bool is_server_running(migratorContext *ctx, const char *datadir);
void rename_old_pg_control(migratorContext *ctx);
/* file.c */
#ifdef PAGE_CONVERSION
/*
 * Entry points a page-conversion plugin must export.  The plugin converts
 * relation pages from the old cluster's page-layout version to the new
 * cluster's; see setupPageConverter().
 */
typedef const char *(*pluginStartup) (uint16 migratorVersion,
								uint16 *pluginVersion, uint16 newPageVersion,
								uint16 oldPageVersion, void **pluginData);
typedef const char *(*pluginConvertFile) (void *pluginData,
								   const char *dstName, const char *srcName);
typedef const char *(*pluginConvertPage) (void *pluginData,
								   const char *dstPage, const char *srcPage);
typedef const char *(*pluginShutdown) (void *pluginData);

/* State for one page-conversion plugin, filled in by setupPageConverter() */
typedef struct
{
	uint16		oldPageVersion; /* Page layout version of the old
								 * cluster */
	uint16		newPageVersion; /* Page layout version of the new
								 * cluster */
	uint16		pluginVersion;	/* API version of converter plugin */
	void	   *pluginData;		/* Plugin data (set by plugin) */
	pluginStartup startup;		/* Pointer to plugin's startup function */
	pluginConvertFile convertFile;	/* Pointer to plugin's file converter
									 * function */
	pluginConvertPage convertPage;	/* Pointer to plugin's page converter
									 * function */
	pluginShutdown shutdown;	/* Pointer to plugin's shutdown function */
} pageCnvCtx;

const char *setupPageConverter(migratorContext *ctx, pageCnvCtx **result);
#else
/* dummy */
typedef void *pageCnvCtx;
#endif

int dir_matching_filenames(const struct dirent *scan_ent);
int pg_scandir(migratorContext *ctx, const char *dirname,
		   struct dirent ***namelist, int (*selector) (const struct dirent *),
		   int (*cmp) (const void *, const void *));
const char *copyAndUpdateFile(migratorContext *ctx,
				  pageCnvCtx *pageConverter, const char *src,
				  const char *dst, bool force);
const char *linkAndUpdateFile(migratorContext *ctx,
				  pageCnvCtx *pageConverter, const char *src, const char *dst);
void check_hard_link(migratorContext *ctx);
/* function.c */
void install_support_functions(migratorContext *ctx);
void uninstall_support_functions(migratorContext *ctx);
void get_loadable_libraries(migratorContext *ctx);
void check_loadable_libraries(migratorContext *ctx);

/* info.c */
FileNameMap *gen_db_file_maps(migratorContext *ctx, DbInfo *old_db,
				 DbInfo *new_db, int *nmaps, const char *old_pgdata,
				 const char *new_pgdata);
void get_db_and_rel_infos(migratorContext *ctx, DbInfoArr *db_arr,
					 Cluster whichCluster);
DbInfo *dbarr_lookup_db(DbInfoArr *db_arr, const char *db_name);
void dbarr_free(DbInfoArr *db_arr);
void print_maps(migratorContext *ctx, FileNameMap *maps, int n,
		   const char *dbName);

/* option.c */
void parseCommandLine(migratorContext *ctx, int argc, char *argv[]);

/* relfilenode.c */
void get_pg_database_relfilenode(migratorContext *ctx, Cluster whichCluster);
const char *transfer_all_new_dbs(migratorContext *ctx, DbInfoArr *olddb_arr,
					 DbInfoArr *newdb_arr, char *old_pgdata, char *new_pgdata);

/* tablespace.c */
void init_tablespaces(migratorContext *ctx);

/* server.c */
PGconn *connectToServer(migratorContext *ctx, const char *db_name,
				Cluster whichCluster);
PGresult *executeQueryOrDie(migratorContext *ctx, PGconn *conn,
				  const char *fmt,...);
void start_postmaster(migratorContext *ctx, Cluster whichCluster, bool quiet);
void stop_postmaster(migratorContext *ctx, bool fast, bool quiet);
uint32 get_major_server_version(migratorContext *ctx, char **verstr,
						 Cluster whichCluster);
void check_for_libpq_envvars(migratorContext *ctx);
/* util.c */

/*
 * NOTE: check_ok(), pg_strdup(), pg_malloc() and pg_free() were each
 * declared twice here; the redundant duplicate prototypes have been
 * removed.
 */
void exit_nicely(migratorContext *ctx, bool need_cleanup);
void *pg_malloc(migratorContext *ctx, int n);
void pg_free(void *p);
char *pg_strdup(migratorContext *ctx, const char *s);
char *quote_identifier(migratorContext *ctx, const char *s);
int get_user_info(migratorContext *ctx, char **user_name);
void check_ok(migratorContext *ctx);
void report_status(migratorContext *ctx, eLogType type, const char *fmt,...);
void pg_log(migratorContext *ctx, eLogType type, char *fmt,...);
void prep_status(migratorContext *ctx, const char *fmt,...);
const char *getErrorText(int errNum);
/* version.c */
void new_9_0_populate_pg_largeobject_metadata(migratorContext *ctx,
						 bool check_mode, Cluster whichCluster);

/* version_old_8_3.c -- checks/fixes needed only when the old cluster is 8.3 */
void old_8_3_check_for_name_data_type_usage(migratorContext *ctx,
									   Cluster whichCluster);
void old_8_3_check_for_tsquery_usage(migratorContext *ctx,
								Cluster whichCluster);
void old_8_3_check_for_isn_and_int8_passing_mismatch(migratorContext *ctx,
												Cluster whichCluster);
void old_8_3_rebuild_tsvector_tables(migratorContext *ctx,
								bool check_mode, Cluster whichCluster);
void old_8_3_invalidate_hash_gin_indexes(migratorContext *ctx,
									bool check_mode, Cluster whichCluster);
void old_8_3_invalidate_bpchar_pattern_ops_indexes(migratorContext *ctx,
											  bool check_mode, Cluster whichCluster);
char *old_8_3_create_sequence_script(migratorContext *ctx,
							   Cluster whichCluster);
/*
* pg_upgrade_sysoids.c
*
* server-side functions to set backend global variables
* to control oid and relfilenode assignment
*/
#include "postgres.h"
#include "fmgr.h"
#include "catalog/dependency.h"
#include "catalog/pg_class.h"
/* THIS IS USED ONLY FOR PG >= 9.0 */

/*
 * Cannot include "catalog/pg_enum.h" here because we might
 * not be compiling against PG 9.0.
 */
extern void EnumValuesCreate(Oid enumTypeOid, List *vals,
				 Oid binary_upgrade_next_pg_enum_oid);

#ifdef PG_MODULE_MAGIC
PG_MODULE_MAGIC;
#endif

/*
 * Backend globals (declared in the server) that let binary upgrade force
 * the OID/relfilenode chosen for the next created object.
 */
extern PGDLLIMPORT Oid binary_upgrade_next_pg_type_oid;
extern PGDLLIMPORT Oid binary_upgrade_next_pg_type_array_oid;
extern PGDLLIMPORT Oid binary_upgrade_next_pg_type_toast_oid;
extern PGDLLIMPORT Oid binary_upgrade_next_heap_relfilenode;
extern PGDLLIMPORT Oid binary_upgrade_next_toast_relfilenode;
extern PGDLLIMPORT Oid binary_upgrade_next_index_relfilenode;

/* SQL-callable wrappers that set the globals above */
Datum set_next_pg_type_oid(PG_FUNCTION_ARGS);
Datum set_next_pg_type_array_oid(PG_FUNCTION_ARGS);
Datum set_next_pg_type_toast_oid(PG_FUNCTION_ARGS);
Datum set_next_heap_relfilenode(PG_FUNCTION_ARGS);
Datum set_next_toast_relfilenode(PG_FUNCTION_ARGS);
Datum set_next_index_relfilenode(PG_FUNCTION_ARGS);
Datum add_pg_enum_label(PG_FUNCTION_ARGS);

PG_FUNCTION_INFO_V1(set_next_pg_type_oid);
PG_FUNCTION_INFO_V1(set_next_pg_type_array_oid);
PG_FUNCTION_INFO_V1(set_next_pg_type_toast_oid);
PG_FUNCTION_INFO_V1(set_next_heap_relfilenode);
PG_FUNCTION_INFO_V1(set_next_toast_relfilenode);
PG_FUNCTION_INFO_V1(set_next_index_relfilenode);
PG_FUNCTION_INFO_V1(add_pg_enum_label);
/*
 * set_next_pg_type_oid()
 *
 * Record the OID the backend should assign to the next pg_type row.
 */
Datum
set_next_pg_type_oid(PG_FUNCTION_ARGS)
{
	binary_upgrade_next_pg_type_oid = PG_GETARG_OID(0);

	PG_RETURN_VOID();
}
/*
 * set_next_pg_type_array_oid()
 *
 * Record the OID for the next pg_type array-type row.
 */
Datum
set_next_pg_type_array_oid(PG_FUNCTION_ARGS)
{
	binary_upgrade_next_pg_type_array_oid = PG_GETARG_OID(0);

	PG_RETURN_VOID();
}
/*
 * set_next_pg_type_toast_oid()
 *
 * Record the OID for the next pg_type toast-type row.
 */
Datum
set_next_pg_type_toast_oid(PG_FUNCTION_ARGS)
{
	binary_upgrade_next_pg_type_toast_oid = PG_GETARG_OID(0);

	PG_RETURN_VOID();
}
/*
 * set_next_heap_relfilenode()
 *
 * Record the relfilenode to use for the next heap relation created.
 */
Datum
set_next_heap_relfilenode(PG_FUNCTION_ARGS)
{
	binary_upgrade_next_heap_relfilenode = PG_GETARG_OID(0);

	PG_RETURN_VOID();
}
/*
 * set_next_toast_relfilenode()
 *
 * Record the relfilenode to use for the next toast relation created.
 */
Datum
set_next_toast_relfilenode(PG_FUNCTION_ARGS)
{
	binary_upgrade_next_toast_relfilenode = PG_GETARG_OID(0);

	PG_RETURN_VOID();
}
/*
 * set_next_index_relfilenode()
 *
 * Record the relfilenode to use for the next index created.
 */
Datum
set_next_index_relfilenode(PG_FUNCTION_ARGS)
{
	binary_upgrade_next_index_relfilenode = PG_GETARG_OID(0);

	PG_RETURN_VOID();
}
/*
 * add_pg_enum_label()
 *
 * Recreate a single enum label via EnumValuesCreate(), forcing the new
 * pg_enum row to get a caller-specified OID.
 *
 * Arguments: 0 = OID to assign to the pg_enum row,
 *            1 = OID of the owning enum type,
 *            2 = label name.
 */
Datum
add_pg_enum_label(PG_FUNCTION_ARGS)
{
	Oid			enumoid = PG_GETARG_OID(0);
	Oid			typoid = PG_GETARG_OID(1);
	Name		label = PG_GETARG_NAME(2);

	/* note argument order: type OID first, forced row OID last */
	EnumValuesCreate(typoid, list_make1(makeString(NameStr(*label))),
					 enumoid);
	PG_RETURN_VOID();
}
/*
* relfilenode.c
*
* relfilenode functions
*/
#include "pg_upgrade.h"
#ifdef EDB_NATIVE_LANG
#include <fcntl.h>
#endif
#include "catalog/pg_class.h"
#include "access/transam.h"
static void transfer_single_new_db(migratorContext *ctx, pageCnvCtx *pageConverter,
FileNameMap *maps, int size);
static void transfer_relfile(migratorContext *ctx, pageCnvCtx *pageConverter,
const char *fromfile, const char *tofile,
const char *oldnspname, const char *oldrelname,
const char *newnspname, const char *newrelname);
/*
 * transfer_all_new_dbs()
 *
 * Responsible for upgrading all databases: for each database in the new
 * cluster, finds the matching old database, generates the old->new file
 * mappings, and copies/links the relation files across.
 *
 * Returns NULL on success, or a page-converter setup message (only
 * possible when compiled with PAGE_CONVERSION).
 */
const char *
transfer_all_new_dbs(migratorContext *ctx, DbInfoArr *olddb_arr,
					 DbInfoArr *newdb_arr, char *old_pgdata, char *new_pgdata)
{
	int			dbnum;
	const char *msg = NULL;

	prep_status(ctx, "Restoring user relation files\n");

	for (dbnum = 0; dbnum < newdb_arr->ndbs; dbnum++)
	{
		DbInfo	   *new_db = &newdb_arr->dbs[dbnum];
		DbInfo	   *old_db = dbarr_lookup_db(olddb_arr, new_db->db_name);
		FileNameMap *mappings;
		int			n_maps;
		pageCnvCtx *pageConverter = NULL;

		n_maps = 0;
		mappings = gen_db_file_maps(ctx, old_db, new_db, &n_maps, old_pgdata,
									new_pgdata);

		if (n_maps)
		{
			print_maps(ctx, mappings, n_maps, new_db->db_name);

#ifdef PAGE_CONVERSION
			msg = setupPageConverter(ctx, &pageConverter);
#endif
			transfer_single_new_db(ctx, pageConverter, mappings, n_maps);

			pg_free(mappings);
		}
	}

	prep_status(ctx, "");		/* in case nothing printed */
	check_ok(ctx);

	return msg;
}
/*
 * get_pg_database_relfilenode()
 *
 * Retrieves the relfilenode of pg_database and stores it in the given
 * cluster's pg_database_oid field; it is needed later in the upgrade.
 * (Despite the field's name, the value stored is the relfilenode
 * returned by the query below, not the table's OID.)
 */
void
get_pg_database_relfilenode(migratorContext *ctx, Cluster whichCluster)
{
	PGconn	   *conn = connectToServer(ctx, "template1", whichCluster);
	PGresult   *res;
	int			i_relfile;

	res = executeQueryOrDie(ctx, conn,
							"SELECT c.relname, c.relfilenode "
							"FROM 	pg_catalog.pg_class c, "
							"		pg_catalog.pg_namespace n "
							"WHERE 	c.relnamespace = n.oid AND "
							"		n.nspname = 'pg_catalog' AND "
							"		c.relname = 'pg_database' "
							"ORDER BY c.relname");

	i_relfile = PQfnumber(res, "relfilenode");
	if (whichCluster == CLUSTER_OLD)
		ctx->old.pg_database_oid = atol(PQgetvalue(res, 0, i_relfile));
	else
		ctx->new.pg_database_oid = atol(PQgetvalue(res, 0, i_relfile));

	PQclear(res);
	PQfinish(conn);
}
/*
 * transfer_single_new_db()
 *
 * create links (or copies) for the file mappings stored in the "maps"
 * array: the main relation file, any fsm/vm fork files (old cluster
 * >= 8.4 only), and any additional 1GB segment files.
 */
static void
transfer_single_new_db(migratorContext *ctx, pageCnvCtx *pageConverter,
					   FileNameMap *maps, int size)
{
	int			mapnum;

	for (mapnum = 0; mapnum < size; mapnum++)
	{
		char		old_file[MAXPGPATH];
		char		new_file[MAXPGPATH];
		struct dirent **namelist = NULL;
		int			numFiles;

		/* Copying files might take some time, so give feedback. */

		snprintf(old_file, sizeof(old_file), "%s/%u", maps[mapnum].old_file, maps[mapnum].old);
		snprintf(new_file, sizeof(new_file), "%s/%u", maps[mapnum].new_file, maps[mapnum].new);
		pg_log(ctx, PG_REPORT, OVERWRITE_MESSAGE, old_file);

		/*
		 * Copy/link the relation file to the new cluster
		 */
		/* remove any existing file first so link/copy cannot fail on it */
		unlink(new_file);
		transfer_relfile(ctx, pageConverter, old_file, new_file,
						 maps[mapnum].old_nspname, maps[mapnum].old_relname,
						 maps[mapnum].new_nspname, maps[mapnum].new_relname);

		/* fsm/vm files added in PG 8.4 */
		if (GET_MAJOR_VERSION(ctx->old.major_version) >= 804)
		{
			/*
			 * Now copy/link any fsm and vm files, if they exist
			 */
			/* pattern "<old relfilenode>_" matches the fork files */
			snprintf(scandir_file_pattern, sizeof(scandir_file_pattern), "%u_", maps[mapnum].old);
			numFiles = pg_scandir(ctx, maps[mapnum].old_file, &namelist, dir_matching_filenames, NULL);

			while (numFiles--)
			{
				snprintf(old_file, sizeof(old_file), "%s/%s", maps[mapnum].old_file,
						 namelist[numFiles]->d_name);
				/* keep the old suffix (from '_' on), substitute new relfilenode */
				snprintf(new_file, sizeof(new_file), "%s/%u%s", maps[mapnum].new_file,
						 maps[mapnum].new, strchr(namelist[numFiles]->d_name, '_'));

				unlink(new_file);
				transfer_relfile(ctx, pageConverter, old_file, new_file,
								 maps[mapnum].old_nspname, maps[mapnum].old_relname,
								 maps[mapnum].new_nspname, maps[mapnum].new_relname);

				pg_free(namelist[numFiles]);
			}

			pg_free(namelist);
		}

		/*
		 * Now copy/link any related segments as well. Remember, PG breaks
		 * large files into 1GB segments, the first segment has no extension,
		 * subsequent segments are named relfilenode.1, relfilenode.2,
		 * relfilenode.3, ... 'fsm' and 'vm' files use underscores so are not
		 * copied.
		 */
		snprintf(scandir_file_pattern, sizeof(scandir_file_pattern), "%u.", maps[mapnum].old);
		numFiles = pg_scandir(ctx, maps[mapnum].old_file, &namelist, dir_matching_filenames, NULL);

		while (numFiles--)
		{
			snprintf(old_file, sizeof(old_file), "%s/%s", maps[mapnum].old_file,
					 namelist[numFiles]->d_name);
			/* keep the old segment number (from '.' on), substitute new relfilenode */
			snprintf(new_file, sizeof(new_file), "%s/%u%s", maps[mapnum].new_file,
					 maps[mapnum].new, strchr(namelist[numFiles]->d_name, '.'));

			unlink(new_file);
			transfer_relfile(ctx, pageConverter, old_file, new_file,
							 maps[mapnum].old_nspname, maps[mapnum].old_relname,
							 maps[mapnum].new_nspname, maps[mapnum].new_relname);

			pg_free(namelist[numFiles]);
		}

		pg_free(namelist);
	}
}
/*
 * transfer_relfile()
 *
 * Copy or link one relation file from the old cluster to the new one,
 * according to ctx->transfer_mode.  Any failure is fatal.
 */
static void
transfer_relfile(migratorContext *ctx, pageCnvCtx *pageConverter, const char *oldfile,
				 const char *newfile, const char *oldnspname, const char *oldrelname,
				 const char *newnspname, const char *newrelname)
{
	const char *errmsg;

	/* A page converter rewrites page contents, which link mode cannot do. */
	if ((ctx->transfer_mode == TRANSFER_MODE_LINK) && (pageConverter != NULL))
		pg_log(ctx, PG_FATAL, "this migration requires page-by-page conversion, "
			   "you must use copy-mode instead of link-mode\n");

	if (ctx->transfer_mode == TRANSFER_MODE_COPY)
	{
		pg_log(ctx, PG_INFO, "copying %s to %s\n", oldfile, newfile);

		errmsg = copyAndUpdateFile(ctx, pageConverter, oldfile, newfile, true);
		if (errmsg != NULL)
			pg_log(ctx, PG_FATAL, "error while copying %s.%s(%s) to %s.%s(%s): %s\n",
				   oldnspname, oldrelname, oldfile, newnspname, newrelname, newfile, errmsg);
	}
	else
	{
		pg_log(ctx, PG_INFO, "linking %s to %s\n", newfile, oldfile);

		errmsg = linkAndUpdateFile(ctx, pageConverter, oldfile, newfile);
		if (errmsg != NULL)
			pg_log(ctx, PG_FATAL,
				   "error while creating link from %s.%s(%s) to %s.%s(%s): %s\n",
				   oldnspname, oldrelname, oldfile, newnspname, newrelname,
				   newfile, errmsg);
	}
}
/*
* server.c
*
* database server functions
*/
#include "pg_upgrade.h"
#define POSTMASTER_UPTIME 20
#define STARTUP_WARNING_TRIES 2
static pgpid_t get_postmaster_pid(migratorContext *ctx, const char *datadir);
static bool test_server_conn(migratorContext *ctx, int timeout,
Cluster whichCluster);
/*
 * connectToServer()
 *
 * Connects to the desired database on the designated server.
 * If the connection attempt fails, this function logs an error
 * message and calls exit_nicely() to kill the program.
 */
PGconn *
connectToServer(migratorContext *ctx, const char *db_name,
				Cluster whichCluster)
{
	char		connectString[MAXPGPATH];
	unsigned short port = (whichCluster == CLUSTER_OLD) ?
	ctx->old.port : ctx->new.port;
	PGconn	   *conn;

	/* no password is supplied; presumably pg_hba.conf allows 'trust' --
	 * see the hint printed by start_postmaster() */
	snprintf(connectString, sizeof(connectString),
			 "dbname = '%s' user = '%s' port = %d", db_name, ctx->user, port);

	conn = PQconnectdb(connectString);

	if (conn == NULL || PQstatus(conn) != CONNECTION_OK)
	{
		pg_log(ctx, PG_REPORT, "Connection to database failed: %s\n",
			   PQerrorMessage(conn));

		if (conn)
			PQfinish(conn);

		exit_nicely(ctx, true);
	}

	return conn;
}
/*
 * executeQueryOrDie()
 *
 * Formats a query string from the given arguments and executes the
 * resulting query. If the query fails, this function logs an error
 * message and calls exit_nicely() to kill the program.
 *
 * Note: the query is formatted into a static 8KB buffer, so this
 * function is not reentrant and silently truncates longer queries.
 */
PGresult *
executeQueryOrDie(migratorContext *ctx, PGconn *conn, const char *fmt,...)
{
	static char command[8192];
	va_list		args;
	PGresult   *result;
	ExecStatusType status;

	va_start(args, fmt);
	vsnprintf(command, sizeof(command), fmt, args);
	va_end(args);

	pg_log(ctx, PG_DEBUG, "executing: %s\n", command);
	result = PQexec(conn, command);
	status = PQresultStatus(result);

	/* either a result set or a successful command is acceptable */
	if ((status != PGRES_TUPLES_OK) && (status != PGRES_COMMAND_OK))
	{
		pg_log(ctx, PG_REPORT, "DB command failed\n%s\n%s\n", command,
			   PQerrorMessage(conn));
		PQclear(result);
		PQfinish(conn);
		exit_nicely(ctx, true);
		return NULL;			/* Never get here, but keeps compiler happy */
	}
	else
		return result;
}
/*
 * get_postmaster_pid()
 *
 * Returns the pid of the postmaster running on datadir, as read from the
 * first line of its postmaster.pid file.  Returns 0 if there is no pid
 * file; a pid file with unparsable contents is fatal.
 */
static pgpid_t
get_postmaster_pid(migratorContext *ctx, const char *datadir)
{
	char		pidpath[MAXPGPATH];
	FILE	   *fp;
	long		pid = 0;

	snprintf(pidpath, sizeof(pidpath), "%s/postmaster.pid", datadir);

	fp = fopen(pidpath, "r");
	if (fp == NULL)
		return (pgpid_t) 0;		/* no pid file -> no postmaster */

	if (fscanf(fp, "%ld", &pid) != 1)
	{
		fclose(fp);
		pg_log(ctx, PG_FATAL, "%s: invalid data in PID file \"%s\"\n",
			   ctx->progname, pidpath);
	}

	fclose(fp);
	return (pgpid_t) pid;
}
/*
 * get_major_server_version()
 *
 * gets the version (in unsigned int form) for the given "datadir". Assumes
 * that datadir is an absolute path to a valid pgdata directory. The version
 * is retrieved by reading the PG_VERSION file.
 *
 * Returns e.g. 80400 for "8.4"; 0 if the file cannot be opened.
 * A malloc'ed copy of the raw version string is stored in *verstr.
 */
uint32
get_major_server_version(migratorContext *ctx, char **verstr, Cluster whichCluster)
{
	const char *datadir = whichCluster == CLUSTER_OLD ?
	ctx->old.pgdata : ctx->new.pgdata;
	FILE	   *version_fd;
	char		ver_file[MAXPGPATH];
	int			integer_version = 0;
	int			fractional_version = 0;

	*verstr = pg_malloc(ctx, 64);

	snprintf(ver_file, sizeof(ver_file), "%s/PG_VERSION", datadir);
	if ((version_fd = fopen(ver_file, "r")) == NULL)
		return 0;

	if (fscanf(version_fd, "%63s", *verstr) == 0 ||
		sscanf(*verstr, "%d.%d", &integer_version, &fractional_version) != 2)
	{
		pg_log(ctx, PG_FATAL, "could not get version from %s\n", datadir);
		fclose(version_fd);
		return 0;
	}

	/* previously the file was leaked on the success path */
	fclose(version_fd);

	return (100 * integer_version + fractional_version) * 100;
}
/*
 * start_postmaster()
 *
 * Starts the postmaster of the requested cluster via pg_ctl, with
 * autovacuum disabled (see the -o options), waits for it to accept
 * connections, and records its pid and which cluster is running.
 * Failure to start or to find the pid is fatal.
 *
 * "quiet" is currently unused.
 */
void
start_postmaster(migratorContext *ctx, Cluster whichCluster, bool quiet)
{
	char		cmd[MAXPGPATH];
	const char *bindir;
	const char *datadir;
	unsigned short port;

	if (whichCluster == CLUSTER_OLD)
	{
		bindir = ctx->old.bindir;
		datadir = ctx->old.pgdata;
		port = ctx->old.port;
	}
	else
	{
		bindir = ctx->new.bindir;
		datadir = ctx->new.pgdata;
		port = ctx->new.port;
	}

	/*
	 * use -l for Win32.  snprintf (rather than sprintf) so overlong
	 * bindir/logfile/datadir paths cannot overflow cmd[].
	 */
	snprintf(cmd, sizeof(cmd), SYSTEMQUOTE "\"%s/pg_ctl\" -l \"%s\" -D \"%s\" "
			 "-o \"-p %d -c autovacuum=off -c autovacuum_freeze_max_age=2000000000\" "
			 "start >> \"%s\" 2>&1" SYSTEMQUOTE,
			 bindir, ctx->logfile, datadir, port, ctx->logfile);
	exec_prog(ctx, true, "%s", cmd);

	/* wait for the server to start properly */

	if (test_server_conn(ctx, POSTMASTER_UPTIME, whichCluster) == false)
		pg_log(ctx, PG_FATAL, " Unable to start %s postmaster with the command: %s\nPerhaps pg_hba.conf was not set to \"trust\".",
			   CLUSTERNAME(whichCluster), cmd);

	if ((ctx->postmasterPID = get_postmaster_pid(ctx, datadir)) == 0)
		pg_log(ctx, PG_FATAL, " Unable to get postmaster pid\n");
	ctx->running_cluster = whichCluster;
}
/*
 * stop_postmaster()
 *
 * Stops whichever postmaster we started (per ctx->running_cluster) via
 * pg_ctl; a no-op if none is running.  "fast" uses -m fast shutdown and
 * also makes pg_ctl failures non-fatal.  "quiet" is currently unused.
 */
void
stop_postmaster(migratorContext *ctx, bool fast, bool quiet)
{
	const char *bindir;
	const char *datadir;

	if (ctx->running_cluster == CLUSTER_OLD)
	{
		bindir = ctx->old.bindir;
		datadir = ctx->old.pgdata;
	}
	else if (ctx->running_cluster == CLUSTER_NEW)
	{
		bindir = ctx->new.bindir;
		datadir = ctx->new.pgdata;
	}
	else
		return;					/* no cluster running */

	/* use -l for Win32 */
	exec_prog(ctx, fast ? false : true,
			  SYSTEMQUOTE "\"%s/pg_ctl\" -l \"%s\" -D \"%s\" %s stop >> \"%s\" 2>&1" SYSTEMQUOTE,
			  bindir, ctx->logfile, datadir, fast ? "-m fast" : "", ctx->logfile);

	ctx->postmasterPID = 0;
	ctx->running_cluster = NONE;
}
/*
 * test_server_conn()
 *
 * tests whether postmaster is running or not by trying to connect
 * to it. If connection is unsuccessful we do a sleep of 1 sec and then
 * try the connection again. This process continues "timeout" times.
 *
 * Returns true if the connection attempt was successful, false otherwise.
 */
static bool
test_server_conn(migratorContext *ctx, int timeout, Cluster whichCluster)
{
	PGconn	   *conn = NULL;
	char		con_opts[MAX_STRING];
	int			tries;
	unsigned short port = (whichCluster == CLUSTER_OLD) ?
	ctx->old.port : ctx->new.port;
	bool		ret = false;

	snprintf(con_opts, sizeof(con_opts),
			 "dbname = 'template1' user = '%s' port = %d ", ctx->user, port);

	for (tries = 0; tries < timeout; tries++)
	{
		sleep(1);
		if ((conn = PQconnectdb(con_opts)) != NULL &&
			PQstatus(conn) == CONNECTION_OK)
		{
			PQfinish(conn);
			ret = true;
			break;
		}

		/*
		 * PQconnectdb() returns a PGconn object even on failure; it must
		 * be freed with PQfinish() or every retry leaks one.
		 */
		if (conn != NULL)
		{
			PQfinish(conn);
			conn = NULL;
		}

		if (tries == STARTUP_WARNING_TRIES)
			prep_status(ctx, "Trying to start %s server ",
						CLUSTERNAME(whichCluster));
		else if (tries > STARTUP_WARNING_TRIES)
			pg_log(ctx, PG_REPORT, ".");
	}

	if (tries > STARTUP_WARNING_TRIES)
		check_ok(ctx);

	return ret;
}
/*
 * check_for_libpq_envvars()
 *
 * tests whether any libpq environment variables are set.
 * Since pg_upgrade connects to both the old and the new server,
 * it is potentially dangerous to have any of these set.
 *
 * If any are found, will log them and cancel.
 */
void
check_for_libpq_envvars(migratorContext *ctx)
{
	PQconninfoOption *option;
	PQconninfoOption *start;
	bool		found = false;

	/* Get valid libpq env vars from the PQconndefaults function */

	start = option = PQconndefaults();

	while (option->keyword != NULL)
	{
		const char *value;

		/* only complain about vars that are actually set (and non-empty) */
		if (option->envvar && (value = getenv(option->envvar)) && strlen(value) > 0)
		{
			found = true;

			pg_log(ctx, PG_WARNING,
				   "libpq env var %-20s is currently set to: %s\n", option->envvar, value);
		}

		option++;
	}

	/* Free the memory that libpq allocated on our behalf */
	PQconninfoFree(start);

	if (found)
		pg_log(ctx, PG_FATAL,
			   "libpq env vars have been found and listed above, please unset them for pg_upgrade\n");
}
/*
* tablespace.c
*
* tablespace functions
*/
#include "pg_upgrade.h"
static void get_tablespace_paths(migratorContext *ctx);
static void set_tablespace_directory_suffix(migratorContext *ctx,
Cluster whichCluster);
/*
 * init_tablespaces()
 *
 * Loads the old cluster's tablespace paths and computes each cluster's
 * version-specific tablespace subdirectory suffix.  Identical suffixes
 * would make both clusters use the same directories, so that is fatal
 * when any user tablespaces exist.
 */
void
init_tablespaces(migratorContext *ctx)
{
	get_tablespace_paths(ctx);

	set_tablespace_directory_suffix(ctx, CLUSTER_OLD);
	set_tablespace_directory_suffix(ctx, CLUSTER_NEW);

	if (ctx->num_tablespaces > 0 &&
		strcmp(ctx->old.tablespace_suffix, ctx->new.tablespace_suffix) == 0)
		pg_log(ctx, PG_FATAL,
			   "Cannot migrate to/from the same system catalog version when\n"
			   "using tablespaces.\n");
}
/*
 * get_tablespace_paths()
 *
 * Scans pg_tablespace in the old cluster and stores a malloc'ed array of
 * all non-default tablespace paths in ctx->tablespaces (count in
 * ctx->num_tablespaces); the array lives for the rest of the run.
 */
static void
get_tablespace_paths(migratorContext *ctx)
{
	PGconn	   *conn = connectToServer(ctx, "template1", CLUSTER_OLD);
	PGresult   *res;
	int			ntups;
	int			tblnum;
	int			i_spclocation;

	/* pg_default and pg_global live inside $PGDATA and need no mapping */
	res = executeQueryOrDie(ctx, conn,
							"SELECT	spclocation "
							"FROM	pg_catalog.pg_tablespace "
							"WHERE	spcname != 'pg_default' AND "
							"		spcname != 'pg_global'");

	ctx->num_tablespaces = ntups = PQntuples(res);
	ctx->tablespaces = (char **) pg_malloc(ctx, ntups * sizeof(char *));

	i_spclocation = PQfnumber(res, "spclocation");

	for (tblnum = 0; tblnum < ntups; tblnum++)
		ctx->tablespaces[tblnum] = pg_strdup(ctx,
									 PQgetvalue(res, tblnum, i_spclocation));

	PQclear(res);
	PQfinish(conn);

	return;
}
/*
 * set_tablespace_directory_suffix()
 *
 * Computes the per-version subdirectory that this cluster appends to a
 * tablespace path: empty for <= 8.4, "/PG_<version>_<catversion>" for
 * later versions.
 */
static void
set_tablespace_directory_suffix(migratorContext *ctx, Cluster whichCluster)
{
	ClusterInfo *cluster = (whichCluster == CLUSTER_OLD) ? &ctx->old : &ctx->new;

	if (GET_MAJOR_VERSION(cluster->major_version) <= 804)
		cluster->tablespace_suffix = pg_strdup(ctx, "");
	else
	{
		/* This cluster has a version-specific subdirectory */

		/* "/PG_" + version string + "_" + up to 10 catversion digits + NUL */
		cluster->tablespace_suffix = pg_malloc(ctx, 5 + strlen(cluster->major_version_str) +
											   10 /* OIDCHARS */ + 1);

		/*
		 * The leading slash is needed to start a new directory.  cat_ver is
		 * a uint32, so print it with %u (the old %d was a format mismatch
		 * and the allocation was one byte short for 10-digit values).
		 */
		sprintf(cluster->tablespace_suffix, "/PG_%s_%u", cluster->major_version_str,
				cluster->controldata.cat_ver);
	}
}
/*
* util.c
*
* utility functions
*/
#include "pg_upgrade.h"
#include <signal.h>
/*
 * report_status()
 *
 * Displays the result of an operation (ok, failed, error message,...)
 * by formatting the arguments and passing them to pg_log() with a
 * trailing newline.
 */
void
report_status(migratorContext *ctx, eLogType type, const char *fmt,...)
{
	char		buf[MAX_STRING];
	va_list		ap;

	va_start(ap, fmt);
	vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);

	pg_log(ctx, type, "%s\n", buf);
}
/*
 * prep_status()
 *
 * Displays a message that describes an operation we are about to begin.
 * We pad the message out to MESSAGE_WIDTH characters so that all of the "ok" and
 * "failed" indicators line up nicely.
 *
 * A typical sequence would look like this:
 *		prep_status(ctx, "about to flarb the next %d files", fileCount);
 *
 *		if ((message = flarbFiles(fileCount)) == NULL)
 *			report_status(ctx, PG_REPORT, "ok");
 *		else
 *			pg_log(ctx, PG_FATAL, "failed - %s", message);
 */
void
prep_status(migratorContext *ctx, const char *fmt,...)
{
	va_list		args;
	char		message[MAX_STRING];

	va_start(args, fmt);
	vsnprintf(message, sizeof(message), fmt, args);
	va_end(args);

	/* a trailing newline means the caller wants a complete line, no padding */
	if (strlen(message) > 0 && message[strlen(message) - 1] == '\n')
		pg_log(ctx, PG_REPORT, "%s", message);
	else
		/* pad so a later "ok"/"failed" from check_ok() lines up */
		pg_log(ctx, PG_REPORT, "%-" MESSAGE_WIDTH "s", message);
}
/*
 * pg_log()
 *
 * Central logging routine: formats the message (truncated to MAX_STRING),
 * appends it to the log file if one is open, then prints it to stdout
 * according to "type".  PG_FATAL does not return: it calls exit_nicely().
 */
void
pg_log(migratorContext *ctx, eLogType type, char *fmt,...)
{
	va_list		args;
	char		message[MAX_STRING];

	va_start(args, fmt);
	vsnprintf(message, sizeof(message), fmt, args);
	va_end(args);

	/* every message, regardless of level, is mirrored to the log file */
	if (ctx->log_fd != NULL)
	{
		fwrite(message, strlen(message), 1, ctx->log_fd);
		/* if we are using OVERWRITE_MESSAGE, add newline */
		if (strchr(message, '\r') != NULL)
			fwrite("\n", 1, 1, ctx->log_fd);
		fflush(ctx->log_fd);
	}

	switch (type)
	{
		case PG_INFO:
			if (ctx->verbose)	/* INFO is shown only in verbose mode */
				printf("%s", _(message));
			break;

		case PG_REPORT:
		case PG_WARNING:
			printf("%s", _(message));
			break;

		case PG_FATAL:
			printf("%s", "\n");
			printf("%s", _(message));
			exit_nicely(ctx, true);		/* does not return */
			break;

		case PG_DEBUG:
			/*
			 * NOTE(review): assumes debug_fd is always open when ctx->debug
			 * is set -- confirm in option parsing, else this can crash.
			 */
			if (ctx->debug)
				fprintf(ctx->debug_fd, "%s\n", _(message));
			break;

		default:
			break;
	}
	fflush(stdout);
}
/*
 * check_ok()
 *
 * Prints the trailing "ok" for a step previously announced with
 * prep_status().
 */
void
check_ok(migratorContext *ctx)
{
	/* all seems well */
	report_status(ctx, PG_REPORT, "ok");
	fflush(stdout);
}
/*
 * quote_identifier()
 *		Properly double-quote a SQL identifier: wrap it in double quotes
 *		and double any embedded double-quote characters.
 *
 * The result should be pg_free'd, but most callers don't bother because
 * memory leakage is not a big deal in this program.
 */
char *
quote_identifier(migratorContext *ctx, const char *s)
{
	/* worst case: every char is '"' and gets doubled, plus quotes and NUL */
	char	   *result = pg_malloc(ctx, strlen(s) * 2 + 3);
	char	   *dst = result;
	const char *src;

	*dst++ = '"';
	for (src = s; *src != '\0'; src++)
	{
		if (*src == '"')
			*dst++ = '"';		/* double an embedded quote */
		*dst++ = *src;
	}
	*dst++ = '"';
	*dst = '\0';

	return result;
}
/*
 * get_user_info()
 * (copied from initdb.c) find the current user
 *
 * Stores a malloc'ed copy of the user name in *user_name and returns the
 * effective user id (always 1 on Windows).
 */
int
get_user_info(migratorContext *ctx, char **user_name)
{
	int			user_id;

#ifndef WIN32
	struct passwd *pw = getpwuid(geteuid());

	/*
	 * getpwuid() can fail (e.g. no passwd entry for the effective uid);
	 * previously we would have crashed dereferencing NULL below.
	 */
	if (pw == NULL)
		pg_log(ctx, PG_FATAL, "could not get current user name: no password entry for effective user id\n");

	user_id = geteuid();
#else							/* the windows code */

	struct passwd_win32
	{
		int			pw_uid;
		char		pw_name[128];
	}			pass_win32;
	struct passwd_win32 *pw = &pass_win32;
	DWORD		pwname_size = sizeof(pass_win32.pw_name) - 1;

	GetUserName(pw->pw_name, &pwname_size);

	user_id = 1;
#endif

	*user_name = pg_strdup(ctx, pw->pw_name);

	return user_id;
}
/*
 * exit_nicely()
 *
 * Terminates the program: shuts down any postmaster we started, frees/
 * closes the log files, and exits with status 1 (need_cleanup) or 0.
 */
void
exit_nicely(migratorContext *ctx, bool need_cleanup)
{
	/* fast shutdown; on success this also resets ctx->postmasterPID */
	stop_postmaster(ctx, true, true);

	pg_free(ctx->logfile);

	if (ctx->log_fd)
		fclose(ctx->log_fd);

	if (ctx->debug_fd)
		fclose(ctx->debug_fd);

	/* terminate any running instance of postmaster */
	/* (only possible if stop_postmaster() returned early with
	 * running_cluster == NONE while a pid was still recorded) */
	if (ctx->postmasterPID != 0)
		kill(ctx->postmasterPID, SIGTERM);

	if (need_cleanup)
	{
		/*
		 * FIXME must delete intermediate files
		 */
		exit(1);
	}
	else
		exit(0);
}
/*
 * pg_malloc()
 *
 * malloc() wrapper that reports out-of-memory through the common fatal
 * path (pg_log PG_FATAL does not return).
 */
void *
pg_malloc(migratorContext *ctx, int n)
{
	void	   *result = malloc(n);

	if (result == NULL)
		pg_log(ctx, PG_FATAL, "%s: out of memory\n", ctx->progname);

	return result;
}
/*
 * pg_free()
 *
 * Frees storage obtained from pg_malloc()/pg_strdup().  Accepts NULL,
 * which is a no-op -- free(NULL) is defined to do nothing, so the old
 * explicit NULL guard was redundant.
 */
void
pg_free(void *p)
{
	free(p);
}
/*
 * pg_strdup()
 *
 * strdup() wrapper that reports out-of-memory through the common fatal
 * path (pg_log PG_FATAL does not return).
 */
char *
pg_strdup(migratorContext *ctx, const char *s)
{
	char	   *copy = strdup(s);

	if (copy == NULL)
		pg_log(ctx, PG_FATAL, "%s: out of memory\n", ctx->progname);

	return copy;
}
/*
 * getErrorText()
 *
 * Returns the text of the error message for the given error number
 *
 * This feature is factored into a separate function because it is
 * system-dependent.
 *
 * NOTE(review): the strdup'ed result is apparently never freed by
 * callers, and strdup() can return NULL on OOM.  Also, on Win32 the
 * mapped errno value is not used -- strerror() still receives the
 * caller-supplied errNum; confirm that is intended.
 */
const char *
getErrorText(int errNum)
{
#ifdef WIN32
	_dosmaperr(GetLastError());	/* map the Win32 error code into errno */
#endif
	return strdup(strerror(errNum));
}
/*
* version.c
*
* Postgres-version-specific routines
*/
#include "pg_upgrade.h"
#include "access/transam.h"
/*
 * new_9_0_populate_pg_largeobject_metadata()
 *	new >= 9.0, old <= 8.4
 *	9.0 has a new pg_largeobject permission table
 *
 * Scans every database for large objects.  In check mode, just warns;
 * otherwise writes an SQL script (pg_largeobject.sql) that recreates the
 * permission-table entries, and tells the user to run it after migration.
 */
void
new_9_0_populate_pg_largeobject_metadata(migratorContext *ctx, bool check_mode,
										 Cluster whichCluster)
{
	ClusterInfo *active_cluster = (whichCluster == CLUSTER_OLD) ?
	&ctx->old : &ctx->new;
	int			dbnum;
	FILE	   *script = NULL;
	bool		found = false;
	char		output_path[MAXPGPATH];

	prep_status(ctx, "Checking for large objects");

	snprintf(output_path, sizeof(output_path), "%s/pg_largeobject.sql",
			 ctx->output_dir);

	for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		int			i_count;
		DbInfo	   *active_db = &active_cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(ctx, active_db->db_name, whichCluster);

		/* find if there are any large objects */
		res = executeQueryOrDie(ctx, conn,
								"SELECT count(*) "
								"FROM	pg_catalog.pg_largeobject ");

		i_count = PQfnumber(res, "count");
		if (atoi(PQgetvalue(res, 0, i_count)) != 0)
		{
			found = true;
			if (!check_mode)
			{
				/* the script file is created lazily, on first hit */
				if (script == NULL && (script = fopen(output_path, "w")) == NULL)
					pg_log(ctx, PG_FATAL, "Could not create necessary file:  %s\n", output_path);
				fprintf(script, "\\connect %s\n",
						quote_identifier(ctx, active_db->db_name));
				fprintf(script,
						"SELECT pg_catalog.lo_create(t.loid)\n"
						"FROM (SELECT DISTINCT loid FROM pg_catalog.pg_largeobject) AS t;\n");
			}
		}

		PQclear(res);
		PQfinish(conn);
	}

	if (found)
	{
		if (!check_mode)
			fclose(script);
		report_status(ctx, PG_WARNING, "warning");
		if (check_mode)
			pg_log(ctx, PG_WARNING, "\n"
				   "| Your installation contains large objects.\n"
				   "| The new database has an additional large object\n"
				   "| permission table.  After migration, you will be\n"
				   "| given a command to populate the pg_largeobject\n"
				   "| permission table with default permissions.\n\n");
		else
			pg_log(ctx, PG_WARNING, "\n"
				   "| Your installation contains large objects.\n"
				   "| The new database has an additional large object\n"
				   "| permission table so default permissions must be\n"
				   "| defined for all large objects.  The file:\n"
				   "| \t%s\n"
				   "| when executed by psql by the database super-user\n"
				   "| will define the default permissions.\n\n",
				   output_path);
	}
	else
		check_ok(ctx);
}
/*
 * version_old_8_3.c
 *
 * Postgres-version-specific routines for migrating from an 8.3 old cluster
 */
#include "pg_upgrade.h"
#include "access/transam.h"
/*
 * old_8_3_check_for_name_data_type_usage()
 *	8.3 -> 8.4
 *	Alignment for the 'name' data type changed to 'char' in 8.4;
 *	checks tables and indexes.
 */
void
old_8_3_check_for_name_data_type_usage(migratorContext *ctx, Cluster whichCluster)
{
	ClusterInfo *cluster = (whichCluster == CLUSTER_OLD) ?
	&ctx->old : &ctx->new;
	char		report_path[MAXPGPATH];
	FILE	   *report = NULL;		/* opened lazily, on first offender */
	bool		bad_columns = false;
	int			dbnum;

	prep_status(ctx, "Checking for invalid 'name' user columns");

	snprintf(report_path, sizeof(report_path), "%s/tables_using_name.txt",
			 ctx->output_dir);

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		DbInfo	   *cur_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(ctx, cur_db->db_name, whichCluster);
		PGresult   *res;
		bool		wrote_db_header = false;
		int			nrows;
		int			row;
		int			col_nsp;
		int			col_rel;
		int			col_att;

		/*
		 * With a smaller alignment in 8.4, 'name' cannot be used in a
		 * non-pg_catalog table, except as the first column. (We could tighten
		 * that condition with enough analysis, but it seems not worth the
		 * trouble.)
		 */
		res = executeQueryOrDie(ctx, conn,
								"SELECT n.nspname, c.relname, a.attname "
								"FROM pg_catalog.pg_class c, "
								" pg_catalog.pg_namespace n, "
								" pg_catalog.pg_attribute a "
								"WHERE c.oid = a.attrelid AND "
								" a.attnum > 1 AND "
								" NOT a.attisdropped AND "
								" a.atttypid = 'pg_catalog.name'::pg_catalog.regtype AND "
								" c.relnamespace = n.oid AND "
								" n.nspname != 'pg_catalog' AND "
								" n.nspname != 'information_schema'");

		nrows = PQntuples(res);
		col_nsp = PQfnumber(res, "nspname");
		col_rel = PQfnumber(res, "relname");
		col_att = PQfnumber(res, "attname");
		for (row = 0; row < nrows; row++)
		{
			bad_columns = true;
			if (report == NULL && (report = fopen(report_path, "w")) == NULL)
				pg_log(ctx, PG_FATAL, "Could not create necessary file: %s\n", report_path);
			if (!wrote_db_header)
			{
				fprintf(report, "Database: %s\n", cur_db->db_name);
				wrote_db_header = true;
			}
			fprintf(report, " %s.%s.%s\n",
					PQgetvalue(res, row, col_nsp),
					PQgetvalue(res, row, col_rel),
					PQgetvalue(res, row, col_att));
		}
		PQclear(res);
		PQfinish(conn);
	}

	if (!bad_columns)
	{
		check_ok(ctx);
		return;
	}

	fclose(report);
	/* terminate the status line, then abort with the detailed report */
	pg_log(ctx, PG_REPORT, "fatal\n");
	pg_log(ctx, PG_FATAL,
		   "| Your installation uses the \"name\" data type in\n"
		   "| user tables. This data type changed its internal\n"
		   "| alignment between your old and new clusters so this\n"
		   "| cluster cannot currently be upgraded. You can\n"
		   "| remove the problem tables and restart the migration.\n"
		   "| A list of the problem columns is in the file:\n"
		   "| \t%s\n\n", report_path);
}
/*
 * old_8_3_check_for_tsquery_usage()
 *	8.3 -> 8.4
 *	A new 'prefix' field was added to the 'tsquery' data type in 8.4
 *	so migration of such fields is impossible.
 */
void
old_8_3_check_for_tsquery_usage(migratorContext *ctx, Cluster whichCluster)
{
	ClusterInfo *cluster = (whichCluster == CLUSTER_OLD) ?
	&ctx->old : &ctx->new;
	char		listing_path[MAXPGPATH];
	FILE	   *listing = NULL;		/* opened lazily, on first offender */
	bool		any_tsquery = false;
	int			dbnum;

	prep_status(ctx, "Checking for tsquery user columns");

	snprintf(listing_path, sizeof(listing_path), "%s/tables_using_tsquery.txt",
			 ctx->output_dir);

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		DbInfo	   *cur_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(ctx, cur_db->db_name, whichCluster);
		PGresult   *res;
		bool		header_done = false;
		int			nrows;
		int			row;
		int			col_nsp;
		int			col_rel;
		int			col_att;

		/* Find any user-defined tsquery columns */
		res = executeQueryOrDie(ctx, conn,
								"SELECT n.nspname, c.relname, a.attname "
								"FROM pg_catalog.pg_class c, "
								" pg_catalog.pg_namespace n, "
								" pg_catalog.pg_attribute a "
								"WHERE c.relkind = 'r' AND "
								" c.oid = a.attrelid AND "
								" NOT a.attisdropped AND "
								" a.atttypid = 'pg_catalog.tsquery'::pg_catalog.regtype AND "
								" c.relnamespace = n.oid AND "
								" n.nspname != 'pg_catalog' AND "
								" n.nspname != 'information_schema'");

		nrows = PQntuples(res);
		col_nsp = PQfnumber(res, "nspname");
		col_rel = PQfnumber(res, "relname");
		col_att = PQfnumber(res, "attname");
		for (row = 0; row < nrows; row++)
		{
			any_tsquery = true;
			if (listing == NULL && (listing = fopen(listing_path, "w")) == NULL)
				pg_log(ctx, PG_FATAL, "Could not create necessary file: %s\n", listing_path);
			if (!header_done)
			{
				fprintf(listing, "Database: %s\n", cur_db->db_name);
				header_done = true;
			}
			fprintf(listing, " %s.%s.%s\n",
					PQgetvalue(res, row, col_nsp),
					PQgetvalue(res, row, col_rel),
					PQgetvalue(res, row, col_att));
		}
		PQclear(res);
		PQfinish(conn);
	}

	if (!any_tsquery)
	{
		check_ok(ctx);
		return;
	}

	fclose(listing);
	/* terminate the status line, then abort with the detailed report */
	pg_log(ctx, PG_REPORT, "fatal\n");
	pg_log(ctx, PG_FATAL,
		   "| Your installation uses the \"tsquery\" data type.\n"
		   "| This data type added a new internal field between\n"
		   "| your old and new clusters so this cluster cannot\n"
		   "| currently be upgraded. You can remove the problem\n"
		   "| columns and restart the migration. A list of the\n"
		   "| problem columns is in the file:\n"
		   "| \t%s\n\n", listing_path);
}
/*
 * old_8_3_check_for_isn_and_int8_passing_mismatch()
 *	8.3 -> 8.4
 *	/contrib/isn relies on data type int8, and in 8.4 int8 is now passed
 *	by value.  The schema dumps the CREATE TYPE PASSEDBYVALUE setting so
 *	it must match for the old and new servers.
 *
 *	On mismatch, writes a list of the offending functions to
 *	contrib_isn_and_int8_pass_by_value.txt and aborts the upgrade.
 */
void
old_8_3_check_for_isn_and_int8_passing_mismatch(migratorContext *ctx, Cluster whichCluster)
{
	ClusterInfo *active_cluster = (whichCluster == CLUSTER_OLD) ?
	&ctx->old : &ctx->new;
	int			dbnum;
	FILE	   *script = NULL;		/* report file, opened on first offender */
	bool		found = false;
	char		output_path[MAXPGPATH];

	prep_status(ctx, "Checking for /contrib/isn with bigint-passing mismatch");

	/*
	 * NOTE(review): the int8 passing convention is compared via the float8
	 * flag; presumably both are governed by the same configure option
	 * (--disable-float8-byval) -- confirm against the controldata parsing.
	 */
	if (ctx->old.controldata.float8_pass_by_value ==
		ctx->new.controldata.float8_pass_by_value)
	{
		/* no mismatch */
		check_ok(ctx);
		return;
	}

	snprintf(output_path, sizeof(output_path), "%s/contrib_isn_and_int8_pass_by_value.txt",
			 ctx->output_dir);

	for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		bool		db_used = false;	/* "Database:" header emitted yet? */
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_proname;
		DbInfo	   *active_db = &active_cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(ctx, active_db->db_name, whichCluster);

		/* Find any functions coming from contrib/isn */
		res = executeQueryOrDie(ctx, conn,
								"SELECT n.nspname, p.proname "
								"FROM pg_catalog.pg_proc p, "
								" pg_catalog.pg_namespace n "
								"WHERE p.pronamespace = n.oid AND "
								" p.probin = '$libdir/isn'");

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_proname = PQfnumber(res, "proname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			found = true;
			/* open the report on the first hit; failure to create is fatal */
			if (script == NULL && (script = fopen(output_path, "w")) == NULL)
				pg_log(ctx, PG_FATAL, "Could not create necessary file: %s\n", output_path);
			if (!db_used)
			{
				fprintf(script, "Database: %s\n", active_db->db_name);
				db_used = true;
			}
			fprintf(script, " %s.%s\n",
					PQgetvalue(res, rowno, i_nspname),
					PQgetvalue(res, rowno, i_proname));
		}
		PQclear(res);
		PQfinish(conn);
	}

	if (found)
	{
		fclose(script);
		/* terminate the status line before the (non-returning) fatal report */
		pg_log(ctx, PG_REPORT, "fatal\n");
		pg_log(ctx, PG_FATAL,
			   "| Your installation uses \"/contrib/isn\" functions\n"
			   "| which rely on the bigint data type. Your old and\n"
			   "| new clusters pass bigint values differently so this\n"
			   "| cluster cannot currently be upgraded. You can\n"
			   "| manually migrate data that use \"/contrib/isn\"\n"
			   "| facilities and remove \"/contrib/isn\" from the\n"
			   "| old cluster and restart the migration. A list\n"
			   "| of the problem functions is in the file:\n"
			   "| \t%s\n\n", output_path);
	}
	else
		check_ok(ctx);
}
/*
 * old_8_3_rebuild_tsvector_tables()
 *	8.3 -> 8.4
 *	8.3 sorts lexemes by its length and if lengths are the same then it uses
 *	alphabetic order;  8.4 sorts lexemes in lexicographical order, e.g.
 *
 *	=> SELECT 'c bb aaa'::tsvector;
 *		 tsvector
 *	----------------
 *	 'aaa' 'bb' 'c'		-- 8.4
 *	 'c' 'bb' 'aaa'		-- 8.3
 *
 *	In check mode, only reports affected columns; otherwise also writes an
 *	ALTER TABLE rebuild script (rebuild_tsvector_tables.sql).
 */
void
old_8_3_rebuild_tsvector_tables(migratorContext *ctx, bool check_mode,
								Cluster whichCluster)
{
	ClusterInfo *active_cluster = (whichCluster == CLUSTER_OLD) ?
	&ctx->old : &ctx->new;
	int			dbnum;
	FILE	   *script = NULL;		/* rebuild script, opened on first hit */
	bool		found = false;
	char		output_path[MAXPGPATH];

	prep_status(ctx, "Checking for tsvector user columns");

	snprintf(output_path, sizeof(output_path), "%s/rebuild_tsvector_tables.sql",
			 ctx->output_dir);

	for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		bool		db_used = false;
		/* previous row's schema/table, used to group columns per ALTER TABLE */
		char		old_nspname[NAMEDATASIZE] = "",
					old_relname[NAMEDATASIZE] = "";
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_relname,
					i_attname;
		DbInfo	   *active_db = &active_cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(ctx, active_db->db_name, whichCluster);

		/* Find any user-defined tsvector columns */
		res = executeQueryOrDie(ctx, conn,
								"SELECT n.nspname, c.relname, a.attname "
								"FROM pg_catalog.pg_class c, "
								" pg_catalog.pg_namespace n, "
								" pg_catalog.pg_attribute a "
								"WHERE c.relkind = 'r' AND "
								" c.oid = a.attrelid AND "
								" NOT a.attisdropped AND "
								" a.atttypid = 'pg_catalog.tsvector'::pg_catalog.regtype AND "
								" c.relnamespace = n.oid AND "
								" n.nspname != 'pg_catalog' AND "
								" n.nspname != 'information_schema'");

/*
 * This macro is used below to avoid reindexing indexes already rebuilt
 * because of tsvector columns.  NOTE: although lexically inside this
 * function, the #define is file-scope and is also referenced by the
 * hash/gin and bpchar_pattern_ops checks later in this file.
 */
#define SKIP_TSVECTOR_TABLES \
				"i.indrelid NOT IN ( " \
				"SELECT DISTINCT c.oid " \
				"FROM pg_catalog.pg_class c, " \
				" pg_catalog.pg_namespace n, " \
				" pg_catalog.pg_attribute a " \
				"WHERE c.relkind = 'r' AND " \
				" c.oid = a.attrelid AND " \
				" NOT a.attisdropped AND " \
				" a.atttypid = 'pg_catalog.tsvector'::pg_catalog.regtype AND " \
				" c.relnamespace = n.oid AND " \
				" n.nspname != 'pg_catalog' AND " \
				" n.nspname != 'information_schema') "

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_relname = PQfnumber(res, "relname");
		i_attname = PQfnumber(res, "attname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			found = true;
			if (!check_mode)
			{
				if (script == NULL && (script = fopen(output_path, "w")) == NULL)
					pg_log(ctx, PG_FATAL, "Could not create necessary file: %s\n", output_path);
				if (!db_used)
				{
					fprintf(script, "\\connect %s\n\n",
							quote_identifier(ctx, active_db->db_name));
					db_used = true;
				}

				/* Rebuild all tsvector columns with one ALTER TABLE command */
				if (strcmp(PQgetvalue(res, rowno, i_nspname), old_nspname) != 0 ||
					strcmp(PQgetvalue(res, rowno, i_relname), old_relname) != 0)
				{
					/* starting a new table: close out the previous ALTER, if any */
					if (strlen(old_nspname) != 0 || strlen(old_relname) != 0)
						fprintf(script, ";\n\n");
					fprintf(script, "ALTER TABLE %s.%s\n",
							quote_identifier(ctx, PQgetvalue(res, rowno, i_nspname)),
							quote_identifier(ctx, PQgetvalue(res, rowno, i_relname)));
				}
				else
					fprintf(script, ",\n");
				strlcpy(old_nspname, PQgetvalue(res, rowno, i_nspname), sizeof(old_nspname));
				strlcpy(old_relname, PQgetvalue(res, rowno, i_relname), sizeof(old_relname));

				fprintf(script, "ALTER COLUMN %s "
				/* This could have been a custom conversion function call. */
						"TYPE pg_catalog.tsvector USING %s::pg_catalog.text::pg_catalog.tsvector",
						quote_identifier(ctx, PQgetvalue(res, rowno, i_attname)),
						quote_identifier(ctx, PQgetvalue(res, rowno, i_attname)));
			}
		}

		/* close the final ALTER TABLE for this database, if one was started */
		if (strlen(old_nspname) != 0 || strlen(old_relname) != 0)
			fprintf(script, ";\n\n");

		PQclear(res);

		/* XXX Mark tables as not accessible somehow */

		PQfinish(conn);
	}

	if (found)
	{
		if (!check_mode)
			fclose(script);
		report_status(ctx, PG_WARNING, "warning");
		if (check_mode)
			pg_log(ctx, PG_WARNING, "\n"
				   "| Your installation contains tsvector columns.\n"
				   "| The tsvector internal storage format changed\n"
				   "| between your old and new clusters so the tables\n"
				   "| must be rebuilt. After migration, you will be\n"
				   "| given instructions.\n\n");
		else
			pg_log(ctx, PG_WARNING, "\n"
				   "| Your installation contains tsvector columns.\n"
				   "| The tsvector internal storage format changed\n"
				   "| between your old and new clusters so the tables\n"
				   "| must be rebuilt. The file:\n"
				   "| \t%s\n"
				   "| when executed by psql by the database super-user\n"
				   "| will rebuild all tables with tsvector columns.\n\n",
				   output_path);
	}
	else
		check_ok(ctx);
}
/*
 * old_8_3_invalidate_hash_gin_indexes()
 *	8.3 -> 8.4
 *	Hash, Gin, and GiST index binary format has changes from 8.3->8.4
 *
 *	NOTE(review): only the 'hash' and 'gin' access methods are handled
 *	below; GiST is presumably dealt with elsewhere -- confirm.
 *
 *	In check mode, only reports affected indexes; otherwise also marks them
 *	invalid in pg_index and writes a REINDEX script.
 */
void
old_8_3_invalidate_hash_gin_indexes(migratorContext *ctx, bool check_mode,
									Cluster whichCluster)
{
	ClusterInfo *active_cluster = (whichCluster == CLUSTER_OLD) ?
	&ctx->old : &ctx->new;
	int			dbnum;
	FILE	   *script = NULL;		/* REINDEX script, opened on first hit */
	bool		found = false;
	char		output_path[MAXPGPATH];

	prep_status(ctx, "Checking for hash and gin indexes");

	snprintf(output_path, sizeof(output_path), "%s/reindex_hash_and_gin.sql",
			 ctx->output_dir);

	for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		bool		db_used = false;
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_relname;
		DbInfo	   *active_db = &active_cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(ctx, active_db->db_name, whichCluster);

		/*
		 * find hash and gin indexes, skipping indexes on tables the tsvector
		 * rebuild script will already recreate (SKIP_TSVECTOR_TABLES is
		 * #defined in old_8_3_rebuild_tsvector_tables() above)
		 */
		res = executeQueryOrDie(ctx, conn,
								"SELECT n.nspname, c.relname "
								"FROM pg_catalog.pg_class c, "
								" pg_catalog.pg_index i, "
								" pg_catalog.pg_am a, "
								" pg_catalog.pg_namespace n "
								"WHERE i.indexrelid = c.oid AND "
								" c.relam = a.oid AND "
								" c.relnamespace = n.oid AND "
								" a.amname IN ('hash', 'gin') AND "
								SKIP_TSVECTOR_TABLES);

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_relname = PQfnumber(res, "relname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			found = true;
			if (!check_mode)
			{
				if (script == NULL && (script = fopen(output_path, "w")) == NULL)
					pg_log(ctx, PG_FATAL, "Could not create necessary file: %s\n", output_path);
				if (!db_used)
				{
					fprintf(script, "\\connect %s\n",
							quote_identifier(ctx, active_db->db_name));
					db_used = true;
				}
				fprintf(script, "REINDEX INDEX %s.%s;\n",
						quote_identifier(ctx, PQgetvalue(res, rowno, i_nspname)),
						quote_identifier(ctx, PQgetvalue(res, rowno, i_relname)));
			}
		}

		PQclear(res);

		if (!check_mode && found)
			/*
			 * mark hash and gin indexes as invalid; note this UPDATE covers
			 * ALL hash/gin indexes, including those on tsvector tables that
			 * were excluded from the REINDEX script above
			 */
			PQclear(executeQueryOrDie(ctx, conn,
									  "UPDATE pg_catalog.pg_index i "
									  "SET indisvalid = false "
									  "FROM pg_catalog.pg_class c, "
									  " pg_catalog.pg_am a, "
									  " pg_catalog.pg_namespace n "
									  "WHERE i.indexrelid = c.oid AND "
									  " c.relam = a.oid AND "
									  " c.relnamespace = n.oid AND "
									  " a.amname IN ('hash', 'gin')"));
		PQfinish(conn);
	}

	if (found)
	{
		if (!check_mode)
			fclose(script);
		report_status(ctx, PG_WARNING, "warning");
		if (check_mode)
			pg_log(ctx, PG_WARNING, "\n"
				   "| Your installation contains hash and/or gin\n"
				   "| indexes. These indexes have different\n"
				   "| internal formats between your old and new\n"
				   "| clusters so they must be reindexed with the\n"
				   "| REINDEX command. After migration, you will\n"
				   "| be given REINDEX instructions.\n\n");
		else
			pg_log(ctx, PG_WARNING, "\n"
				   "| Your installation contains hash and/or gin\n"
				   "| indexes. These indexes have different internal\n"
				   "| formats between your old and new clusters so\n"
				   "| they must be reindexed with the REINDEX command.\n"
				   "| The file:\n"
				   "| \t%s\n"
				   "| when executed by psql by the database super-user\n"
				   "| will recreate all invalid indexes; until then,\n"
				   "| none of these indexes will be used.\n\n",
				   output_path);
	}
	else
		check_ok(ctx);
}
/*
 * old_8_3_invalidate_bpchar_pattern_ops_indexes()
 *	8.3 -> 8.4
 *	8.4 bpchar_pattern_ops no longer sorts based on trailing spaces
 *
 *	In check mode, only reports affected indexes; otherwise also marks them
 *	invalid in pg_index and writes a REINDEX script.
 */
void
old_8_3_invalidate_bpchar_pattern_ops_indexes(migratorContext *ctx, bool check_mode,
											  Cluster whichCluster)
{
	ClusterInfo *active_cluster = (whichCluster == CLUSTER_OLD) ?
	&ctx->old : &ctx->new;
	int			dbnum;
	FILE	   *script = NULL;		/* REINDEX script, opened on first hit */
	bool		found = false;
	char		output_path[MAXPGPATH];

	prep_status(ctx, "Checking for bpchar_pattern_ops indexes");

	snprintf(output_path, sizeof(output_path), "%s/reindex_bpchar_ops.sql",
			 ctx->output_dir);

	for (dbnum = 0; dbnum < active_cluster->dbarr.ndbs; dbnum++)
	{
		PGresult   *res;
		bool		db_used = false;
		int			ntups;
		int			rowno;
		int			i_nspname,
					i_relname;
		DbInfo	   *active_db = &active_cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(ctx, active_db->db_name, whichCluster);

		/* find bpchar_pattern_ops indexes */

		/*
		 * Do only non-hash, non-gin indexes; we already invalidated them
		 * above; no need to reindex twice.
		 *
		 * NOTE(review): the scalar subquery compared with "= ANY (i.indclass)"
		 * assumes exactly one non-hash/gin opclass named bpchar_pattern_ops;
		 * more than one would make the subquery error out -- confirm.
		 */
		res = executeQueryOrDie(ctx, conn,
								"SELECT n.nspname, c.relname "
								"FROM pg_catalog.pg_index i, "
								" pg_catalog.pg_class c, "
								" pg_catalog.pg_namespace n "
								"WHERE indexrelid = c.oid AND "
								" c.relnamespace = n.oid AND "
								" ( "
								" SELECT o.oid "
								" FROM pg_catalog.pg_opclass o, "
								" pg_catalog.pg_am a"
								" WHERE a.amname NOT IN ('hash', 'gin') AND "
								" a.oid = o.opcmethod AND "
								" o.opcname = 'bpchar_pattern_ops') "
								" = ANY (i.indclass) AND "
								SKIP_TSVECTOR_TABLES);

		ntups = PQntuples(res);
		i_nspname = PQfnumber(res, "nspname");
		i_relname = PQfnumber(res, "relname");
		for (rowno = 0; rowno < ntups; rowno++)
		{
			found = true;
			if (!check_mode)
			{
				if (script == NULL && (script = fopen(output_path, "w")) == NULL)
					pg_log(ctx, PG_FATAL, "Could not create necessary file: %s\n", output_path);
				if (!db_used)
				{
					fprintf(script, "\\connect %s\n",
							quote_identifier(ctx, active_db->db_name));
					db_used = true;
				}
				fprintf(script, "REINDEX INDEX %s.%s;\n",
						quote_identifier(ctx, PQgetvalue(res, rowno, i_nspname)),
						quote_identifier(ctx, PQgetvalue(res, rowno, i_relname)));
			}
		}

		PQclear(res);

		if (!check_mode && found)
			/* mark bpchar_pattern_ops indexes as invalid */
			PQclear(executeQueryOrDie(ctx, conn,
									  "UPDATE pg_catalog.pg_index i "
									  "SET indisvalid = false "
									  "FROM pg_catalog.pg_class c, "
									  " pg_catalog.pg_namespace n "
									  "WHERE indexrelid = c.oid AND "
									  " c.relnamespace = n.oid AND "
									  " ( "
									  " SELECT o.oid "
									  " FROM pg_catalog.pg_opclass o, "
									  " pg_catalog.pg_am a"
									  " WHERE a.amname NOT IN ('hash', 'gin') AND "
									  " a.oid = o.opcmethod AND "
									  " o.opcname = 'bpchar_pattern_ops') "
									  " = ANY (i.indclass)"));
		PQfinish(conn);
	}

	if (found)
	{
		if (!check_mode)
			fclose(script);
		report_status(ctx, PG_WARNING, "warning");
		if (check_mode)
			pg_log(ctx, PG_WARNING, "\n"
				   "| Your installation contains indexes using\n"
				   "| \"bpchar_pattern_ops\". These indexes have\n"
				   "| different internal formats between your old and\n"
				   "| new clusters so they must be reindexed with the\n"
				   "| REINDEX command. After migration, you will be\n"
				   "| given REINDEX instructions.\n\n");
		else
			pg_log(ctx, PG_WARNING, "\n"
				   "| Your installation contains indexes using\n"
				   "| \"bpchar_pattern_ops\". These indexes have\n"
				   "| different internal formats between your old and\n"
				   "| new clusters so they must be reindexed with the\n"
				   "| REINDEX command. The file:\n"
				   "| \t%s\n"
				   "| when executed by psql by the database super-user\n"
				   "| will recreate all invalid indexes; until then,\n"
				   "| none of these indexes will be used.\n\n",
				   output_path);
	}
	else
		check_ok(ctx);
}
/*
 * old_8_3_create_sequence_script()
 *	8.3 -> 8.4
 *	8.4 added the column "start_value" to all sequences.  For this reason,
 *	we don't transfer sequence files but instead use the CREATE SEQUENCE
 *	command from the schema dump, and use setval() to restore the sequence
 *	value and 'is_called' from the old database.  This is safe to run
 *	by pg_upgrade because sequence files are not transfered from the old
 *	server, even in link mode.
 *
 *	Returns a malloc'd path to the generated script (caller frees), or
 *	NULL if no sequences were found.
 */
char *
old_8_3_create_sequence_script(migratorContext *ctx, Cluster whichCluster)
{
	ClusterInfo *cluster = (whichCluster == CLUSTER_OLD) ?
	&ctx->old : &ctx->new;
	char	   *script_path = pg_malloc(ctx, MAXPGPATH);
	FILE	   *script = NULL;		/* opened lazily, on first sequence */
	bool		have_sequences = false;
	int			dbnum;

	snprintf(script_path, MAXPGPATH, "%s/adjust_sequences.sql", ctx->output_dir);

	prep_status(ctx, "Creating script to adjust sequences");

	for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++)
	{
		DbInfo	   *cur_db = &cluster->dbarr.dbs[dbnum];
		PGconn	   *conn = connectToServer(ctx, cur_db->db_name, whichCluster);
		PGresult   *res;
		bool		wrote_connect = false;
		int			nrows;
		int			row;
		int			col_nsp;
		int			col_rel;

		/* Find any sequences */
		res = executeQueryOrDie(ctx, conn,
								"SELECT n.nspname, c.relname "
								"FROM pg_catalog.pg_class c, "
								" pg_catalog.pg_namespace n "
								"WHERE c.relkind = 'S' AND "
								" c.relnamespace = n.oid AND "
								" n.nspname != 'pg_catalog' AND "
								" n.nspname != 'information_schema'");

		nrows = PQntuples(res);
		col_nsp = PQfnumber(res, "nspname");
		col_rel = PQfnumber(res, "relname");
		for (row = 0; row < nrows; row++)
		{
			PGresult   *seq_res;
			int			col_last_value;
			int			col_is_called;
			const char *nspname = PQgetvalue(res, row, col_nsp);
			const char *relname = PQgetvalue(res, row, col_rel);

			have_sequences = true;

			if (script == NULL && (script = fopen(script_path, "w")) == NULL)
				pg_log(ctx, PG_FATAL, "Could not create necessary file: %s\n", script_path);
			if (!wrote_connect)
			{
				fprintf(script, "\\connect %s\n\n",
						quote_identifier(ctx, cur_db->db_name));
				wrote_connect = true;
			}

			/* Find the desired sequence */
			seq_res = executeQueryOrDie(ctx, conn,
										"SELECT s.last_value, s.is_called "
										"FROM %s.%s s",
										quote_identifier(ctx, nspname),
										quote_identifier(ctx, relname));

			/* selecting from a sequence relation yields exactly one row */
			assert(PQntuples(seq_res) == 1);
			col_last_value = PQfnumber(seq_res, "last_value");
			col_is_called = PQfnumber(seq_res, "is_called");
			fprintf(script, "SELECT setval('%s.%s', %s, '%s');\n",
					quote_identifier(ctx, nspname), quote_identifier(ctx, relname),
					PQgetvalue(seq_res, 0, col_last_value), PQgetvalue(seq_res, 0, col_is_called));
			PQclear(seq_res);
		}

		if (wrote_connect)
			fprintf(script, "\n");

		PQclear(res);
		PQfinish(conn);
	}

	if (have_sequences)
	{
		fclose(script);
		check_ok(ctx);
		return script_path;
	}

	check_ok(ctx);
	pg_free(script_path);
	return NULL;
}
<!-- $PostgreSQL: pgsql/doc/src/sgml/contrib.sgml,v 1.16 2010/01/28 23:59:52 adunstan Exp $ --> <!-- $PostgreSQL: pgsql/doc/src/sgml/contrib.sgml,v 1.17 2010/05/12 02:19:11 momjian Exp $ -->
<appendix id="contrib"> <appendix id="contrib">
<title>Additional Supplied Modules</title> <title>Additional Supplied Modules</title>
...@@ -110,6 +110,7 @@ psql -d dbname -f <replaceable>SHAREDIR</>/contrib/<replaceable>module</>.sql ...@@ -110,6 +110,7 @@ psql -d dbname -f <replaceable>SHAREDIR</>/contrib/<replaceable>module</>.sql
&pgstatstatements; &pgstatstatements;
&pgstattuple; &pgstattuple;
&pgtrgm; &pgtrgm;
&pgupgrade;
&seg; &seg;
&contrib-spi; &contrib-spi;
&sslinfo; &sslinfo;
......
<!-- $PostgreSQL: pgsql/doc/src/sgml/filelist.sgml,v 1.67 2010/02/22 11:47:30 heikki Exp $ --> <!-- $PostgreSQL: pgsql/doc/src/sgml/filelist.sgml,v 1.68 2010/05/12 02:19:11 momjian Exp $ -->
<!entity history SYSTEM "history.sgml"> <!entity history SYSTEM "history.sgml">
<!entity info SYSTEM "info.sgml"> <!entity info SYSTEM "info.sgml">
...@@ -122,6 +122,7 @@ ...@@ -122,6 +122,7 @@
<!entity pgstatstatements SYSTEM "pgstatstatements.sgml"> <!entity pgstatstatements SYSTEM "pgstatstatements.sgml">
<!entity pgstattuple SYSTEM "pgstattuple.sgml"> <!entity pgstattuple SYSTEM "pgstattuple.sgml">
<!entity pgtrgm SYSTEM "pgtrgm.sgml"> <!entity pgtrgm SYSTEM "pgtrgm.sgml">
<!entity pgupgrade SYSTEM "pgupgrade.sgml">
<!entity seg SYSTEM "seg.sgml"> <!entity seg SYSTEM "seg.sgml">
<!entity contrib-spi SYSTEM "contrib-spi.sgml"> <!entity contrib-spi SYSTEM "contrib-spi.sgml">
<!entity sslinfo SYSTEM "sslinfo.sgml"> <!entity sslinfo SYSTEM "sslinfo.sgml">
......
<!-- $PostgreSQL: pgsql/doc/src/sgml/pgupgrade.sgml,v 1.1 2010/05/12 02:19:11 momjian Exp $ -->
<sect1 id="pgupgrade">
<title>pg_upgrade</title>
<indexterm zone="pgupgrade">
<primary>pg_upgrade</primary>
</indexterm>
<para>
<application>pg_upgrade</> (formerly called pg_migrator) allows data
stored in Postgres data files to be migrated to a later Postgres
major version without the data dump/reload typically required for
major version upgrades, e.g. from 8.4.7 to the current major release
of Postgres. It is not required for minor version upgrades, e.g.
9.0.1 -> 9.0.4.
</para>
<sect2>
<title>Supported Versions</title>
<para>
pg_upgrade supports upgrades from 8.3.X and later to the current
major release of Postgres, including snapshot and alpha releases.
pg_upgrade also supports upgrades from EnterpriseDB's Postgres Plus
Advanced Server.
</para>
</sect2>
<sect2>
<title>Upgrade Steps</title>
<orderedlist>
<listitem>
<para>
Optionally move the old cluster
</para>
<para>
If you are using a version-specific PostgreSQL install directory, e.g.
/opt/PostgreSQL/8.4, you do not need to move the old cluster. The
one-click installers all use version-specific install directories.
</para>
<para>
If your PostgreSQL install directory is not version-specific, e.g.
/usr/local/pgsql, it is necessary to move the current Postgres install
directory so it does not interfere with the new Postgres installation.
Once the current Postgres server is shut down, it is safe to rename the
Postgres install directory; assuming the old directory is
/usr/local/pgsql, you can do:
<programlisting>
mv /usr/local/pgsql /usr/local/pgsql.old
</programlisting>
to rename the directory.
</para>
<para>
If you are using tablespaces and migrating to 8.4 or earlier, there must
be sufficient directory permissions to allow pg_upgrade to rename each
tablespace directory to add a ".old" suffix.
</para>
</listitem>
<listitem>
<para>
For PostgreSQL source installs, build the new PostgreSQL version
</para>
<para>
Build the new Postgres source with configure flags that are compatible
with the old cluster. pg_upgrade will check pg_controldata to make
sure all settings are compatible before starting the upgrade.
</para>
</listitem>
<listitem>
<para>
Install the new Postgres binaries
</para>
<para>
Install the new server's binaries and support files. You can use the
same port numbers for both clusters, typically 5432, because the old and
new clusters will not be running at the same time.
</para>
<para>
For source installs, if you wish to install the new server in a custom
location, use 'prefix':
<programlisting>
gmake prefix=/usr/local/pgsql.new install
</programlisting>
</para>
</listitem>
<listitem>
<para>
Initialize the new PostgreSQL cluster
</para>
<para>
Initialize the new cluster using initdb. Again, use compatible initdb
flags that match the old cluster (pg_upgrade will check that too.) Many
prebuilt installers do this step automatically. There is no need to
start the new cluster.
</para>
<para>
If migrating EnterpriseDB's Postgres Plus Advanced Server, you must:
<itemizedlist>
<listitem>
<para>
<emphasis>not</> install <literal>sample tables and procedures/functions</>
in the new server
</para>
</listitem>
<listitem>
<para>
delete the empty <literal>edb</> schema in the <literal>enterprisedb</> database
</para>
</listitem>
<listitem>
<para>
copy dbserver/lib/pgmemcache.so from the old server
to the new server (AS8.3 to AS8.3R2 migrations only)
</para>
</listitem>
</itemizedlist>
</para>
</listitem>
<listitem>
<para>
Install custom shared object files (or DLLs)
</para>
<para>
Install any custom shared object files (or DLLs) used by the old cluster
into the new cluster, e.g. pgcrypto.so, whether they are from /contrib
or some other source. Do not install the schema definitions, e.g.
pgcrypto.sql --- these will be migrated from the old cluster.
</para>
</listitem>
<listitem>
<para>
Adjust authentication
</para>
<para>
pg_upgrade will connect to the old and new servers several times,
so you might want to set authentication to <literal>trust</> in
<filename>pg_hba.conf</>, or if using <literal>md5</> authentication,
use a <filename>pgpass</> file to avoid being prompted repeatedly
for a password.
</para>
</listitem>
<listitem>
<para>
Stop both servers
</para>
<para>
Make sure both database servers are stopped, e.g. on Unix:
<programlisting>
pg_ctl --pgdata /opt/PostgreSQL/8.4 stop
pg_ctl --pgdata /opt/PostgreSQL/8.5 stop
</programlisting>
or on Windows
<programlisting>
NET STOP postgresql-8.4
NET STOP postgresql-9.0
</programlisting>
or
<programlisting>
NET STOP pgsql-8.3 (different service name)
</programlisting>
</para>
</listitem>
<listitem>
<para>
Run pg_upgrade
</para>
<para>
Always run the pg_upgrade binary in the new server, not the old one.
pg_upgrade requires the specification of the old and new cluster's
PGDATA and executable (/bin) directories. You can also specify separate
user and port values, and whether you want the data linked instead of
copied (the default). If you use linking, the migration will be much
faster (no data copying), but you will no longer be able to access your
old cluster once you start the new cluster after the upgrade. See
pg_upgrade --help for a full list of options.
</para>
<para>
For Windows users, you must be logged into an administrative account, and
then start a shell as the 'postgres' user and set the proper path:
<programlisting>
RUNAS /USER:postgres "CMD.EXE"
SET PATH=%PATH%;C:\Program Files\PostgreSQL\8.5\bin;
</programlisting>
and then run pg_upgrade with quoted directories, e.g.:
<programlisting>
pg_upgrade.exe
--old-datadir "C:/Program Files/PostgreSQL/8.4/data"
--new-datadir "C:/Program Files/PostgreSQL/8.5/data"
--old-bindir "C:/Program Files/PostgreSQL/8.4/bin"
--new-bindir "C:/Program Files/PostgreSQL/8.5/bin"
</programlisting>
Once started, pg_upgrade will verify the two clusters are compatible
and then do the migration. You can use pg_upgrade <option>--check</>
to perform only the checks, even if the old server is still
running. pg_upgrade <option>--check</> will also outline any
manual adjustments you will need to make after the migration.
</para>
<para>
Obviously, no one should be accessing the clusters during the migration.
</para>
<para>
If an error occurs while restoring the database schema, pg_upgrade will
exit and you will have to revert to the old cluster as outlined in step
#15 below. To try pg_upgrade again, you will need to modify the old
cluster so the pg_upgrade schema restore succeeds. If the problem is a
/contrib module, you might need to uninstall the /contrib module from
the old cluster and install it in the new cluster after the migration,
assuming the module is not being used to store user data.
</para>
</listitem>
<listitem>
<para>
Restore <filename>pg_hba.conf</>
</para>
<para>
If you modified <filename>pg_hba.conf</> to use <literal>trust</>,
restore its original authentication settings.
</para>
</listitem>
<listitem>
<para>
Post-Migration processing
</para>
<para>
If any post-migration processing is required, pg_upgrade will issue
warnings as it completes. It will also generate script files that must
be run by the administrator. The script files will connect to each
database that needs post-migration processing. Each script should be
run using:
<programlisting>
psql --username postgres --file script.sql postgres
</programlisting>
The scripts can be run in any order and can be deleted once they have
been run.
</para>
<para>
In general it is unsafe to access tables referenced in rebuild scripts
until the rebuild scripts have run to completion; doing so could yield
incorrect results or poor performance. Tables not referenced in rebuild
scripts can be accessed immediately.
</para>
</listitem>
<listitem>
<para>
Statistics
</para>
<para>
Because optimizer statistics are not transferred by pg_upgrade, you will
be instructed to run a command to regenerate that information at the end
of the migration.
</para>
</listitem>
<listitem>
<para>
Delete old cluster
</para>
<para>
Once you are satisfied with the upgrade, you can delete the old
cluster's data directories by running the script mentioned when
pg_upgrade completes. You will need to manually delete the old install
directories, e.g. /bin, /share.
</para>
</listitem>
<listitem>
<para>
Reverting to old cluster
</para>
<para>
If, after running pg_upgrade, you wish to revert to the old cluster,
there are several options.
</para>
<para>
If you ran pg_upgrade with <option>--check</>, no modifications
were made to the old cluster and you can re-use it anytime.
</para>
<para>
If you ran pg_upgrade with <option>--link</>, the data files
are shared between the old and new cluster. If you started
the new cluster, the new server has written to those shared
files and it is unsafe to use the old cluster.
</para>
<para>
If you ran pg_upgrade <emphasis>without</> <option>--link</>
or did not start the new server, the old cluster was not
modified except that an <literal>.old</> suffix was appended
to <filename>$PGDATA/global/pg_control</> and perhaps tablespace
directories. To reuse the old cluster, remove the ".old"
suffix from <filename>$PGDATA/global/pg_control</> and, if
migrating to 8.4 or earlier, remove the tablespace directories
created by the migration and remove the ".old" suffix from
the tablespace directory names; then you can restart the old
cluster.
</para>
</listitem>
</orderedlist>
</sect2>
<sect2>
<title>Limitations In Migrating <emphasis>from</> PostgreSQL 8.3</title>
<para>
pg_upgrade will not work for a migration from 8.3 if a user column
is defined as:
<itemizedlist>
<listitem>
<para>
a <type>tsquery</> data type
</para>
</listitem>
<listitem>
<para>
data type <type>name</> and is not the first column
</para>
</listitem>
</itemizedlist>
</para>
<para>
You must drop any such columns and migrate them manually.
</para>
<para>
pg_upgrade will require a table rebuild if:
<itemizedlist>
<listitem>
<para>
a user column is of data type tsvector
</para>
</listitem>
</itemizedlist>
</para>
<para>
pg_upgrade will require a reindex if:
<itemizedlist>
<listitem>
<para>
an index is of type hash or gin
</para>
</listitem>
<listitem>
<para>
an index uses <function>bpchar_pattern_ops</>
</para>
</listitem>
</itemizedlist>
</para>
<para>
Also, the default datetime storage format changed to integer after
Postgres 8.3. pg_upgrade will check that the datetime storage format
used by the old and new clusters match. Make sure your new cluster is
built with the configure flag <option>--disable-integer-datetimes</>.
</para>
<para>
For Windows users, note that due to different integer datetimes settings
used by the one-click installer and the MSI installer, it is only
possible to upgrade from version 8.3 of the one-click distribution to
version 8.4 of the one-click distribution. It is not possible to upgrade
from the MSI installer to the one-click installer.
</para>
<para>
All failure, rebuild, and reindex cases will be reported by pg_upgrade
if they affect your installation; post-migration scripts to rebuild
tables and indexes will be automatically generated.
</para>
<para>
For deployment testing, create a schema-only copy of the old cluster,
insert dummy data, and migrate that.
</para>
<para>
If you want to use link mode and you don't want your old cluster
to be modified when the new cluster is started, make a copy of the
old cluster and migrate that with link mode. To make a valid copy
of the old cluster, use <application>rsync</> to create a dirty
copy of the old cluster while the server is running, then shut down
the old server and run rsync again to update the copy with any
changes to make it consistent.
</para>
</sect2>
</sect1>
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment