Commit ff992c07 authored by Stephen Frost

pg_upgrade: Fix large object COMMENTS, SECURITY LABELS

When performing a pg_upgrade, we copy the files behind pg_largeobject
and pg_largeobject_metadata, allowing us to avoid having to dump out and
reload the actual data for large objects and their ACLs.

Unfortunately, that isn't all of the information which can be associated
with large objects.  Currently, we also support COMMENTs and SECURITY
LABELs with large objects and these were being silently dropped during a
pg_upgrade as pg_dump would skip everything having to do with a large
object and pg_upgrade only copied the tables mentioned to the new
cluster.
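
For illustration only (not part of this commit; the OID, comment text, and
label value are arbitrary examples, and SECURITY LABEL requires a label
provider such as sepgsql to be loaded), this is the kind of per-object
metadata that lives outside those two tables and therefore depends on
pg_dump rather than on the file copy:

SELECT lo_create(4001);
COMMENT ON LARGE OBJECT 4001 IS 'scanned contract, signed copy';
SECURITY LABEL FOR selinux ON LARGE OBJECT 4001
    IS 'system_u:object_r:sepgsql_blob_t:s0';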

As the file copies happen after the catalog dump and reload, we can't
simply include the COMMENTs and SECURITY LABELs in pg_dump's
binary-upgrade output; we also have to include the actual large object
definition.  With the definition, comments, and security labels in the
pg_dump output and the file copies performed by pg_upgrade, all of the
data and metadata associated with large objects can be pulled forward
successfully across a pg_upgrade.
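
A quick way to sanity-check the new cluster afterwards (a sketch only,
using the same example OID as above; obj_description(), the pg_seclabels
view, and lo_get() are standard server functions/views):

SELECT obj_description(4001, 'pg_largeobject');      -- comment, restored from the dump
SELECT provider, label FROM pg_seclabels
 WHERE objtype = 'large object' AND objoid = 4001;   -- security label, restored from the dump
SELECT length(lo_get(4001));                         -- data, carried over by the file copy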

In 9.6 and master, we can simply adjust the dump bitmask to indicate
which components we don't want.  In 9.5 and earlier, we have to put
explicit checks in dumpBlob() and dumpBlobs() to not include the ACL
or the data when in binary-upgrade mode.

The privileges regression test is adjusted to allow a new test
(large_object.sql) to be added, which explicitly leaves a large object
with a comment in place to provide coverage of that case with
pg_upgrade.

Back-patch to all supported branches.

Discussion: https://postgr.es/m/20170221162655.GE9812@tamriel.snowman.net
parent a8df75b0
@@ -120,6 +120,7 @@ typedef struct _restoreOptions
     int         enable_row_security;
     int         sequence_data;      /* dump sequence data even in schema-only mode */
     int         include_subscriptions;
+    int         binary_upgrade;
 } RestoreOptions;

 typedef struct _dumpOptions
...
@@ -2874,7 +2874,15 @@ _tocEntryRequired(TocEntry *te, teSection curSection, RestoreOptions *ropt)
     /* Mask it if we only want schema */
     if (ropt->schemaOnly)
     {
-        if (!(ropt->sequence_data && strcmp(te->desc, "SEQUENCE SET") == 0))
+        /*
+         * In binary-upgrade mode, even with schema-only set, we do not mask
+         * out large objects.  Only large object definitions, comments and
+         * other information should be generated in binary-upgrade mode (not
+         * the actual data).
+         */
+        if (!(ropt->sequence_data && strcmp(te->desc, "SEQUENCE SET") == 0) &&
+            !(ropt->binary_upgrade && strcmp(te->desc, "BLOB") == 0) &&
+            !(ropt->binary_upgrade && strncmp(te->tag, "LARGE OBJECT ", 13) == 0))
             res = res & REQ_SCHEMA;
     }
...
@@ -772,7 +772,15 @@ main(int argc, char **argv)
     if (dopt.schemaOnly && dopt.sequence_data)
         getTableData(&dopt, tblinfo, numTables, dopt.oids, RELKIND_SEQUENCE);

-    if (dopt.outputBlobs)
+    /*
+     * In binary-upgrade mode, we do not have to worry about the actual blob
+     * data or the associated metadata that resides in the pg_largeobject and
+     * pg_largeobject_metadata tables, respectively.
+     *
+     * However, we do need to collect blob information as there may be
+     * comments or other information on blobs that we do need to dump out.
+     */
+    if (dopt.outputBlobs || dopt.binary_upgrade)
         getBlobs(fout);

     /*
@@ -852,6 +860,7 @@ main(int argc, char **argv)
     ropt->enable_row_security = dopt.enable_row_security;
     ropt->sequence_data = dopt.sequence_data;
     ropt->include_subscriptions = dopt.include_subscriptions;
+    ropt->binary_upgrade = dopt.binary_upgrade;

     if (compressLevel == -1)
         ropt->compression = 0;
@@ -2900,6 +2909,20 @@ getBlobs(Archive *fout)
             PQgetisnull(res, i, i_initlomacl) &&
             PQgetisnull(res, i, i_initrlomacl))
             binfo[i].dobj.dump &= ~DUMP_COMPONENT_ACL;
+
+        /*
+         * In binary-upgrade mode for blobs, we do *not* dump out the data or
+         * the ACLs, should any exist.  The data and ACL (if any) will be
+         * copied by pg_upgrade, which simply copies the pg_largeobject and
+         * pg_largeobject_metadata tables.
+         *
+         * We *do* dump out the definition of the blob because we need that to
+         * make the restoration of the comments, and anything else, work since
+         * pg_upgrade copies the files behind pg_largeobject and
+         * pg_largeobject_metadata after the dump is restored.
+         */
+        if (dopt->binary_upgrade)
+            binfo[i].dobj.dump &= ~(DUMP_COMPONENT_DATA | DUMP_COMPONENT_ACL);
     }

     /*
@@ -8828,7 +8851,8 @@ dumpComment(Archive *fout, const char *target,
     }
     else
     {
-        if (dopt->schemaOnly)
+        /* We do dump blob comments in binary-upgrade mode */
+        if (dopt->schemaOnly && !dopt->binary_upgrade)
             return;
     }
@@ -14223,7 +14247,8 @@ dumpSecLabel(Archive *fout, const char *target,
     }
     else
     {
-        if (dopt->schemaOnly)
+        /* We do dump blob security labels in binary-upgrade mode */
+        if (dopt->schemaOnly && !dopt->binary_upgrade)
             return;
     }
...
@@ -39,11 +39,17 @@ my %pgdump_runs = (
     binary_upgrade => {
         dump_cmd => [
             'pg_dump',
-            "--file=$tempdir/binary_upgrade.sql",
+            '--format=custom',
+            "--file=$tempdir/binary_upgrade.dump",
             '--schema-only',
             '--binary-upgrade',
             '-d', 'postgres',    # alternative way to specify database
-        ], },
+        ],
+        restore_cmd => [
+            'pg_restore', '-Fc',
+            '--verbose',
+            "--file=$tempdir/binary_upgrade.sql",
+            "$tempdir/binary_upgrade.dump", ], },
     clean => {
         dump_cmd => [
             'pg_dump',
@@ -334,6 +340,7 @@ my %tests = (
         all_runs => 1,
         regexp   => qr/^ALTER LARGE OBJECT \d+ OWNER TO .*;/m,
         like     => {
+            binary_upgrade          => 1,
             clean                   => 1,
             clean_if_exists         => 1,
             column_inserts          => 1,
@@ -348,7 +355,6 @@ my %tests = (
             section_pre_data        => 1,
             test_schema_plus_blobs  => 1, },
         unlike => {
-            binary_upgrade           => 1,
             no_blobs                 => 1,
             no_owner                 => 1,
             only_dump_test_schema    => 1,
@@ -666,6 +672,7 @@ my %tests = (
             'SELECT pg_catalog.lo_from_bytea(0, \'\\x310a320a330a340a350a360a370a380a390a\');',
         regexp => qr/^SELECT pg_catalog\.lo_create\('\d+'\);/m,
         like   => {
+            binary_upgrade          => 1,
             clean                   => 1,
             clean_if_exists         => 1,
             column_inserts          => 1,
@@ -681,7 +688,6 @@ my %tests = (
             section_pre_data        => 1,
             test_schema_plus_blobs  => 1, },
         unlike => {
-            binary_upgrade           => 1,
             no_blobs                 => 1,
             only_dump_test_schema    => 1,
             only_dump_test_table     => 1,
...
-- This is more-or-less DROP IF EXISTS LARGE OBJECT 3001;
WITH unlink AS (SELECT lo_unlink(loid) FROM pg_largeobject WHERE loid = 3001) SELECT 1;
 ?column? 
----------
        1
(1 row)

-- Test creation of a large object and leave it for testing pg_upgrade
SELECT lo_create(3001);
 lo_create 
-----------
      3001
(1 row)

COMMENT ON LARGE OBJECT 3001 IS 'testing comments';
@@ -12,7 +12,7 @@ DROP ROLE IF EXISTS regress_user3;
 DROP ROLE IF EXISTS regress_user4;
 DROP ROLE IF EXISTS regress_user5;
 DROP ROLE IF EXISTS regress_user6;
-SELECT lo_unlink(oid) FROM pg_largeobject_metadata;
+SELECT lo_unlink(oid) FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid;
  lo_unlink 
 -----------
 (0 rows)
@@ -1173,11 +1173,11 @@ SELECT lo_unlink(2002);
 \c -
 -- confirm ACL setting
-SELECT oid, pg_get_userbyid(lomowner) ownername, lomacl FROM pg_largeobject_metadata;
+SELECT oid, pg_get_userbyid(lomowner) ownername, lomacl FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid;
  oid  |   ownername   |                                             lomacl
 ------+---------------+------------------------------------------------------------------------------------------------
- 1002 | regress_user1 | 
  1001 | regress_user1 | {regress_user1=rw/regress_user1,=rw/regress_user1}
+ 1002 | regress_user1 | 
  1003 | regress_user1 | {regress_user1=rw/regress_user1,regress_user2=r/regress_user1}
  1004 | regress_user1 | {regress_user1=rw/regress_user1,regress_user2=rw/regress_user1}
  1005 | regress_user1 | {regress_user1=rw/regress_user1,regress_user2=r*w/regress_user1,regress_user3=r/regress_user2}
@@ -1546,7 +1546,7 @@ DROP TABLE atest6;
 DROP TABLE atestc;
 DROP TABLE atestp1;
 DROP TABLE atestp2;
-SELECT lo_unlink(oid) FROM pg_largeobject_metadata;
+SELECT lo_unlink(oid) FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid;
  lo_unlink 
 -----------
          1
...
@@ -84,7 +84,7 @@ test: select_into select_distinct select_distinct_on select_implicit select_havi
 # ----------
 # Another group of parallel tests
 # ----------
-test: brin gin gist spgist privileges init_privs security_label collate matview lock replica_identity rowsecurity object_address tablesample groupingsets drop_operator
+test: brin gin gist spgist privileges init_privs security_label collate matview lock replica_identity rowsecurity object_address tablesample groupingsets drop_operator large_object

 # ----------
 # Another group of parallel tests
...
@@ -116,6 +116,7 @@ test: object_address
 test: tablesample
 test: groupingsets
 test: drop_operator
+test: large_object
 test: alter_generic
 test: alter_operator
 test: misc
...
-- This is more-or-less DROP IF EXISTS LARGE OBJECT 3001;
WITH unlink AS (SELECT lo_unlink(loid) FROM pg_largeobject WHERE loid = 3001) SELECT 1;
-- Test creation of a large object and leave it for testing pg_upgrade
SELECT lo_create(3001);
COMMENT ON LARGE OBJECT 3001 IS 'testing comments';
@@ -17,7 +17,7 @@ DROP ROLE IF EXISTS regress_user4;
 DROP ROLE IF EXISTS regress_user5;
 DROP ROLE IF EXISTS regress_user6;
-SELECT lo_unlink(oid) FROM pg_largeobject_metadata;
+SELECT lo_unlink(oid) FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid;
 RESET client_min_messages;
@@ -729,7 +729,7 @@ SELECT lo_unlink(2002);
 \c -
 -- confirm ACL setting
-SELECT oid, pg_get_userbyid(lomowner) ownername, lomacl FROM pg_largeobject_metadata;
+SELECT oid, pg_get_userbyid(lomowner) ownername, lomacl FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid;
 SET SESSION AUTHORIZATION regress_user3;
@@ -960,7 +960,7 @@ DROP TABLE atestc;
 DROP TABLE atestp1;
 DROP TABLE atestp2;
-SELECT lo_unlink(oid) FROM pg_largeobject_metadata;
+SELECT lo_unlink(oid) FROM pg_largeobject_metadata WHERE oid >= 1000 AND oid < 3000 ORDER BY oid;
 DROP GROUP regress_group1;
 DROP GROUP regress_group2;
...