Commit 7a28de20 authored by Tom Lane's avatar Tom Lane

pg_dump can now dump large objects even in plain-text output mode, by

using the recently added lo_create() function.  The restore logic in
pg_restore is greatly simplified as well, since there's no need anymore
to try to adjust database references to match a new set of blob OIDs.
parent b49d871f
<!-- <!--
$PostgreSQL: pgsql/doc/src/sgml/backup.sgml,v 2.67 2005/06/21 04:02:29 tgl Exp $ $PostgreSQL: pgsql/doc/src/sgml/backup.sgml,v 2.68 2005/06/21 20:45:43 tgl Exp $
--> -->
<chapter id="backup"> <chapter id="backup">
<title>Backup and Restore</title> <title>Backup and Restore</title>
...@@ -88,9 +88,7 @@ pg_dump <replaceable class="parameter">dbname</replaceable> &gt; <replaceable cl ...@@ -88,9 +88,7 @@ pg_dump <replaceable class="parameter">dbname</replaceable> &gt; <replaceable cl
When your database schema relies on OIDs (for instance as foreign When your database schema relies on OIDs (for instance as foreign
keys) you must instruct <application>pg_dump</> to dump the OIDs keys) you must instruct <application>pg_dump</> to dump the OIDs
as well. To do this, use the <option>-o</option> command line as well. To do this, use the <option>-o</option> command line
option. <quote>Large objects</> are not dumped by default, option.
either. See <xref linkend="app-pgdump">'s reference page if you
use large objects.
</para> </para>
</important> </important>
...@@ -267,28 +265,6 @@ pg_dump -Fc <replaceable class="parameter">dbname</replaceable> &gt; <replaceabl ...@@ -267,28 +265,6 @@ pg_dump -Fc <replaceable class="parameter">dbname</replaceable> &gt; <replaceabl
</formalpara> </formalpara>
</sect2> </sect2>
<sect2 id="backup-dump-caveats">
<title>Caveats</title>
<para>
For reasons of backward compatibility, <application>pg_dump</>
does not dump large objects by default.<indexterm><primary>large
object</primary><secondary>backup</secondary></indexterm> To dump
large objects you must use either the custom or the tar output
format, and use the <option>-b</> option in
<application>pg_dump</>. See the <xref linkend="app-pgdump"> reference
page for details. The
directory <filename>contrib/pg_dumplo</> of the
<productname>PostgreSQL</> source tree also contains a program
that can dump large objects.
</para>
<para>
Please familiarize yourself with the <xref linkend="app-pgdump">
reference page.
</para>
</sect2>
</sect1> </sect1>
<sect1 id="backup-file"> <sect1 id="backup-file">
......
<!-- $PostgreSQL: pgsql/doc/src/sgml/installation.sgml,v 1.236 2005/06/21 04:02:29 tgl Exp $ --> <!-- $PostgreSQL: pgsql/doc/src/sgml/installation.sgml,v 1.237 2005/06/21 20:45:43 tgl Exp $ -->
<chapter id="installation"> <chapter id="installation">
<title><![%standalone-include[<productname>PostgreSQL</>]]> <title><![%standalone-include[<productname>PostgreSQL</>]]>
...@@ -389,14 +389,6 @@ su - postgres ...@@ -389,14 +389,6 @@ su - postgres
<application>pg_dumpall</>. <application>pg_dumpall</>.
</para> </para>
<para>
<application>pg_dumpall</application> does not
save large objects. Check
<![%standalone-include[the documentation]]>
<![%standalone-ignore[<xref linkend="backup-dump-caveats">]]>
if you need to do this.
</para>
<para> <para>
To make the backup, you can use the <application>pg_dumpall</application> To make the backup, you can use the <application>pg_dumpall</application>
command from the version you are currently running. For best command from the version you are currently running. For best
......
<!-- <!--
$PostgreSQL: pgsql/doc/src/sgml/ref/pg_dump.sgml,v 1.77 2005/05/29 03:32:18 momjian Exp $ $PostgreSQL: pgsql/doc/src/sgml/ref/pg_dump.sgml,v 1.78 2005/06/21 20:45:43 tgl Exp $
PostgreSQL documentation PostgreSQL documentation
--> -->
...@@ -60,9 +60,8 @@ PostgreSQL documentation ...@@ -60,9 +60,8 @@ PostgreSQL documentation
<xref linkend="app-pgrestore"> to rebuild the database. They <xref linkend="app-pgrestore"> to rebuild the database. They
allow <application>pg_restore</application> to be selective about allow <application>pg_restore</application> to be selective about
what is restored, or even to reorder the items prior to being what is restored, or even to reorder the items prior to being
restored. The archive formats also allow saving and restoring restored.
<quote>large objects</>, which is not possible in a script dump. The archive file formats are designed to be portable across
The archive files are also designed to be portable across
architectures. architectures.
</para> </para>
...@@ -127,17 +126,6 @@ PostgreSQL documentation ...@@ -127,17 +126,6 @@ PostgreSQL documentation
</listitem> </listitem>
</varlistentry> </varlistentry>
<varlistentry>
<term><option>-b</></term>
<term><option>--blobs</></term>
<listitem>
<para>
Include large objects in the dump. A non-text output format
must be selected.
</para>
</listitem>
</varlistentry>
<varlistentry> <varlistentry>
<term><option>-c</option></term> <term><option>-c</option></term>
<term><option>--clean</option></term> <term><option>--clean</option></term>
...@@ -600,14 +588,6 @@ CREATE DATABASE foo WITH TEMPLATE template0; ...@@ -600,14 +588,6 @@ CREATE DATABASE foo WITH TEMPLATE template0;
<application>pg_dump</application> has a few limitations: <application>pg_dump</application> has a few limitations:
<itemizedlist> <itemizedlist>
<listitem>
<para>
When dumping a single table or as plain text, <application>pg_dump</application>
does not handle large objects. Large objects must be dumped with the
entire database using one of the non-text archive formats.
</para>
</listitem>
<listitem> <listitem>
<para> <para>
When a data-only dump is chosen and the option When a data-only dump is chosen and the option
...@@ -660,17 +640,16 @@ CREATE DATABASE foo WITH TEMPLATE template0; ...@@ -660,17 +640,16 @@ CREATE DATABASE foo WITH TEMPLATE template0;
</para> </para>
<para> <para>
To dump a database called <literal>mydb</> that contains To dump a database called <literal>mydb</> to a <filename>tar</filename>
large objects to a <filename>tar</filename> file: file:
<screen> <screen>
<prompt>$</prompt> <userinput>pg_dump -Ft -b mydb &gt; db.tar</userinput> <prompt>$</prompt> <userinput>pg_dump -Ft mydb &gt; db.tar</userinput>
</screen> </screen>
</para> </para>
<para> <para>
To reload this database (with large objects) to an To reload this dump into an existing database called <literal>newdb</>:
existing database called <literal>newdb</>:
<screen> <screen>
<prompt>$</prompt> <userinput>pg_restore -d newdb db.tar</userinput> <prompt>$</prompt> <userinput>pg_restore -d newdb db.tar</userinput>
......
<!-- <!--
$PostgreSQL: pgsql/doc/src/sgml/ref/pg_dumpall.sgml,v 1.50 2005/06/21 04:02:31 tgl Exp $ $PostgreSQL: pgsql/doc/src/sgml/ref/pg_dumpall.sgml,v 1.51 2005/06/21 20:45:43 tgl Exp $
PostgreSQL documentation PostgreSQL documentation
--> -->
...@@ -43,16 +43,6 @@ PostgreSQL documentation ...@@ -43,16 +43,6 @@ PostgreSQL documentation
groups, and access permissions that apply to databases as a whole. groups, and access permissions that apply to databases as a whole.
</para> </para>
<para>
Thus, <application>pg_dumpall</application> is an integrated
solution for backing up your databases. But note a limitation:
it cannot dump <quote>large objects</quote>, since
<application>pg_dump</application> cannot dump such objects into
text files. If you have databases containing large objects,
they should be dumped using one of <application>pg_dump</application>'s
non-text output modes.
</para>
<para> <para>
Since <application>pg_dumpall</application> reads tables from all Since <application>pg_dumpall</application> reads tables from all
databases you will most likely have to connect as a database databases you will most likely have to connect as a database
......
<!-- $PostgreSQL: pgsql/doc/src/sgml/ref/pg_restore.sgml,v 1.52 2005/06/09 17:56:51 momjian Exp $ --> <!-- $PostgreSQL: pgsql/doc/src/sgml/ref/pg_restore.sgml,v 1.53 2005/06/21 20:45:43 tgl Exp $ -->
<refentry id="APP-PGRESTORE"> <refentry id="APP-PGRESTORE">
<refmeta> <refmeta>
...@@ -44,14 +44,13 @@ ...@@ -44,14 +44,13 @@
</para> </para>
<para> <para>
<application>pg_restore</application> can operate in two modes: If <application>pg_restore</application> can operate in two modes.
a database name is specified, the archive is restored directly into If a database name is specified, the archive is restored directly into
the database. (Large objects can only be restored by using such a direct the database. Otherwise, a script containing the SQL
database connection.) Otherwise, a script containing the SQL commands necessary to rebuild the database is created and written
commands necessary to rebuild the database is created (and written to a file or standard output. The script output is equivalent to
to a file or standard output), similar to the ones created by the the plain text output format of <application>pg_dump</application>.
<application>pg_dump</application> plain text format. Some of the Some of the options controlling the output are therefore analogous to
options controlling the script output are therefore analogous to
<application>pg_dump</application> options. <application>pg_dump</application> options.
</para> </para>
...@@ -541,16 +540,16 @@ CREATE DATABASE foo WITH TEMPLATE template0; ...@@ -541,16 +540,16 @@ CREATE DATABASE foo WITH TEMPLATE template0;
<title>Examples</title> <title>Examples</title>
<para> <para>
To dump a database called <literal>mydb</> that contains To dump a database called <literal>mydb</> to a <filename>tar</filename>
large objects to a <filename>tar</filename> file: file:
<screen> <screen>
<prompt>$</prompt> <userinput>pg_dump -Ft -b mydb &gt; db.tar</userinput> <prompt>$</prompt> <userinput>pg_dump -Ft mydb &gt; db.tar</userinput>
</screen> </screen>
</para> </para>
<para> <para>
To reload this database (with large objects) to an To reload this dump into an
existing database called <literal>newdb</>: existing database called <literal>newdb</>:
<screen> <screen>
......
...@@ -5,16 +5,14 @@ Notes on pg_dump ...@@ -5,16 +5,14 @@ Notes on pg_dump
2. pg_dumpall forces all pg_dump output to be text, since it also outputs text into the same output stream. 2. pg_dumpall forces all pg_dump output to be text, since it also outputs text into the same output stream.
3. The plain text output format can not be used as input into pg_restore. 3. The plain text output format cannot be used as input into pg_restore.
4. pg_dump now dumps the items in a modified OID order to try to improve relaibility of default restores.
To dump a database into the new custom format, type:
To dump a database into the next custom format, type:
pg_dump <db-name> -Fc > <backup-file> pg_dump <db-name> -Fc > <backup-file>
or, in TAR format or, to dump in TAR format
pg_dump <db-name> -Ft > <backup-file> pg_dump <db-name> -Ft > <backup-file>
...@@ -28,7 +26,7 @@ To restore, try ...@@ -28,7 +26,7 @@ To restore, try
pg_restore <backup-file> --table | less pg_restore <backup-file> --table | less
or to list in a differnet orderL or to list in a different order
pg_restore <backup-file> -l --oid --rearrange | less pg_restore <backup-file> -l --oid --rearrange | less
...@@ -59,27 +57,12 @@ or, simply: ...@@ -59,27 +57,12 @@ or, simply:
pg_restore backup.bck --use=toc.lis | psql newdbname pg_restore backup.bck --use=toc.lis | psql newdbname
BLOBs
=====
To dump blobs you must use the custom archive format (-Fc) or TAR format (-Ft), and specify the
--blobs qualifier to the pg_dump command.
To restore blobs you must use a direct database connection (--db=db-to-restore-to).
eg.
pg_dump --blob -Fc db-to-backup -f backup.bck
pg_restore backup.bck --db=db-to-restore-into
TAR TAR
=== ===
The TAR archive that pg_dump creates currently has a blank username & group for the files, The TAR archive that pg_dump creates currently has a blank username & group for the files,
but should be otherwise valid. It also includes a 'restore.sql' script which is there for but should be otherwise valid. It also includes a 'restore.sql' script which is there for
the benefit of humans. It is never used by pg_restore. the benefit of humans. The script is never used by pg_restore.
Note: the TAR format archive can only be used as input into pg_restore if it is in TAR form. Note: the TAR format archive can only be used as input into pg_restore if it is in TAR form.
(ie. you should not extract the files then expect pg_restore to work). (ie. you should not extract the files then expect pg_restore to work).
...@@ -91,6 +74,3 @@ the BLOB files at the end. ...@@ -91,6 +74,3 @@ the BLOB files at the end.
Philip Warner, 16-Jul-2000 Philip Warner, 16-Jul-2000
pjw@rhyme.com.au pjw@rhyme.com.au
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup.h,v 1.35 2005/06/09 17:56:51 momjian Exp $ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup.h,v 1.36 2005/06/21 20:45:44 tgl Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
...@@ -152,10 +152,6 @@ extern void ArchiveEntry(Archive *AHX, ...@@ -152,10 +152,6 @@ extern void ArchiveEntry(Archive *AHX,
/* Called to write *data* to the archive */ /* Called to write *data* to the archive */
extern size_t WriteData(Archive *AH, const void *data, size_t dLen); extern size_t WriteData(Archive *AH, const void *data, size_t dLen);
/*
extern int StartBlobs(Archive* AH);
extern int EndBlobs(Archive* AH);
*/
extern int StartBlob(Archive *AH, Oid oid); extern int StartBlob(Archive *AH, Oid oid);
extern int EndBlob(Archive *AH, Oid oid); extern int EndBlob(Archive *AH, Oid oid);
......
This diff is collapsed.
...@@ -17,7 +17,7 @@ ...@@ -17,7 +17,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.h,v 1.64 2005/05/25 21:40:41 momjian Exp $ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_archiver.h,v 1.65 2005/06/21 20:45:44 tgl Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
...@@ -34,7 +34,7 @@ ...@@ -34,7 +34,7 @@
#include "libpq-fe.h" #include "libpq-fe.h"
#include "pqexpbuffer.h" #include "pqexpbuffer.h"
#define LOBBUFSIZE 32768 #define LOBBUFSIZE 16384
/* /*
* Note: zlib.h must be included *after* libpq-fe.h, because the latter may * Note: zlib.h must be included *after* libpq-fe.h, because the latter may
...@@ -88,8 +88,6 @@ typedef z_stream *z_streamp; ...@@ -88,8 +88,6 @@ typedef z_stream *z_streamp;
#define K_VERS_MAX (( (1 * 256 + 10) * 256 + 255) * 256 + 0) #define K_VERS_MAX (( (1 * 256 + 10) * 256 + 255) * 256 + 0)
/* No of BLOBs to restore in 1 TX */
#define BLOB_BATCH_SIZE 100
/* Flags to indicate disposition of offsets stored in files */ /* Flags to indicate disposition of offsets stored in files */
#define K_OFFSET_POS_NOT_SET 1 #define K_OFFSET_POS_NOT_SET 1
...@@ -239,9 +237,6 @@ typedef struct _archiveHandle ...@@ -239,9 +237,6 @@ typedef struct _archiveHandle
char *archdbname; /* DB name *read* from archive */ char *archdbname; /* DB name *read* from archive */
bool requirePassword; bool requirePassword;
PGconn *connection; PGconn *connection;
PGconn *blobConnection; /* Connection for BLOB xref */
int txActive; /* Flag set if TX active on connection */
int blobTxActive; /* Flag set if TX active on blobConnection */
int connectToDB; /* Flag to indicate if direct DB int connectToDB; /* Flag to indicate if direct DB
* connection is required */ * connection is required */
int pgCopyIn; /* Currently in libpq 'COPY IN' mode. */ int pgCopyIn; /* Currently in libpq 'COPY IN' mode. */
...@@ -250,7 +245,6 @@ typedef struct _archiveHandle ...@@ -250,7 +245,6 @@ typedef struct _archiveHandle
int loFd; /* BLOB fd */ int loFd; /* BLOB fd */
int writingBlob; /* Flag */ int writingBlob; /* Flag */
int createdBlobXref; /* Flag */
int blobCount; /* # of blobs restored */ int blobCount; /* # of blobs restored */
char *fSpec; /* Archive File Spec */ char *fSpec; /* Archive File Spec */
......
...@@ -19,7 +19,7 @@ ...@@ -19,7 +19,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_custom.c,v 1.30 2005/01/25 22:44:31 tgl Exp $ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_custom.c,v 1.31 2005/06/21 20:45:44 tgl Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
...@@ -314,10 +314,9 @@ _StartData(ArchiveHandle *AH, TocEntry *te) ...@@ -314,10 +314,9 @@ _StartData(ArchiveHandle *AH, TocEntry *te)
* called for both BLOB and TABLE data; it is the responsibility of * called for both BLOB and TABLE data; it is the responsibility of
* the format to manage each kind of data using StartBlob/StartData. * the format to manage each kind of data using StartBlob/StartData.
* *
* It should only be called from withing a DataDumper routine. * It should only be called from within a DataDumper routine.
* *
* Mandatory. * Mandatory.
*
*/ */
static size_t static size_t
_WriteData(ArchiveHandle *AH, const void *data, size_t dLen) _WriteData(ArchiveHandle *AH, const void *data, size_t dLen)
...@@ -360,7 +359,6 @@ _EndData(ArchiveHandle *AH, TocEntry *te) ...@@ -360,7 +359,6 @@ _EndData(ArchiveHandle *AH, TocEntry *te)
* It is called just prior to the dumper's DataDumper routine. * It is called just prior to the dumper's DataDumper routine.
* *
* Optional, but strongly recommended. * Optional, but strongly recommended.
*
*/ */
static void static void
_StartBlobs(ArchiveHandle *AH, TocEntry *te) _StartBlobs(ArchiveHandle *AH, TocEntry *te)
...@@ -396,7 +394,6 @@ _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid) ...@@ -396,7 +394,6 @@ _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
* Called by the archiver when the dumper calls EndBlob. * Called by the archiver when the dumper calls EndBlob.
* *
* Optional. * Optional.
*
*/ */
static void static void
_EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid) _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
...@@ -408,7 +405,6 @@ _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid) ...@@ -408,7 +405,6 @@ _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
* Called by the archiver when finishing saving all BLOB DATA. * Called by the archiver when finishing saving all BLOB DATA.
* *
* Optional. * Optional.
*
*/ */
static void static void
_EndBlobs(ArchiveHandle *AH, TocEntry *te) _EndBlobs(ArchiveHandle *AH, TocEntry *te)
...@@ -487,9 +483,6 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt) ...@@ -487,9 +483,6 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt)
break; break;
case BLK_BLOBS: case BLK_BLOBS:
if (!AH->connection)
die_horribly(AH, modulename, "large objects cannot be loaded without a database connection\n");
_LoadBlobs(AH); _LoadBlobs(AH);
break; break;
...@@ -870,7 +863,6 @@ _readBlockHeader(ArchiveHandle *AH, int *type, int *id) ...@@ -870,7 +863,6 @@ _readBlockHeader(ArchiveHandle *AH, int *type, int *id)
/* /*
* If zlib is available, then startit up. This is called from * If zlib is available, then startit up. This is called from
* StartData & StartBlob. The buffers are setup in the Init routine. * StartData & StartBlob. The buffers are setup in the Init routine.
*
*/ */
static void static void
_StartDataCompressor(ArchiveHandle *AH, TocEntry *te) _StartDataCompressor(ArchiveHandle *AH, TocEntry *te)
......
...@@ -5,7 +5,7 @@ ...@@ -5,7 +5,7 @@
* Implements the basic DB functions used by the archiver. * Implements the basic DB functions used by the archiver.
* *
* IDENTIFICATION * IDENTIFICATION
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_db.c,v 1.61 2004/11/06 19:36:01 tgl Exp $ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_db.c,v 1.62 2005/06/21 20:45:44 tgl Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
...@@ -32,7 +32,6 @@ static const char *modulename = gettext_noop("archiver (db)"); ...@@ -32,7 +32,6 @@ static const char *modulename = gettext_noop("archiver (db)");
static void _check_database_version(ArchiveHandle *AH, bool ignoreVersion); static void _check_database_version(ArchiveHandle *AH, bool ignoreVersion);
static PGconn *_connectDB(ArchiveHandle *AH, const char *newdbname, const char *newUser); static PGconn *_connectDB(ArchiveHandle *AH, const char *newdbname, const char *newUser);
static int _executeSqlCommand(ArchiveHandle *AH, PGconn *conn, PQExpBuffer qry, char *desc);
static void notice_processor(void *arg, const char *message); static void notice_processor(void *arg, const char *message);
static char *_sendSQLLine(ArchiveHandle *AH, char *qry, char *eos); static char *_sendSQLLine(ArchiveHandle *AH, char *qry, char *eos);
static char *_sendCopyLine(ArchiveHandle *AH, char *qry, char *eos); static char *_sendCopyLine(ArchiveHandle *AH, char *qry, char *eos);
...@@ -288,22 +287,9 @@ notice_processor(void *arg, const char *message) ...@@ -288,22 +287,9 @@ notice_processor(void *arg, const char *message)
/* Public interface */ /* Public interface */
/* Convenience function to send a query. Monitors result to handle COPY statements */ /* Convenience function to send a query. Monitors result to handle COPY statements */
int int
ExecuteSqlCommand(ArchiveHandle *AH, PQExpBuffer qry, char *desc, bool use_blob) ExecuteSqlCommand(ArchiveHandle *AH, PQExpBuffer qry, char *desc)
{
if (use_blob)
return _executeSqlCommand(AH, AH->blobConnection, qry, desc);
else
return _executeSqlCommand(AH, AH->connection, qry, desc);
}
/*
* Handle command execution. This is used to execute a command on more than one connection,
* but the 'pgCopyIn' setting assumes the COPY commands are ONLY executed on the primary
* setting...an error will be raised otherwise.
*/
static int
_executeSqlCommand(ArchiveHandle *AH, PGconn *conn, PQExpBuffer qry, char *desc)
{ {
PGconn *conn = AH->connection;
PGresult *res; PGresult *res;
char errStmt[DB_MAX_ERR_STMT]; char errStmt[DB_MAX_ERR_STMT];
...@@ -316,9 +302,6 @@ _executeSqlCommand(ArchiveHandle *AH, PGconn *conn, PQExpBuffer qry, char *desc) ...@@ -316,9 +302,6 @@ _executeSqlCommand(ArchiveHandle *AH, PGconn *conn, PQExpBuffer qry, char *desc)
{ {
if (PQresultStatus(res) == PGRES_COPY_IN) if (PQresultStatus(res) == PGRES_COPY_IN)
{ {
if (conn != AH->connection)
die_horribly(AH, modulename, "COPY command executed in non-primary connection\n");
AH->pgCopyIn = 1; AH->pgCopyIn = 1;
} }
else else
...@@ -478,7 +461,7 @@ _sendSQLLine(ArchiveHandle *AH, char *qry, char *eos) ...@@ -478,7 +461,7 @@ _sendSQLLine(ArchiveHandle *AH, char *qry, char *eos)
* fprintf(stderr, " sending: '%s'\n\n", * fprintf(stderr, " sending: '%s'\n\n",
* AH->sqlBuf->data); * AH->sqlBuf->data);
*/ */
ExecuteSqlCommand(AH, AH->sqlBuf, "could not execute query", false); ExecuteSqlCommand(AH, AH->sqlBuf, "could not execute query");
resetPQExpBuffer(AH->sqlBuf); resetPQExpBuffer(AH->sqlBuf);
AH->sqlparse.lastChar = '\0'; AH->sqlparse.lastChar = '\0';
...@@ -667,164 +650,6 @@ ExecuteSqlCommandBuf(ArchiveHandle *AH, void *qryv, size_t bufLen) ...@@ -667,164 +650,6 @@ ExecuteSqlCommandBuf(ArchiveHandle *AH, void *qryv, size_t bufLen)
return 1; return 1;
} }
void
FixupBlobRefs(ArchiveHandle *AH, TocEntry *te)
{
PQExpBuffer tblName;
PQExpBuffer tblQry;
PGresult *res,
*uRes;
int i,
n;
if (strcmp(te->tag, BLOB_XREF_TABLE) == 0)
return;
tblName = createPQExpBuffer();
tblQry = createPQExpBuffer();
if (te->namespace && strlen(te->namespace) > 0)
appendPQExpBuffer(tblName, "%s.",
fmtId(te->namespace));
appendPQExpBuffer(tblName, "%s",
fmtId(te->tag));
appendPQExpBuffer(tblQry,
"SELECT a.attname, t.typname FROM "
"pg_catalog.pg_attribute a, pg_catalog.pg_type t "
"WHERE a.attnum > 0 AND a.attrelid = '%s'::pg_catalog.regclass "
"AND a.atttypid = t.oid AND t.typname in ('oid', 'lo')",
tblName->data);
res = PQexec(AH->blobConnection, tblQry->data);
if (!res)
die_horribly(AH, modulename, "could not find OID columns of table \"%s\": %s",
te->tag, PQerrorMessage(AH->connection));
if ((n = PQntuples(res)) == 0)
{
/* nothing to do */
ahlog(AH, 1, "no OID type columns in table %s\n", te->tag);
}
for (i = 0; i < n; i++)
{
char *attr;
char *typname;
bool typeisoid;
attr = PQgetvalue(res, i, 0);
typname = PQgetvalue(res, i, 1);
typeisoid = (strcmp(typname, "oid") == 0);
ahlog(AH, 1, "fixing large object cross-references for %s.%s\n",
te->tag, attr);
resetPQExpBuffer(tblQry);
/*
* Note: we use explicit typename() cast style here because if we
* are dealing with a dump from a pre-7.3 database containing LO
* columns, the dump probably will not have CREATE CAST commands
* for lo<->oid conversions. What it will have is functions,
* which we will invoke as functions.
*/
/* Can't use fmtId more than once per call... */
appendPQExpBuffer(tblQry,
"UPDATE %s SET %s = ",
tblName->data, fmtId(attr));
if (typeisoid)
appendPQExpBuffer(tblQry,
"%s.newOid",
BLOB_XREF_TABLE);
else
appendPQExpBuffer(tblQry,
"%s(%s.newOid)",
fmtId(typname),
BLOB_XREF_TABLE);
appendPQExpBuffer(tblQry,
" FROM %s WHERE %s.oldOid = ",
BLOB_XREF_TABLE,
BLOB_XREF_TABLE);
if (typeisoid)
appendPQExpBuffer(tblQry,
"%s.%s",
tblName->data, fmtId(attr));
else
appendPQExpBuffer(tblQry,
"oid(%s.%s)",
tblName->data, fmtId(attr));
ahlog(AH, 10, "SQL: %s\n", tblQry->data);
uRes = PQexec(AH->blobConnection, tblQry->data);
if (!uRes)
die_horribly(AH, modulename,
"could not update column \"%s\" of table \"%s\": %s",
attr, te->tag, PQerrorMessage(AH->blobConnection));
if (PQresultStatus(uRes) != PGRES_COMMAND_OK)
die_horribly(AH, modulename,
"error while updating column \"%s\" of table \"%s\": %s",
attr, te->tag, PQerrorMessage(AH->blobConnection));
PQclear(uRes);
}
PQclear(res);
destroyPQExpBuffer(tblName);
destroyPQExpBuffer(tblQry);
}
/**********
* Convenient SQL calls
**********/
void
CreateBlobXrefTable(ArchiveHandle *AH)
{
PQExpBuffer qry = createPQExpBuffer();
/* IF we don't have a BLOB connection, then create one */
if (!AH->blobConnection)
AH->blobConnection = _connectDB(AH, NULL, NULL);
ahlog(AH, 1, "creating table for large object cross-references\n");
appendPQExpBuffer(qry, "CREATE TEMPORARY TABLE %s(oldOid pg_catalog.oid, newOid pg_catalog.oid) WITHOUT OIDS", BLOB_XREF_TABLE);
ExecuteSqlCommand(AH, qry, "could not create large object cross-reference table", true);
destroyPQExpBuffer(qry);
}
void
CreateBlobXrefIndex(ArchiveHandle *AH)
{
PQExpBuffer qry = createPQExpBuffer();
ahlog(AH, 1, "creating index for large object cross-references\n");
appendPQExpBuffer(qry, "CREATE UNIQUE INDEX %s_ix ON %s(oldOid)",
BLOB_XREF_TABLE, BLOB_XREF_TABLE);
ExecuteSqlCommand(AH, qry, "could not create index on large object cross-reference table", true);
destroyPQExpBuffer(qry);
}
void
InsertBlobXref(ArchiveHandle *AH, Oid old, Oid new)
{
PQExpBuffer qry = createPQExpBuffer();
appendPQExpBuffer(qry,
"INSERT INTO %s(oldOid, newOid) VALUES ('%u', '%u')",
BLOB_XREF_TABLE, old, new);
ExecuteSqlCommand(AH, qry, "could not create large object cross-reference entry", true);
destroyPQExpBuffer(qry);
}
void void
StartTransaction(ArchiveHandle *AH) StartTransaction(ArchiveHandle *AH)
{ {
...@@ -832,22 +657,7 @@ StartTransaction(ArchiveHandle *AH) ...@@ -832,22 +657,7 @@ StartTransaction(ArchiveHandle *AH)
appendPQExpBuffer(qry, "BEGIN"); appendPQExpBuffer(qry, "BEGIN");
ExecuteSqlCommand(AH, qry, "could not start database transaction", false); ExecuteSqlCommand(AH, qry, "could not start database transaction");
AH->txActive = true;
destroyPQExpBuffer(qry);
}
void
StartTransactionXref(ArchiveHandle *AH)
{
PQExpBuffer qry = createPQExpBuffer();
appendPQExpBuffer(qry, "BEGIN");
ExecuteSqlCommand(AH, qry,
"could not start transaction for large object cross-references", true);
AH->blobTxActive = true;
destroyPQExpBuffer(qry); destroyPQExpBuffer(qry);
} }
...@@ -859,21 +669,7 @@ CommitTransaction(ArchiveHandle *AH) ...@@ -859,21 +669,7 @@ CommitTransaction(ArchiveHandle *AH)
appendPQExpBuffer(qry, "COMMIT"); appendPQExpBuffer(qry, "COMMIT");
ExecuteSqlCommand(AH, qry, "could not commit database transaction", false); ExecuteSqlCommand(AH, qry, "could not commit database transaction");
AH->txActive = false;
destroyPQExpBuffer(qry);
}
void
CommitTransactionXref(ArchiveHandle *AH)
{
PQExpBuffer qry = createPQExpBuffer();
appendPQExpBuffer(qry, "COMMIT");
ExecuteSqlCommand(AH, qry, "could not commit transaction for large object cross-references", true);
AH->blobTxActive = false;
destroyPQExpBuffer(qry); destroyPQExpBuffer(qry);
} }
......
...@@ -2,19 +2,11 @@ ...@@ -2,19 +2,11 @@
* Definitions for pg_backup_db.c * Definitions for pg_backup_db.c
* *
* IDENTIFICATION * IDENTIFICATION
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_db.h,v 1.10 2004/03/03 21:28:54 tgl Exp $ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_db.h,v 1.11 2005/06/21 20:45:44 tgl Exp $
*/ */
#define BLOB_XREF_TABLE "pg_dump_blob_xref" /* MUST be lower case */ extern int ExecuteSqlCommand(ArchiveHandle *AH, PQExpBuffer qry, char *desc);
extern void FixupBlobRefs(ArchiveHandle *AH, TocEntry *te);
extern int ExecuteSqlCommand(ArchiveHandle *AH, PQExpBuffer qry, char *desc, bool use_blob);
extern int ExecuteSqlCommandBuf(ArchiveHandle *AH, void *qry, size_t bufLen); extern int ExecuteSqlCommandBuf(ArchiveHandle *AH, void *qry, size_t bufLen);
extern void CreateBlobXrefTable(ArchiveHandle *AH);
extern void CreateBlobXrefIndex(ArchiveHandle *AH);
extern void InsertBlobXref(ArchiveHandle *AH, Oid old, Oid new);
extern void StartTransaction(ArchiveHandle *AH); extern void StartTransaction(ArchiveHandle *AH);
extern void StartTransactionXref(ArchiveHandle *AH);
extern void CommitTransaction(ArchiveHandle *AH); extern void CommitTransaction(ArchiveHandle *AH);
extern void CommitTransactionXref(ArchiveHandle *AH);
...@@ -20,7 +20,7 @@ ...@@ -20,7 +20,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_files.c,v 1.25 2004/03/03 21:28:54 tgl Exp $ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_files.c,v 1.26 2005/06/21 20:45:44 tgl Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
...@@ -457,7 +457,6 @@ _CloseArchive(ArchiveHandle *AH) ...@@ -457,7 +457,6 @@ _CloseArchive(ArchiveHandle *AH)
* It is called just prior to the dumper's DataDumper routine. * It is called just prior to the dumper's DataDumper routine.
* *
* Optional, but strongly recommended. * Optional, but strongly recommended.
*
*/ */
static void static void
_StartBlobs(ArchiveHandle *AH, TocEntry *te) _StartBlobs(ArchiveHandle *AH, TocEntry *te)
...@@ -516,7 +515,6 @@ _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid) ...@@ -516,7 +515,6 @@ _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
* Called by the archiver when the dumper calls EndBlob. * Called by the archiver when the dumper calls EndBlob.
* *
* Optional. * Optional.
*
*/ */
static void static void
_EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid) _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
...@@ -531,7 +529,6 @@ _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid) ...@@ -531,7 +529,6 @@ _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
* Called by the archiver when finishing saving all BLOB DATA. * Called by the archiver when finishing saving all BLOB DATA.
* *
* Optional. * Optional.
*
*/ */
static void static void
_EndBlobs(ArchiveHandle *AH, TocEntry *te) _EndBlobs(ArchiveHandle *AH, TocEntry *te)
...@@ -543,5 +540,4 @@ _EndBlobs(ArchiveHandle *AH, TocEntry *te) ...@@ -543,5 +540,4 @@ _EndBlobs(ArchiveHandle *AH, TocEntry *te)
if (fclose(ctx->blobToc) != 0) if (fclose(ctx->blobToc) != 0)
die_horribly(AH, modulename, "could not close large object TOC file: %s\n", strerror(errno)); die_horribly(AH, modulename, "could not close large object TOC file: %s\n", strerror(errno));
} }
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
* pg_backup_null.c * pg_backup_null.c
* *
* Implementation of an archive that is never saved; it is used by * Implementation of an archive that is never saved; it is used by
* pg_dump to output a plain text SQL script instead of save * pg_dump to output a plain text SQL script instead of saving
* a real archive. * a real archive.
* *
* See the headers to pg_restore for more details. * See the headers to pg_restore for more details.
...@@ -17,7 +17,7 @@ ...@@ -17,7 +17,7 @@
* *
* *
* IDENTIFICATION * IDENTIFICATION
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_null.c,v 1.14 2003/12/08 16:39:05 tgl Exp $ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_backup_null.c,v 1.15 2005/06/21 20:45:44 tgl Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
...@@ -27,12 +27,21 @@ ...@@ -27,12 +27,21 @@
#include <unistd.h> /* for dup */ #include <unistd.h> /* for dup */
#include "libpq/libpq-fs.h"
static size_t _WriteData(ArchiveHandle *AH, const void *data, size_t dLen); static size_t _WriteData(ArchiveHandle *AH, const void *data, size_t dLen);
static size_t _WriteBlobData(ArchiveHandle *AH, const void *data, size_t dLen);
static void _EndData(ArchiveHandle *AH, TocEntry *te); static void _EndData(ArchiveHandle *AH, TocEntry *te);
static int _WriteByte(ArchiveHandle *AH, const int i); static int _WriteByte(ArchiveHandle *AH, const int i);
static size_t _WriteBuf(ArchiveHandle *AH, const void *buf, size_t len); static size_t _WriteBuf(ArchiveHandle *AH, const void *buf, size_t len);
static void _CloseArchive(ArchiveHandle *AH); static void _CloseArchive(ArchiveHandle *AH);
static void _PrintTocData(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt); static void _PrintTocData(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt);
static void _StartBlobs(ArchiveHandle *AH, TocEntry *te);
static void _StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
static void _EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid);
static void _EndBlobs(ArchiveHandle *AH, TocEntry *te);
/* /*
* Initializer * Initializer
...@@ -48,6 +57,17 @@ InitArchiveFmt_Null(ArchiveHandle *AH) ...@@ -48,6 +57,17 @@ InitArchiveFmt_Null(ArchiveHandle *AH)
AH->ClosePtr = _CloseArchive; AH->ClosePtr = _CloseArchive;
AH->PrintTocDataPtr = _PrintTocData; AH->PrintTocDataPtr = _PrintTocData;
AH->StartBlobsPtr = _StartBlobs;
AH->StartBlobPtr = _StartBlob;
AH->EndBlobPtr = _EndBlob;
AH->EndBlobsPtr = _EndBlobs;
/* Initialize LO buffering */
AH->lo_buf_size = LOBBUFSIZE;
AH->lo_buf = (void *) malloc(LOBBUFSIZE);
if (AH->lo_buf == NULL)
die_horribly(AH, NULL, "out of memory\n");
/* /*
* Now prevent reading... * Now prevent reading...
*/ */
...@@ -59,10 +79,8 @@ InitArchiveFmt_Null(ArchiveHandle *AH) ...@@ -59,10 +79,8 @@ InitArchiveFmt_Null(ArchiveHandle *AH)
* - Start a new TOC entry * - Start a new TOC entry
*/ */
/*------ /*
* Called by dumper via archiver from within a data dump routine * Called by dumper via archiver from within a data dump routine
* As at V1.3, this is only called for COPY FROM dfata, and BLOB data
*------
*/ */
static size_t static size_t
_WriteData(ArchiveHandle *AH, const void *data, size_t dLen) _WriteData(ArchiveHandle *AH, const void *data, size_t dLen)
...@@ -72,12 +90,91 @@ _WriteData(ArchiveHandle *AH, const void *data, size_t dLen) ...@@ -72,12 +90,91 @@ _WriteData(ArchiveHandle *AH, const void *data, size_t dLen)
return dLen; return dLen;
} }
/*
 * Data-writing hook used while a BLOB is being emitted.
 *
 * The archiver temporarily installs this in place of _WriteData (see
 * _StartBlob), so chunks of large-object data arriving from the dumper
 * are rendered as SQL: each chunk becomes a lowrite() call against the
 * object opened by the preceding lo_open().  The bytes are escaped with
 * PQescapeBytea so they survive inside a SQL string literal.
 */
static size_t
_WriteBlobData(ArchiveHandle *AH, const void *data, size_t dLen)
{
	unsigned char *escaped;
	size_t		escapedLen;

	/* Nothing to emit for an empty chunk */
	if (dLen == 0)
		return dLen;

	escaped = PQescapeBytea((const unsigned char *) data, dLen, &escapedLen);
	if (!escaped)
		die_horribly(AH, NULL, "out of memory\n");

	/* FD 0 refers to the descriptor returned by lo_open in _StartBlob */
	ahprintf(AH, "SELECT lowrite(0, '%s');\n", escaped);

	free(escaped);
	return dLen;
}
static void static void
_EndData(ArchiveHandle *AH, TocEntry *te) _EndData(ArchiveHandle *AH, TocEntry *te)
{ {
ahprintf(AH, "\n\n"); ahprintf(AH, "\n\n");
} }
/*
 * Archiver hook: beginning of the BLOB DATA section.
 *
 * Invoked just before the dumper's DataDumper routine runs.  For the
 * plain-text (null) archive we simply open a transaction, so that the
 * whole sequence of lo_create/lowrite commands restores atomically.
 *
 * Optional, but strongly recommended.
 */
static void
_StartBlobs(ArchiveHandle *AH, TocEntry *te)
{
	ahprintf(AH, "BEGIN;\n\n");
}
/*
 * Archiver hook: the dumper has begun one large object.
 *
 * Emits SQL that recreates the object with its original OID (lo_create)
 * and opens it for writing; subsequent data chunks are then routed
 * through _WriteBlobData until _EndBlob restores the normal writer.
 *
 * Mandatory.
 */
static void
_StartBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
{
	/* OID 0 is InvalidOid; a real large object always has a nonzero OID */
	if (oid == 0)
		die_horribly(AH, NULL, "invalid OID for large object\n");

	ahprintf(AH, "SELECT lo_open(lo_create(%u), %d);\n", oid, INV_WRITE);

	/* Redirect data output into lowrite() statements */
	AH->WriteDataPtr = _WriteBlobData;
}
/*
 * Archiver hook: the dumper has finished one large object.
 *
 * Restores the ordinary data writer and closes the descriptor that
 * _StartBlob's lo_open left at FD 0.
 *
 * Optional.
 */
static void
_EndBlob(ArchiveHandle *AH, TocEntry *te, Oid oid)
{
	/* Put the plain COPY-data writer back in place */
	AH->WriteDataPtr = _WriteData;

	ahprintf(AH, "SELECT lo_close(0);\n\n");
}
/*
 * Archiver hook: end of the BLOB DATA section.
 *
 * Closes the transaction opened by _StartBlobs, committing all the
 * recreated large objects at once.
 *
 * Optional.
 */
static void
_EndBlobs(ArchiveHandle *AH, TocEntry *te)
{
	ahprintf(AH, "COMMIT;\n\n");
}
/*------ /*------
* Called as part of a RestoreArchive call; for the NULL archive, this * Called as part of a RestoreArchive call; for the NULL archive, this
* just sends the data for a given TOC entry to the output. * just sends the data for a given TOC entry to the output.
...@@ -89,7 +186,15 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt) ...@@ -89,7 +186,15 @@ _PrintTocData(ArchiveHandle *AH, TocEntry *te, RestoreOptions *ropt)
if (te->dataDumper) if (te->dataDumper)
{ {
AH->currToc = te; AH->currToc = te;
if (strcmp(te->desc, "BLOBS") == 0)
_StartBlobs(AH, te);
(*te->dataDumper) ((Archive *) AH, te->dataDumperArg); (*te->dataDumper) ((Archive *) AH, te->dataDumperArg);
if (strcmp(te->desc, "BLOBS") == 0)
_EndBlobs(AH, te);
AH->currToc = NULL; AH->currToc = NULL;
} }
} }
......
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
* by PostgreSQL * by PostgreSQL
* *
* IDENTIFICATION * IDENTIFICATION
* $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.c,v 1.409 2005/06/07 14:04:48 tgl Exp $ * $PostgreSQL: pgsql/src/bin/pg_dump/pg_dump.c,v 1.410 2005/06/21 20:45:44 tgl Exp $
* *
*------------------------------------------------------------------------- *-------------------------------------------------------------------------
*/ */
...@@ -195,7 +195,7 @@ main(int argc, char **argv) ...@@ -195,7 +195,7 @@ main(int argc, char **argv)
int plainText = 0; int plainText = 0;
int outputClean = 0; int outputClean = 0;
int outputCreate = 0; int outputCreate = 0;
int outputBlobs = 0; bool outputBlobs = true;
int outputNoOwner = 0; int outputNoOwner = 0;
static int use_setsessauth = 0; static int use_setsessauth = 0;
static int disable_triggers = 0; static int disable_triggers = 0;
...@@ -258,10 +258,7 @@ main(int argc, char **argv) ...@@ -258,10 +258,7 @@ main(int argc, char **argv)
/* Set default options based on progname */ /* Set default options based on progname */
if (strcmp(progname, "pg_backup") == 0) if (strcmp(progname, "pg_backup") == 0)
{
format = "c"; format = "c";
outputBlobs = true;
}
if (argc > 1) if (argc > 1)
{ {
...@@ -287,7 +284,7 @@ main(int argc, char **argv) ...@@ -287,7 +284,7 @@ main(int argc, char **argv)
break; break;
case 'b': /* Dump blobs */ case 'b': /* Dump blobs */
outputBlobs = true; /* this is now default, so just ignore the switch */
break; break;
case 'c': /* clean (i.e., drop) schema prior to case 'c': /* clean (i.e., drop) schema prior to
...@@ -442,19 +439,8 @@ main(int argc, char **argv) ...@@ -442,19 +439,8 @@ main(int argc, char **argv)
exit(1); exit(1);
} }
if (outputBlobs && selectTableName != NULL) if (selectTableName != NULL || selectSchemaName != NULL)
{ outputBlobs = false;
write_msg(NULL, "large-object output not supported for a single table\n");
write_msg(NULL, "use a full dump instead\n");
exit(1);
}
if (outputBlobs && selectSchemaName != NULL)
{
write_msg(NULL, "large-object output not supported for a single schema\n");
write_msg(NULL, "use a full dump instead\n");
exit(1);
}
if (dumpInserts == true && oids == true) if (dumpInserts == true && oids == true)
{ {
...@@ -463,13 +449,6 @@ main(int argc, char **argv) ...@@ -463,13 +449,6 @@ main(int argc, char **argv)
exit(1); exit(1);
} }
if (outputBlobs == true && (format[0] == 'p' || format[0] == 'P'))
{
write_msg(NULL, "large-object output is not supported for plain-text dump files\n");
write_msg(NULL, "(Use a different output format.)\n");
exit(1);
}
/* open the output file */ /* open the output file */
switch (format[0]) switch (format[0])
{ {
...@@ -670,7 +649,6 @@ help(const char *progname) ...@@ -670,7 +649,6 @@ help(const char *progname)
printf(_("\nOptions controlling the output content:\n")); printf(_("\nOptions controlling the output content:\n"));
printf(_(" -a, --data-only dump only the data, not the schema\n")); printf(_(" -a, --data-only dump only the data, not the schema\n"));
printf(_(" -b, --blobs include large objects in dump\n"));
printf(_(" -c, --clean clean (drop) schema prior to create\n")); printf(_(" -c, --clean clean (drop) schema prior to create\n"));
printf(_(" -C, --create include commands to create database in dump\n")); printf(_(" -C, --create include commands to create database in dump\n"));
printf(_(" -d, --inserts dump data as INSERT, rather than COPY, commands\n")); printf(_(" -d, --inserts dump data as INSERT, rather than COPY, commands\n"));
...@@ -1340,10 +1318,6 @@ dumpEncoding(Archive *AH) ...@@ -1340,10 +1318,6 @@ dumpEncoding(Archive *AH)
* dump all blobs * dump all blobs
* *
*/ */
#define loBufSize 16384
#define loFetchSize 1000
static int static int
dumpBlobs(Archive *AH, void *arg) dumpBlobs(Archive *AH, void *arg)
{ {
...@@ -1352,7 +1326,7 @@ dumpBlobs(Archive *AH, void *arg) ...@@ -1352,7 +1326,7 @@ dumpBlobs(Archive *AH, void *arg)
PGresult *res; PGresult *res;
int i; int i;
int loFd; int loFd;
char buf[loBufSize]; char buf[LOBBUFSIZE];
int cnt; int cnt;
Oid blobOid; Oid blobOid;
...@@ -1372,13 +1346,13 @@ dumpBlobs(Archive *AH, void *arg) ...@@ -1372,13 +1346,13 @@ dumpBlobs(Archive *AH, void *arg)
check_sql_result(res, g_conn, oidQry->data, PGRES_COMMAND_OK); check_sql_result(res, g_conn, oidQry->data, PGRES_COMMAND_OK);
/* Fetch for cursor */ /* Fetch for cursor */
appendPQExpBuffer(oidFetchQry, "FETCH %d IN bloboid", loFetchSize); appendPQExpBuffer(oidFetchQry, "FETCH 1000 IN bloboid");
do do
{ {
/* Do a fetch */
PQclear(res); PQclear(res);
/* Do a fetch */
res = PQexec(g_conn, oidFetchQry->data); res = PQexec(g_conn, oidFetchQry->data);
check_sql_result(res, g_conn, oidFetchQry->data, PGRES_TUPLES_OK); check_sql_result(res, g_conn, oidFetchQry->data, PGRES_TUPLES_OK);
...@@ -1400,7 +1374,7 @@ dumpBlobs(Archive *AH, void *arg) ...@@ -1400,7 +1374,7 @@ dumpBlobs(Archive *AH, void *arg)
/* Now read it in chunks, sending data to archive */ /* Now read it in chunks, sending data to archive */
do do
{ {
cnt = lo_read(g_conn, loFd, buf, loBufSize); cnt = lo_read(g_conn, loFd, buf, LOBBUFSIZE);
if (cnt < 0) if (cnt < 0)
{ {
write_msg(NULL, "dumpBlobs(): error reading large object: %s", write_msg(NULL, "dumpBlobs(): error reading large object: %s",
...@@ -1409,16 +1383,16 @@ dumpBlobs(Archive *AH, void *arg) ...@@ -1409,16 +1383,16 @@ dumpBlobs(Archive *AH, void *arg)
} }
WriteData(AH, buf, cnt); WriteData(AH, buf, cnt);
} while (cnt > 0); } while (cnt > 0);
lo_close(g_conn, loFd); lo_close(g_conn, loFd);
EndBlob(AH, blobOid); EndBlob(AH, blobOid);
} }
} while (PQntuples(res) > 0); } while (PQntuples(res) > 0);
PQclear(res);
destroyPQExpBuffer(oidQry); destroyPQExpBuffer(oidQry);
destroyPQExpBuffer(oidFetchQry); destroyPQExpBuffer(oidFetchQry);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment