Commit d20763db authored by Tom Lane's avatar Tom Lane

Remove contrib modules that have been agreed to be obsolete.

(There are more that will be removed once they've been copied to
pgfoundry.org.)
parent 4cc7a93d
# $PostgreSQL: pgsql/contrib/Makefile,v 1.54 2005/03/12 15:36:24 neilc Exp $
# $PostgreSQL: pgsql/contrib/Makefile,v 1.55 2005/06/22 22:56:25 tgl Exp $
subdir = contrib
top_builddir = ..
......@@ -21,13 +21,9 @@ WANTED_DIRS = \
isbn_issn \
lo \
ltree \
miscutil \
mysql \
noupdate \
oid2name \
pg_autovacuum \
pg_buffercache \
pg_dumplo \
pg_trgm \
pgbench \
pgcrypto \
......@@ -35,26 +31,19 @@ WANTED_DIRS = \
rtree_gist \
seg \
spi \
string \
tablefunc \
tips \
tsearch \
tsearch2 \
userlock \
vacuumlo
# Missing:
# adddepend \ (does not have a makefile)
# array \ (removed all but the README)
# ipc_check \ (does not have a makefile)
# mSQL-interface \ (requires msql installed)
# mac \ (does not have a makefile)
# oracle \ (does not have a makefile)
# pg_upgrade \ (does not have a makefile)
# reindexdb \ (does not have a makefile)
# start-scripts \ (does not have a makefile)
# tools \ (does not have a makefile)
# xml \ (non-standard makefile)
# xml2 \ (non-standard makefile)
......
......@@ -28,10 +28,6 @@ adddepend -
Add object dependency information to pre-7.3 objects.
by Rod Taylor <rbt@rbt.ca>
array -
Array iterator functions (now obsolete due to backend improvements)
by Massimo Dal Zotto <dz@cs.unitn.it>
btree_gist -
Support for emulating BTREE indexing in GiST
by Oleg Bartunov <oleg@sai.msu.su> and Teodor Sigaev <teodor@sigaev.ru>
......@@ -87,10 +83,6 @@ intarray -
Index support for arrays of int4, using GiST
by Teodor Sigaev <teodor@sigaev.ru> and Oleg Bartunov <oleg@sai.msu.su>
ipc_check -
Simple test script to help in configuring IPC.
FreeBSD only, for now.
isbn_issn -
PostgreSQL type extensions for ISBN (books) and ISSN (serials)
by Garrett A. Wollman <wollman@khavrinen.lcs.mit.edu>
......@@ -111,19 +103,6 @@ mac -
Support functions for MAC address types
by Lawrence E. Rosenman <ler@lerctr.org>
miscutil -
PostgreSQL assert checking and various utility functions
by Massimo Dal Zotto <dz@cs.unitn.it>
mysql -
Utility to convert MySQL schema dumps to SQL92 and PostgreSQL
by Thomas Lockhart <lockhart@alumni.caltech.edu>
Max Rudensky <fonin@ziet.zhitomir.ua>
Valentine Danilchuk <valdan@ziet.zhitomir.ua>
noupdate -
Trigger to prevent updates on single columns
oid2name -
Maps numeric files to table names
by B Palmer <bpalmer@crimelabs.net>
......@@ -140,19 +119,11 @@ pg_buffercache -
Real time queries on the shared buffer cache
by Mark Kirkwood <markir@paradise.net.nz>
pg_dumplo -
Dump large objects
by Karel Zak <zakkr@zf.jcu.cz>
pg_trgm -
Functions for determining the similarity of text based on trigram
matching.
by Oleg Bartunov <oleg@sai.msu.su> and Teodor Sigaev <teodor@sigaev.ru>
pg_upgrade -
Upgrade from previous PostgreSQL version without pg_dump/reload
by Bruce Momjian <pgman@candle.pha.pa.us>
pgbench -
TPC-B like benchmarking tool
by Tatsuo Ishii <t-ishii@sra.co.jp>
......@@ -184,10 +155,6 @@ spi -
start-scripts -
Scripts for starting the server at boot time.
string -
C-like input/output conversion routines for strings
by Massimo Dal Zotto <dz@cs.unitn.it>
tablefunc -
Examples of functions returning tables
by Joe Conway <mail@joeconway.com>
......@@ -196,15 +163,6 @@ tips/apache_logging -
Getting Apache to log to PostgreSQL
by Terry Mackintosh <terry@terrym.com>
tools -
Assorted developer tools
by Massimo Dal Zotto <dz@cs.unitn.it>
tsearch -
Full-text-index support using GiST (obsolete version)
by Teodor Sigaev <teodor@sigaev.ru> and Oleg Bartunov
<oleg@sai.msu.su>.
tsearch2 -
Full-text-index support using GiST
by Teodor Sigaev <teodor@sigaev.ru> and Oleg Bartunov
......@@ -218,10 +176,6 @@ vacuumlo -
Remove orphaned large objects
by Peter T Mount <peter@retep.org.uk>
xml -
Storing XML in PostgreSQL (obsolete version)
by John Gray <jgray@azuli.co.uk>
xml2 -
Storing XML in PostgreSQL
by John Gray <jgray@azuli.co.uk>
Array iterator functions have been removed as of PostgreSQL 7.4, because
equivalent functionality is now available built in to the backend.
For example, previously, using contrib/array, you might have used the
following construct:
create table t(id int4[], txt text[]);
-- select tuples with some id element equal to 123
select * from t where t.id *= 123;
Now you would do this instead:
-- select tuples with some id element equal to 123
select * from t where 123 = any (t.id);
-- or you could also do this
select * from t where 123 = some (t.id);
Similarly, if using contrib/array, you did the following:
-- select tuples with all txt elements matching '^[A-Z]'
select * from t where t.txt[1:3] **~ '^[A-Z]';
Now do this instead:
-- select tuples with all txt elements matching '^[A-Z]'
select * from t where '^[A-Z]' ~ all (t.txt[1:3]);
See this section in the PostgreSQL documentation for more detail:
The SQL Language => Functions and Operators => Row and Array Comparisons
This simple perl script was designed under FreeBSD, and, right now, is
limited to it. It provides a simple way of determining and directing
administrators in what has to be done to get IPC working, and configured.
Usage:
ipc_check.pl
- simply checks for semaphores and shared memory being enabled
- if one or other is not enabled, appropriate "options" are provided
to get it compiled into the kernel
ipc_check.pl -B <# of buffers>
- checks to see if there are sufficient shared memory buffers to
run the postmaster with a -B option as provided
- if insufficient buffers are provided, appropriate 'sysctl' commands,
and instructions, are provided to the administrator to increase
them
#!/usr/bin/perl
#
# ipc_check.pl -- report on System V IPC support in a FreeBSD kernel.
#
# Reads the kern.ipc sysctl tree to determine whether shared memory and
# semaphores are enabled, and (with "-B <# of buffers>") whether
# kern.ipc.shmall is large enough to start the postmaster with that many
# shared buffers.  FreeBSD only: relies on sysctl(8) output format.
#
# Notes ... -B 1 == 8k
# Parse the optional "-B <buffers>" command-line option.
if(@ARGV > 1) {
	if($ARGV[0] eq "-B") {
		$buffers = $ARGV[1];
	}
}
# Each shared buffer is 8 kB (see note above).
if($buffers > 0) {
	$kb_memory_required = $buffers * 8;
}
# Grab current shmall; the "value * 4" below implies it is counted in
# 4 kB pages -- NOTE(review): confirm against the target FreeBSD release.
$shm = `sysctl kern.ipc | grep shmall`;
( $junk, $shm_amt ) = split(/ /, $shm);
chomp($shm_amt);
# Presence of a semmap entry indicates semaphore support is compiled in.
$sem = `sysctl kern.ipc | grep semmap`;
print "\n\n";
if(length($shm) > 0) {
	# shmall pages -> kB
	printf "shared memory enabled: %d kB available\n", $shm_amt * 4;
	if($buffers > 0) {
		# Convert required kB into 4 kB pages and compare with shmall.
		if($kb_memory_required / 4 > $shm_amt) {
			print "\n";
			print "to provide enough shared memory for a \"-B $buffers\" setting\n";
			print "issue the following command\n\n";
			printf "\tsysctl -w kern.ipc.shmall=%d\n", $kb_memory_required / 4;
			print "\nand add the following to your /etc/sysctl.conf file:\n\n";
			printf "\tkern.ipc.shmall=%d\n", $kb_memory_required / 4;
		} else {
			print "\n";
			print "no changes to kernel required for a \"-B $buffers\" setting\n";
		}
	}
} else {
	# No shmall entry at all: shared memory is not compiled in.
	print "no shared memory support available\n";
	print "add the following option to your kernel config:\n\n";
	print "\toptions SYSVSHM\n\n";
}
print "\n==========================\n\n";
if(length($sem) > 0) {
	print "semaphores enabled\n";
} else {
	print "no semaphore support available\n";
	print "add the following option to your kernel config:\n\n";
	print "\toptions SYSVSEM\n\n";
}
# $PostgreSQL: pgsql/contrib/miscutil/Makefile,v 1.18 2004/08/20 20:13:05 momjian Exp $

# Build the misc_utils shared library, its SQL install script, and docs.
MODULES = misc_utils
DATA_built = misc_utils.sql
DOCS = README.misc_utils

# Build either standalone against an installed server (USE_PGXS) or as
# part of the PostgreSQL source tree.
ifdef USE_PGXS
PGXS = $(shell pg_config --pgxs)
include $(PGXS)
else
subdir = contrib/miscutil
top_builddir = ../..
include $(top_builddir)/src/Makefile.global
include $(top_srcdir)/contrib/contrib-global.mk
endif
Miscellaneous utility functions for PostgreSQL.
Copyright (C) 1999, Massimo Dal Zotto <dz@cs.unitn.it>
This software is distributed under the GNU General Public License
either version 2, or (at your option) any later version.
backend_pid()
return the pid of our corresponding backend.
unlisten(relname)
unlisten from a relation or from all relations if the argument
is null, empty or '*'.
It is now obsolete, superseded by the built-in UNLISTEN command, but it is
still useful if you want to unlisten a name computed by a query.
Note that a listen/notify relname can be any ascii string, not
just valid relation names.
min(x,y)
max(x,y)
return the min or max of two integers.
--
Massimo Dal Zotto <dz@cs.unitn.it>
/*
* misc_utils.c --
*
* This file defines miscellaneous PostgreSQL utility functions.
*
* Copyright (C) 1999, Massimo Dal Zotto <dz@cs.unitn.it>
*
* This file is distributed under the GNU General Public License
* either version 2, or (at your option) any later version.
*/
#include "postgres.h"
#include <unistd.h>
#include <signal.h>
#include "access/heapam.h"
#include "access/htup.h"
#include "access/relscan.h"
#include "access/skey.h"
#include "access/tupdesc.h"
#include "catalog/pg_listener.h"
#include "commands/async.h"
#include "fmgr.h"
#include "storage/lmgr.h"
#include "utils/fmgroids.h"
#include "utils/rel.h"
#include "utils/tqual.h"
#include "misc_utils.h"
/*
 * backend_pid() -- return the process ID of the calling backend.
 *
 * Simply exposes getpid() at the SQL level so a client can discover
 * which server process is handling its connection.
 */
int
backend_pid(void)
{
	pid_t		self = getpid();

	return (int) self;
}
/*
 * unlisten() -- stop listening for NOTIFY events on the given name.
 *
 * Delegates to the backend's Async_Unlisten for the current process.
 * Always returns 0; the return value exists only to satisfy the SQL
 * function wrapper.
 */
int
unlisten(char *relname)
{
	pid_t		listener = getpid();

	Async_Unlisten(relname, listener);
	return 0;
}
/*
 * int4max() -- return the larger of two int4 values.
 *
 * Written as a plain conditional expression instead of the
 * PostgreSQL-private Max() macro so the helper is self-contained and
 * does not depend on postgres.h internals.
 */
int
int4max(int x, int y)
{
	return (x > y) ? x : y;
}
/*
 * int4min() -- return the smaller of two int4 values.
 *
 * Written as a plain conditional expression instead of the
 * PostgreSQL-private Min() macro so the helper is self-contained and
 * does not depend on postgres.h internals.
 */
int
int4min(int x, int y)
{
	return (x < y) ? x : y;
}
/*
 * Return the number of active listeners on a relation name.
 *
 * Scans pg_listener: when relname is a non-empty text value, only
 * entries for that name are examined; otherwise all entries are.
 * A listener counts as active when it is our own backend or when its
 * PID still exists (probed with kill(pid, 0)).
 */
int
active_listeners(text *relname)
{
	HeapTuple	lTuple;			/* current pg_listener tuple */
	Relation	lRel;			/* pg_listener relation */
	HeapScanDesc sRel;			/* scan over pg_listener */
	TupleDesc	tdesc;			/* descriptor for pg_listener tuples */
	ScanKeyData key;			/* scan key on relname, when given */
	Datum		d;				/* raw listener-PID attribute */
	bool		isnull;
	int			len,
				pid;
	int			count = 0;		/* result: number of live listeners */
	int			ourpid = getpid();
	char		listen_name[NAMEDATALEN];

	lRel = heap_open(ListenerRelationId, AccessShareLock);
	tdesc = RelationGetDescr(lRel);

	if (relname && (VARSIZE(relname) > VARHDRSZ))
	{
		/*
		 * Copy the text value into a zeroed, NUL-terminated NAME-sized
		 * buffer (truncating if needed) and scan only matching entries.
		 */
		MemSet(listen_name, 0, NAMEDATALEN);
		len = Min(VARSIZE(relname) - VARHDRSZ, NAMEDATALEN - 1);
		memcpy(listen_name, VARDATA(relname), len);
		ScanKeyInit(&key,
					Anum_pg_listener_relname,
					BTEqualStrategyNumber, F_NAMEEQ,
					PointerGetDatum(listen_name));
		sRel = heap_beginscan(lRel, SnapshotNow, 1, &key);
	}
	else
		/* No (or empty) name given: scan every pg_listener entry. */
		sRel = heap_beginscan(lRel, SnapshotNow, 0, (ScanKey) NULL);

	while ((lTuple = heap_getnext(sRel, ForwardScanDirection)) != NULL)
	{
		d = heap_getattr(lTuple, Anum_pg_listener_pid, tdesc, &isnull);
		pid = DatumGetInt32(d);
		/* Count ourselves, or any listener whose process is still alive. */
		if ((pid == ourpid) || (kill(pid, 0) == 0))
			count++;
	}
	heap_endscan(sRel);
	heap_close(lRel, AccessShareLock);
	return count;
}
#ifndef MISC_UTILS_H
#define MISC_UTILS_H

/* Prototypes for the contrib/miscutil utility functions. */

int			backend_pid(void);	/* PID of our backend process */
int			unlisten(char *relname);	/* stop listening on a notify name */
int			int4max(int x, int y);		/* larger of two ints */
int			int4min(int x, int y);		/* smaller of two ints */
int			active_listeners(text *relname);	/* count live listeners */

#endif
-- misc_utils.sql --
--
-- SQL code to define misc functions.
--
-- Copyright (c) 1998, Massimo Dal Zotto <dz@cs.unitn.it>
--
-- This file is distributed under the GNU General Public License
-- either version 2, or (at your option) any later version.

-- Adjust this setting to control where the objects get created.
SET search_path = public;

-- Return the pid of the backend.
--
CREATE OR REPLACE FUNCTION backend_pid()
RETURNS int4
AS 'MODULE_PATHNAME'
LANGUAGE 'C';

-- Unlisten from a relation (quoted: UNLISTEN is a keyword).
--
CREATE OR REPLACE FUNCTION "unlisten"(name)
RETURNS int4
AS 'MODULE_PATHNAME'
LANGUAGE 'C';

-- Unlisten from all relations for this backend.
-- ('*' means "all names" in the C implementation.)
CREATE OR REPLACE FUNCTION "unlisten"()
RETURNS int4
AS 'SELECT "unlisten"(''*'')'
LANGUAGE 'SQL';

-- min(x,y) -- maps to the C function int4min.
--
CREATE OR REPLACE FUNCTION min(int4,int4)
RETURNS int4
AS 'MODULE_PATHNAME', 'int4min'
LANGUAGE 'C' STRICT;

-- max(x,y) -- maps to the C function int4max.
--
CREATE OR REPLACE FUNCTION max(int4,int4)
RETURNS int4
AS 'MODULE_PATHNAME', 'int4max'
LANGUAGE 'C' STRICT;

-- Return the number of active listeners on a relation
--
CREATE OR REPLACE FUNCTION active_listeners(text)
RETURNS int4
AS 'MODULE_PATHNAME'
LANGUAGE 'C';
# mysql conversion Perl scripts
# $PostgreSQL: pgsql/contrib/mysql/Makefile,v 1.1 2004/11/04 06:09:21 neilc Exp $

# No C module is built here; only the two conversion scripts and docs
# are installed.
MODULES =
SCRIPTS = my2pg.pl mysql2pgsql
DOCS = README.mysql

# Build either standalone against an installed server (USE_PGXS) or as
# part of the PostgreSQL source tree.
ifdef USE_PGXS
PGXS = $(shell pg_config --pgxs)
include $(PGXS)
else
subdir = contrib/mysql
top_builddir = ../..
include $(top_builddir)/src/Makefile.global
include $(top_srcdir)/contrib/contrib-global.mk
endif
Here are two conversion utilities for MySQL dumps. Use the one you prefer.
The most recent version of my2pg.pl can be obtained from:
http://www.omnistarinc.com/~fonin/downloads.php#my2pg
my2pg.diff has additional changes for CREATE FUNCTION.
Another tool, mysql2pgsql, can be found at:
http://gborg.postgresql.org/project/mysql2psql/projdisplay.php
*** /laptop/my2pg.pl Mon Apr 19 18:51:44 2004
--- my2pg.pl Mon Apr 19 18:59:09 2004
***************
*** 38,43 ****
--- 38,50 ----
# $My2pg: my2pg.pl,v 1.28 2001/12/06 19:32:20 fonin Exp $
# $Id: my2pg.diff,v 1.1 2004/04/19 23:18:12 momjian Exp $
+ # Custom patch
+ # Revision 1.9 2002/08/22 00:01:39 tgl
+ # Add a bunch of pseudo-types to replace the behavior formerly associated
+ # with OPAQUE, as per recent pghackers discussion. I still want to do some
+ # more work on the 'cstring' pseudo-type, but I'm going to commit the bulk
+ # of the changes now before the tree starts shifting under me ...
+
#
# $Log: my2pg.diff,v $
# Revision 1.1 2004/04/19 23:18:12 momjian
# Update to my2pg version 1.28, add docs, update URL for newest version.
#
# Create diff of custom changes Tom made to the utility for CREATE
# FUNCTION.
#
# This will make moving this utility out of CVS easier.
#
# Revision 1.28 2002/11/30 12:03:48 fonin
***************
*** 332,342 ****
print LIBTYPES "\n * Types for table ".uc($table_name);
print LIBTYPES "\n */\n";
! $types.="\nCREATE FUNCTION $typename"."_in (opaque)
RETURNS $typename
AS '$libtypename'
LANGUAGE 'c'
! WITH (ISCACHABLE);\n";
# creating output function
my $func_out="
--- 339,349 ----
print LIBTYPES "\n * Types for table ".uc($table_name);
print LIBTYPES "\n */\n";
! $types.="\nCREATE FUNCTION $typename"."_in (cstring)
RETURNS $typename
AS '$libtypename'
LANGUAGE 'c'
! WITH (ISSTRICT, ISCACHABLE);\n";
# creating output function
my $func_out="
***************
*** 386,396 ****
return (*a>=*b);
}\n";
! $types.="\nCREATE FUNCTION $typename"."_out (opaque)
! RETURNS opaque
AS '$libtypename'
LANGUAGE 'c'
! WITH (ISCACHABLE);\n";
$types.="\nCREATE TYPE $typename (
internallength = 2,
--- 393,403 ----
return (*a>=*b);
}\n";
! $types.="\nCREATE FUNCTION $typename"."_out ($typename)
! RETURNS cstring
AS '$libtypename'
LANGUAGE 'c'
! WITH (ISSTRICT, ISCACHABLE);\n";
$types.="\nCREATE TYPE $typename (
internallength = 2,
***************
*** 532,538 ****
print LIBTYPES "\n * Types for table ".uc($table_name);
print LIBTYPES "\n */\n";
! $types.="\nCREATE FUNCTION $typename"."_in (opaque)
RETURNS $typename
AS '$libtypename'
LANGUAGE 'c';\n";
--- 539,545 ----
print LIBTYPES "\n * Types for table ".uc($table_name);
print LIBTYPES "\n */\n";
! $types.="\nCREATE FUNCTION $typename"."_in (cstring)
RETURNS $typename
AS '$libtypename'
LANGUAGE 'c';\n";
***************
*** 584,591 ****
\n";
! $types.="\nCREATE FUNCTION $typename"."_out (opaque)
! RETURNS opaque
AS '$libtypename'
LANGUAGE 'c';\n";
--- 591,598 ----
\n";
! $types.="\nCREATE FUNCTION $typename"."_out ($typename)
! RETURNS cstring
AS '$libtypename'
LANGUAGE 'c';\n";
<HTML>
<HEAD>
<TITLE>my2pg - MySQL -> PostgreSQL dump conversion utility.</TITLE>
<LINK REV="made" HREF="mailto:bhcompile@daffy.perf.redhat.com">
</HEAD>
<BODY>
<A NAME="__index__"></A>
<!-- INDEX BEGIN -->
<UL>
<LI><A HREF="#name">NAME</A></LI>
<LI><A HREF="#syntax">SYNTAX</A></LI>
<LI><A HREF="#overview">OVERVIEW</A></LI>
<LI><A HREF="#commandline options">COMMAND-LINE OPTIONS</A></LI>
<LI><A HREF="#side effects">SIDE EFFECTS</A></LI>
<LI><A HREF="#bugs">BUGS</A></LI>
<LI><A HREF="#authors">AUTHORS</A></LI>
<LI><A HREF="#credits">CREDITS</A></LI>
<LI><A HREF="#license">LICENSE</A></LI>
</UL>
<!-- INDEX END -->
<HR>
<P>
<H1><A NAME="name">NAME</A></H1>
<P>my2pg - MySQL -&gt; PostgreSQL dump conversion utility.</P>
<P>
<HR>
<H1><A NAME="syntax">SYNTAX</A></H1>
<PRE>
mysqldump db | ./my2pg.pl [-nds] &gt; pgsqldump.sql
vi libtypes.c
make
psql database &lt; pgsqldump.txt
where</PRE>
<DL>
<DT><STRONG><A NAME="item_pgsqldump%2Esql"><EM>pgsqldump.sql</EM></A></STRONG><BR>
<DD>
- file suitable for loading into PostgreSQL.
<P></P>
<DT><STRONG><A NAME="item_libtypes%2Ec"><EM>libtypes.c</EM></A></STRONG><BR>
<DD>
- C source for emulated MySQL types (ENUM, SET) generated by <STRONG>my2pg</STRONG>
<P></P></DL>
<P>
<HR>
<H1><A NAME="overview">OVERVIEW</A></H1>
<P>The <STRONG>my2pg</STRONG> utility attempts to convert a MySQL database dump into a PostgreSQL one.
<STRONG>my2pg</STRONG> performs the following conversions:</P>
<UL>
<LI><STRONG><A NAME="item_Type_conversion%2E">Type conversion.</A></STRONG><BR>
It tries to find proper Postgres
type for each column.
Unknown types are silently passed through to the output dump;
ENUM and SET types implemented via user types
(C source for such types can be found in
<STRONG>libtypes.c</STRONG> file);
<P></P>
<LI><STRONG><A NAME="item_Encloses_identifiers_into_double_quotes%2E">Encloses identifiers into double quotes.</A></STRONG><BR>
All column and table
names should be enclosed in double quotes to prevent
conflicts with reserved SQL keywords;
<P></P>
<LI><STRONG><A NAME="item_Converting">Converting</A></STRONG><BR>
AUTO_INCREMENT fields to SERIAL. Actually, creating the sequence and
setting default value to nextval('seq'), well, you know :)
<P></P>
<LI><STRONG>Converting</STRONG><BR>
<CODE>KEY(field)</CODE> to CREATE INDEX i_field on table (field);
<P></P>
<LI><STRONG><A NAME="item_The_same">The same</A></STRONG><BR>
for UNIQUE keys;
<P></P>
<LI><STRONG><A NAME="item_Indices">Indices</A></STRONG><BR>
are created AFTER the rows are inserted (to speed up the load);
<P></P>
<LI><STRONG><A NAME="item_Translates_%27%23%27">Translates '#'</A></STRONG><BR>
MySQL comments to ANSI SQL '--'
<P></P></UL>
<P>It encloses dump in transaction block to prevent single errors
during data load.</P>
<P>
<HR>
<H1><A NAME="commandline options">COMMAND-LINE OPTIONS</A></H1>
<P>My2pg takes the following command-line options:</P>
<DL>
<DT><STRONG><A NAME="item_%2Dn">-n</A></STRONG><BR>
<DD>
Convert *CHAR DEFAULT '' NOT NULL types to *CHAR NULL.
Postgres can't load empty '' strings in NOT NULL fields.
<P></P>
<DT><STRONG><A NAME="item_%2Dd">-d</A></STRONG><BR>
<DD>
Add double quotes around table and column names
<P></P>
<DT><STRONG><A NAME="item_%2Dh">-h</A></STRONG><BR>
<DD>
Show usage banner.
<P></P>
<DT><STRONG><A NAME="item_%2Ds">-s</A></STRONG><BR>
<DD>
Do not attempt to convert data. Currently my2pg only tries to convert
date and time data.
<P></P></DL>
<P>
<HR>
<H1><A NAME="side effects">SIDE EFFECTS</A></H1>
<UL>
<LI><STRONG><A NAME="item_creates">creates</A></STRONG><BR>
file <STRONG>libtypes.c</STRONG> in the current directory,
overwriting any existing file without any checks;
<P></P>
<LI><STRONG><A NAME="item_the_same">the same</A></STRONG><BR>
for Makefile.
<P></P></UL>
<P>
<HR>
<H1><A NAME="bugs">BUGS</A></H1>
<P>Known bugs are:</P>
<UL>
<LI><STRONG><A NAME="item_Possible_problems_with_the_timestamp_data%2E">Possible problems with the timestamp data.</A></STRONG><BR>
PostgreSQL does not accept incorrect date/time values like <STRONG>2002-00-15</STRONG>,
while MySQL does not care about that. Currently my2pg cannot handle this
issue. You should care yourself to convert such a data.
<P></P>
<LI><STRONG><A NAME="item_Use_%2Ds_option_if_your_numeric_data_are_broken_du">Use -s option if your numeric data are broken during conversion.</A></STRONG><BR>
My2pg attempts to convert MySQL timestamps of the form <STRONG>yyyymmdd</STRONG> to
<STRONG>yyyy-mm-dd</STRONG> and <STRONG>yyyymmddhhmmss</STRONG> to <STRONG>yyyy-mm-dd hh:mm:ss</STRONG>. It performs
some heuristic checks to ensure that the month,day,hour,minutes and seconds have
values from the correct range (0..12, 0..31, 0..23, 0..59, 0..59 respectively).
It is still possible that your numeric values that satisfy these conditions
will get broken.
<P></P>
<LI><STRONG><A NAME="item_Possible_problems_with_enclosing_identifiers_in_do">Possible problems with enclosing identifiers in double quotes.</A></STRONG><BR>
All identifiers such as table and column names should be enclosed in double
quotes. Program can't handle upper-case identifiers,
like DBA. Lower-case identifiers are OK.
<P></P>
<LI><STRONG><A NAME="item_SET_type_emulation_is_not_full%2E_LIKE_operation_o">SET type emulation is not full. LIKE operation on</A></STRONG><BR>
SETs, raw integer input values should be implemented
<P></P>
<LI><STRONG><A NAME="item_Makefile"><STRONG>Makefile</STRONG></A></STRONG><BR>
generated during output is
platform-dependent and surely works only on
Linux/gcc (FreeBSD/gcc probably works as well - not tested)
<P></P>
<LI><STRONG><A NAME="item_Generated_libtypes%2Ec_contain_line">Generated <STRONG>libtypes.c</STRONG> contain line</A></STRONG><BR>
<PRE>
#include &lt;postgres.h&gt;</PRE>
<P>This file may be located not in standard compiler
include path, you need to check it before compiling.</P>
</UL>
<P>
<HR>
<H1><A NAME="authors">AUTHORS</A></H1>
<P><STRONG>(c) 2000-2002 Maxim V. Rudensky (<A HREF="mailto:fonin@omnistaronline.com">fonin@ziet.zhitomir.ua</A>)</STRONG> (developer, maintainer)</P>
<P><STRONG>(c) 2000 Valentine V. Danilchuk (<A HREF="mailto:valdan@ziet.zhitomir.ua">valdan@ziet.zhitomir.ua</A>)</STRONG> (original script)</P>
<P>
<HR>
<H1><A NAME="credits">CREDITS</A></H1>
<P>Great thanks to all those people who provided feedback and make development
of this tool easier.</P>
<P>Jeff Waugh &lt;<A HREF="mailto:jaw@ic.net">jaw@ic.net</A>&gt;</P>
<P>Joakim Lemström &lt;<A HREF="mailto:jocke@bytewize.com">jocke@bytewize.com</A>&gt; || &lt;<A HREF="mailto:buddyh19@hotmail.com">buddyh19@hotmail.com</A>&gt;</P>
<P>Yunliang Yu &lt;<A HREF="mailto:yu@math.duke.edu">yu@math.duke.edu</A>&gt;</P>
<P>Brad Hilton &lt;<A HREF="mailto:bhilton@vpop.net">bhilton@vpop.net</A>&gt;</P>
<P>If you are not listed here please write to me.</P>
<P>
<HR>
<H1><A NAME="license">LICENSE</A></H1>
<P><STRONG>BSD</STRONG></P>
</BODY>
</HTML>
This diff is collapsed.
#!/usr/bin/perl
# mysql2pgsql
# Take a MySQL schema dump and turn it into SQL92 and PostgreSQL form.
# Thomas Lockhart, (c) 2000, PostgreSQL Inc.
# Thanks to Tim Perdue at SourceForge.Net for testing and feedback.
#
# Portable bootstrap: re-executes this file under perl regardless of
# whether it was started from sh or csh.
eval '(exit $?0)' && eval 'exec perl -S $0 ${1+"$@"}'
& eval 'exec perl -S $0 $argv:q'
if 0;

use IO::File;
use Getopt::Long;

my $progname = "mysql2pgsql";
my $version = "0.3";

# Option switches; Getopt::Long leaves them in the $opt_* package vars.
GetOptions("debug!", "verbose!", "version", "path=s", "help", "data!");

my $debug = $opt_debug || 0;
my $verbose = $opt_verbose || 0;
my $pathfrom = $opt_path || "";		# directory prefixed to COPY data paths
my $nodata = (! $opt_data);		# --nodata: suppress the .init file name in debug output

# Ensure a non-empty path ends with a slash.
$pathfrom = "$pathfrom/" if ($pathfrom =~ /.*[^\/]$/);

print "$0: $progname version $version\n"
if ($opt_version || $opt_help);
print "\t(c) 2000 Thomas Lockhart PostgreSQL Inc.\n"
if ($opt_version && $opt_verbose || $opt_help);
if ($opt_help) {
	print "$0 --verbose --version --help --path=dir --nodata infile ...\n";
	exit;
}

# Process each input dump file: derive the output stem by stripping any
# directory and extension, then write <stem>.sql92 (schema) and
# <stem>.init (data loader).
while (@ARGV) {
	my $ostem;
	my $oname;
	my $pname;
	my @xargs;
	$iname = shift @ARGV;
	$ostem = $iname;
	$ostem = $1 if ($ostem =~ /.+\/([^\/]+)$/);	# strip leading directories
	$ostem = $1 if ($ostem =~ /(.+)[.][^.]*$/);	# strip trailing extension
	$oname = "$ostem.sql92";
	$pname = "$ostem.init";
	@xargs = ($iname, $oname);
	push @xargs, $pname unless ($nodata);
	print "@xargs\n" if ($debug);
	# NOTE(review): $pname is passed unconditionally, so --nodata only
	# affects the debug line above, not whether the .init file is written.
	TransformDumpFile($iname, $oname, $pname);
}
exit;
# TransformDumpFile -- convert one MySQL dump file.
#
# Reads $iname, writes the translated schema to $oname and, when $pname
# is defined, a data-loading script to $pname.  The "local" (dynamically
# scoped) @tables, %pkeys and %pseqs are filled in by CreateSchema and
# consumed by PopulateSchema during this call.
sub TransformDumpFile {
	local ($iname, $oname, $pname) = @_;
	local @dlines;		# raw input lines
	local @slines;		# translated schema lines
	local @plines;		# data-load (COPY/setval) lines
	local @tables; # list of tables created
	local %pkeys;		# table name -> serial primary-key column
	local %pseqs;		# table name -> sequence backing that key
	local %sequences;

	# Slurp the whole dump into memory.
	# NOTE(review): IN is never explicitly closed; relies on reopen/exit.
	open(IN, "<$iname") || die "Unable to open file $iname";
	while (<IN>) {
		chomp;
		push @dlines, $_;
	}
	print("Calling CreateSchema with $#dlines lines\n") if ($debug);
	@slines = CreateSchema(@dlines);

	# Write the translated schema.
	open(OUT, ">$oname") || die "Unable to open output file $oname";
	foreach (@slines) {
		print "> $_" if ($debug);
		print OUT "$_";
	}
	close(OUT);

	# Optionally write the data-loading script.
	return if (! defined($pname));
	@plines = PopulateSchema(@tables);
	open(OUT, ">$pname") || die "Unable to open output file $pname";
	foreach (@plines) {
		print "> $_" if ($debug);
		print OUT "$_";
	}
	close(OUT);
}
# PopulateSchema -- generate data-loading SQL for the created tables.
#
# For each table emits a COPY ... FROM statement (path prefixed with the
# file-scoped $pathfrom) plus, for tables with a serial primary key
# recorded in %pkeys/%pseqs by CreateSchema, a setval() call to
# resynchronize the sequence with the loaded data.
sub PopulateSchema {
	local @tables = @_;
	local @out;
	local $pkey;		# serial key column for the current table
	local $pseq;		# sequence backing that key
	foreach (@tables) {
		$table = $_;
		$tpath = "$pathfrom$table";
		print "Table is $table\n" if ($debug);
		push @out, "\n";
		push @out, "copy $table from '$tpath.txt';\n";
		if (defined($pkeys{$table})) {
			foreach ($pkeys{$table}) {
				$pkey = $_;
				$pseq = $pseqs{$table};
				print "Key for $table is $pkey on $pseq\n" if ($debug);
				# //push @out, "\$value = select max($pkey) from $table;\n";
				push @out, "select setval ('$pseq', (select max($pkey) from $table));\n";
			}
		}
	}
	return @out;
}
# CreateSchema -- translate MySQL "create table" DDL into SQL92/PostgreSQL.
#
# Walks the dump line by line:
#   * '#' comments become '--' comments;
#   * MySQL types (int(NN), float(m,n), smallinteger, *text/blob) are
#     rewritten to standard types;
#   * "integer ... auto_increment" columns become sequence-backed
#     defaults, recording the key column and sequence in the file-scoped
#     %pkeys and %pseqs for PopulateSchema;
#   * KEY(...) clauses are pulled out of the table body and emitted as
#     separate "create index" statements after the table.
# Appends created table names to the file-scoped @tables and returns the
# translated lines.
sub CreateSchema {
	local @lines = @_;
	local @out;
	# undef $last;
	local %knames;		# index names seen so far (duplicate detection)
	push @out, "--\n";
	push @out, "-- Generated from mysql2pgsql\n";
	push @out, "-- (c) 2000, Thomas Lockhart, PostgreSQL Inc.\n";
	push @out, "--\n";
	push @out, "\n";
	while (@lines) {
		$_ = shift @lines;
		print "< $_\n" if ($debug);
		# Replace hash comments with SQL9x standard syntax
		$_ = "-- $1" if (/^[\#](.*)/);
		# Found a CREATE TABLE statement?
		if (/(create\s+table)\s+(\w+)\s+([(])\s*$/i) {
			$table = $2;
			# "user" is a reserved word in PostgreSQL; quote it.
			$table = "\"$1\"" if ($table =~ /^(user)$/);
			push @tables, $table;
			push @tabledef, "create table $table (";
			# push @out, "$_\n";
			# Consume lines until the closing ");" of this table.
			while (@lines) {
				$_ = shift @lines;
				print "< $_\n" if ($debug);
				# Replace int(11) with SQL9x standard syntax
				while (/int\(\d*\)/gi) {
					$_ = "$`integer$'";
				}
				# Replace float(10,2) with SQL9x standard syntax
				while (/(float)\((\d+),\s*(\d+)\)/gi) {
					$_ = "$`$1($2)$'";
				}
				# Replace smallinteger with SQL9x syntax
				while (/smallinteger/gi) {
					$_ = "$`integer$'";
				}
				# Replace mediumtext with PostgreSQL syntax
				while (/(longtext|mediumtext|blob|largeblob)/gi) {
					$_ = "$`text$'";
				}
				# Replace integer ... auto_increment with PostgreSQL syntax:
				# create a sequence and default the column to nextval().
				while (/(\s*)(\w+)\s+integer\s+(.*)\s+auto_increment/gi) {
					$serid = $table . "_pk_seq";
					# Identifiers longer than 31 chars get shortened
					# (pre-7.3 name length limit -- hence the 32 threshold).
					push @out, "-- serial identifier $serid will likely be truncated\n"
						if (length($serid) >= 32);
					if (length($serid) >= 32) {
						$excess=(length($serid)-31);
						$serid = substr($table,0,-($excess)) . "_pk_seq";
						push @out, "-- serial identifier $serid was truncated\n";
					}
					push @out, "CREATE SEQUENCE $serid;\n\n";
					$pkeys{$table} = $2;
					$pseqs{$table} = $serid;
					push @out, "-- key is $pkeys{$table}, sequence is $pseqs{$table}\n" if ($debug);
					$_ = "$`$1$2 integer default nextval('$serid') $3$'";
				}
				# Replace date with double-quoted name
				# while (/^(\s*)(date|time)(\s+)/gi) {
				# $_ = "$1\"$2\"$3$'";
				# }
				# Found "KEY"? Then remove it from the CREATE TABLE statement
				# and instead write a CREATE INDEX statement.
				if (/^\s*key\s+(\w+)\s*[(](\w[()\w\d,\s]*)[)][,]?/i) {
					$iname = $1;
					$column = $2;
					$iname = $1 if ($iname =~ /^idx_(\w[\_\w\d]+)/);
					# Sheesh, there can be upper bounds on index string sizes?
					# Get rid of the length specifier (e.g. filename(45) -> filename)
					while ($column =~ /(\w[\w\d])[(]\d+[)]/g) {
						$column = "$`$1$'";
					}
					# $column = $1 if ($column =~ /(\w+)[(]\d+[)]/);
					# push @out, "Index on $table($column) is $iname\n";
					# Duplicate index name: fall back to table_column.
					if (defined($knames{$iname})) {
						push @out, "-- $iname already exists";
						# sprintf($iname, "idx_%_%s", $table, $iname);
						# $iname = "idx_" . $table . "_" . $column;
						# Do not bother with more to the name; it will be too big anyway
						$iname = $table . "_" . $column;
						push @out, "; use $iname instead\n";
					}
					$knames{$iname} = $iname;
					$keydef{$column} = $iname;
					# push @out, "! $_\n";
					# $last = $tabledef[$#tabledef];
					# push @out, "? $#tabledef $last\n";
					# push @out, "match $1\n" if ($last =~ /(.*),\s*$/);
					# Remove the trailing comma from the previous line, if necessary
					$tabledef[$#tabledef] = $1
						if (($#tabledef > 0) && ($tabledef[$#tabledef] =~ /(.*),\s*$/));
					# push @out, "? $tabledef[$#tabledef]\n";
					# If this is the end of the statement, save it and exit loop
				} elsif (/^\s*[)]\;/) {
					push @tabledef, $_;
					# push @out, "< $_\n";
					last;
					# Otherwise, just save the line
				} else {
					# push @out, "$last\n" if (defined($last));
					# $last = $_;
					push @tabledef, $_;
					# push @out, "$_\n";
				}
			}
			# Emit the accumulated table definition, then its indexes.
			foreach $t (@tabledef) {
				push @out, "$t\n";
			}
			undef @tabledef;
			foreach $k (keys %keydef) {
				push @out, "create index $keydef{$k} on $table ($k);\n";
			}
			undef %keydef;
		} else {
			# Not inside a CREATE TABLE: pass the line through untouched.
			push @out, "$_\n";
		}
	}
	# push @out, "$last\n" if (defined($last));
	foreach (keys %pkeys) {
		my $val = $pkeys{$_};
		print "key is $val\n" if ($debug);
	}
	return @out;
}
# StripComma -- return the line with any trailing comma (and trailing
# whitespace after it) removed; lines without one pass through unchanged.
sub StripComma {
	my $text = shift @_;

	$text =~ s/,\s*$//;
	return $text;
}
# $PostgreSQL: pgsql/contrib/noupdate/Makefile,v 1.11 2004/08/20 20:13:05 momjian Exp $

# Build the noup trigger module, its SQL install script, and docs.
MODULES = noup
DATA_built = noup.sql
DOCS = README.noup

# Build either standalone against an installed server (USE_PGXS) or as
# part of the PostgreSQL source tree.
ifdef USE_PGXS
PGXS = $(shell pg_config --pgxs)
include $(PGXS)
else
subdir = contrib/noupdate
top_builddir = ../..
include $(top_builddir)/src/Makefile.global
include $(top_srcdir)/contrib/contrib-global.mk
endif
noupdate
~~~~~~~~
- trigger to prevent updates on single columns.
Example:
~~~~~~~
CREATE TABLE TEST ( COL1 INT, COL2 INT, COL3 INT );
CREATE TRIGGER BT BEFORE UPDATE ON TEST FOR EACH ROW
EXECUTE PROCEDURE
noup ('COL1');
-- Now Try
INSERT INTO TEST VALUES (10,20,30);
UPDATE TEST SET COL1 = 5;
/*
* noup.c -- functions to remove update permission from a column
*/
#include "executor/spi.h" /* this is what you need to work with SPI */
#include "commands/trigger.h" /* -"- and triggers */
#include <ctype.h> /* tolower () */
extern Datum noup(PG_FUNCTION_ARGS);
/*
* noup () -- revoke permission on column
*
* Though it's called without args You have to specify referenced
* table/column while creating trigger:
* EXECUTE PROCEDURE noup ('col').
*/
PG_FUNCTION_INFO_V1(noup);
/*
 * noup -- BEFORE UPDATE ROW trigger function.
 *
 * For each column named in the CREATE TRIGGER argument list, fetches
 * that column from the new tuple; if any of them is non-NULL the update
 * is cancelled by returning NULL (which skips the operation for a
 * BEFORE ROW trigger) after issuing a WARNING.
 */
Datum
noup(PG_FUNCTION_ARGS)
{
	TriggerData *trigdata = (TriggerData *) fcinfo->context;
	Trigger    *trigger;		/* to get trigger name */
	int			nargs;			/* # of args specified in CREATE TRIGGER */
	char	  **args;			/* arguments: the protected column names */
	int			nkeys;			/* # of key columns (= nargs) */
	Datum	   *kvals;			/* key values */
	Relation	rel;			/* triggered relation */
	HeapTuple	tuple = NULL;	/* tuple to return */
	TupleDesc	tupdesc;		/* tuple description */
	bool		isnull;			/* to know is some column NULL or not */
	int			ret;
	int			i;

	/*
	 * Some checks first...
	 */

	/* Called by trigger manager ? */
	if (!CALLED_AS_TRIGGER(fcinfo))
		/* internal error */
		elog(ERROR, "noup: not fired by trigger manager");

	/* Should be called for ROW trigger */
	if (TRIGGER_FIRED_FOR_STATEMENT(trigdata->tg_event))
		/* internal error */
		elog(ERROR, "noup: can't process STATEMENT events");

	/* Should not be called for INSERT */
	if (TRIGGER_FIRED_BY_INSERT(trigdata->tg_event))
		/* internal error */
		elog(ERROR, "noup: can't process INSERT events");

	/* Should not be called for DELETE */
	else if (TRIGGER_FIRED_BY_DELETE(trigdata->tg_event))
		/* internal error */
		elog(ERROR, "noup: can't process DELETE events");

	/* check new Tuple */
	tuple = trigdata->tg_newtuple;

	trigger = trigdata->tg_trigger;
	nargs = trigger->tgnargs;
	args = trigger->tgargs;
	nkeys = nargs;
	rel = trigdata->tg_relation;
	tupdesc = rel->rd_att;

	/* Connect to SPI manager */
	if ((ret = SPI_connect()) < 0)
		/* internal error */
		elog(ERROR, "noup: SPI_connect returned %d", ret);

	/*
	 * Allocate space to hold the key values fetched below.
	 */
	kvals = (Datum *) palloc(nkeys * sizeof(Datum));

	/* For each column in key ... */
	for (i = 0; i < nkeys; i++)
	{
		/* get index of column in tuple */
		int			fnumber = SPI_fnumber(tupdesc, args[i]);

		/* Bad guys may give us un-existing column in CREATE TRIGGER */
		if (fnumber < 0)
			/* internal error */
			elog(ERROR, "noup: there is no attribute %s in relation %s",
				 args[i], SPI_getrelname(rel));

		/* Well, get binary (in internal format) value of column */
		kvals[i] = SPI_getbinval(tuple, tupdesc, fnumber, &isnull);

		/*
		 * If it's NOT NULL then cancel update by returning NULL
		 * (skips the operation for a BEFORE ROW trigger).
		 */
		if (!isnull)
		{
			elog(WARNING, "%s: update not allowed", args[i]);
			SPI_finish();
			return PointerGetDatum(NULL);
		}
	}
	SPI_finish();
	/* All protected columns were NULL: let the update proceed. */
	return PointerGetDatum(tuple);
}
-- Adjust this setting to control where the objects get created.
SET search_path = public;

-- Register the C trigger function noup() (implemented in noup.c).
-- Attach it to a table with:
--   CREATE TRIGGER ... BEFORE UPDATE ... EXECUTE PROCEDURE noup('column');
CREATE OR REPLACE FUNCTION noup ()
RETURNS trigger
AS 'MODULE_PATHNAME'
LANGUAGE 'C';
# $PostgreSQL: pgsql/contrib/pg_dumplo/Makefile,v 1.14 2005/03/25 18:17:11 momjian Exp $

# Build the pg_dumplo client program (links against libpq).
PROGRAM = pg_dumplo
OBJS = main.o lo_export.o lo_import.o utils.o
PG_CPPFLAGS = -I$(libpq_srcdir)
PG_LIBS = $(libpq_pgport)
DOCS = README.pg_dumplo

# Standard contrib boilerplate: build either against an installed server
# (USE_PGXS set) or from inside the source tree.
ifdef USE_PGXS
PGXS = $(shell pg_config --pgxs)
include $(PGXS)
else
subdir = contrib/pg_dumplo
top_builddir = ../..
include $(top_builddir)/src/Makefile.global
include $(top_srcdir)/contrib/contrib-global.mk
endif
$PostgreSQL: pgsql/contrib/pg_dumplo/README.pg_dumplo,v 1.3 2003/11/29 19:51:35 pgsql Exp $
pg_dumplo - PostgreSQL large object dumper
==========================================
By Karel Zak <zakkr@zf.jcu.cz>
Compilation:
===========
* run master ./configure in the PG source top directory
* gmake all
* gmake install
THANKS:
======
<??? I lost his e-mail ???>
* option '--all' and pg_class usage
Pavel Janík ml. <Pavel.Janik@linux.cz>
* HOWTO (the rest of this file)
How to use pg_dumplo?
=====================
(c) 2000, Pavel Janík ml. <Pavel.Janik@linux.cz>
Q: How do you use pg_dumplo?
============================
A: This is a small demo of backing up the database table with Large Objects:
We will create a demo database and a small and useless table `lo' inside
it:
SnowWhite:$ createdb test
CREATE DATABASE
Ok, our database with the name 'test' is created. Now we should create demo
table which will contain only one column with the name 'id' which will hold
the OID number of a Large Object:
SnowWhite:$ psql test
Welcome to psql, the PostgreSQL interactive terminal.
Type: \copyright for distribution terms
\h for help with SQL commands
\? for help on internal slash commands
\g or terminate with semicolon to execute query
\q to quit
test=# CREATE TABLE lo (id oid);
CREATE
test=# \lo_import /etc/aliases
lo_import 19338
test=# INSERT INTO lo VALUES (19338);
INSERT 19352 1
test=# select * from lo;
id
-------
19338
(1 row)
test=# \q
In the above example you can see that we have also imported one "Large
Object" - the file /etc/aliases. It has an oid of 19338 so we have inserted
this oid number to the database table lo to the column id. The final SELECT
shows that we have one record in the table.
Now we can demonstrate the work of pg_dumplo. We will create a dump directory
which will contain the whole dump of large objects (/tmp/dump):
mkdir -p /tmp/dump
Now we can dump all large objects from the database `test' which have OIDs
stored in the column `id' in the table `lo':
SnowWhite:$ pg_dumplo -s /tmp/dump -d test -l lo.id
pg_dumplo: dump lo.id (1 large obj)
Voila, we have the dump of all Large Objects in our directory:
SnowWhite:$ tree /tmp/dump/
/tmp/dump/
`-- test
|-- lo
| `-- id
| `-- 19338
`-- lo_dump.index
3 directories, 2 files
SnowWhite:$
In practice, we'd probably use
SnowWhite:$ pg_dumplo -s /tmp/dump -d test -e
to export all large objects that are referenced by any OID-type column
in the database. Calling out specific column(s) with -l is only needed
for a selective dump.
For routine backup purposes, the dump directory could now be converted into
an archive file with tar and stored on tape. Notice that a single dump
directory can hold the dump of multiple databases.
Now, how can we recreate the contents of the table lo and of the large
objects in the database if something goes wrong? To do this, we expect that pg_dump is
also used to store the definition and contents of the regular tables in
the database.
SnowWhite:$ pg_dump test >test.backup
Now, if we lose the database:
SnowWhite:$ dropdb test
DROP DATABASE
we can recreate it and reload the regular tables from the dump file:
SnowWhite:$ createdb test
CREATE DATABASE
SnowWhite:$ psql test <test.backup
But at this point our database has no large objects in it. What's more,
the large-object-referencing columns contain the OIDs of the old large
objects, which will not be the OIDs they'll have when reloaded. Never
fear: pg_dumplo will fix the large object references at the same time
it reloads the large objects. We reload the LO data from the dump
directory like this:
SnowWhite:$ pg_dumplo -s /tmp/dump -d test -i
19338 lo id test/lo/id/19338
SnowWhite:$
And this is everything. The contents of table lo will be automatically
updated to refer to the new large object OIDs.
Summary: In this small example we have shown that pg_dumplo can be used to
completely dump the database's Large Objects very easily.
For more information see the help ( pg_dumplo -h ).
/* -------------------------------------------------------------------------
* pg_dumplo
*
* $PostgreSQL: pgsql/contrib/pg_dumplo/lo_export.c,v 1.13 2004/11/28 23:49:49 tgl Exp $
*
* Karel Zak 1999-2004
* -------------------------------------------------------------------------
*/
#include "postgres_fe.h"
#include <fcntl.h>
#include <errno.h>
#include <time.h>
#include <unistd.h>
#include <sys/stat.h>
#include "libpq-fe.h"
#include "libpq/libpq-fs.h"
#include "pg_dumplo.h"
extern int errno;
/*
 * load_lolist --
 *
 *	Fill pgLO->lolist with one entry per user-table column of type oid;
 *	these are the candidate columns that may reference large objects.
 *	The list is zero-filled and terminated by an entry whose lo_table is
 *	NULL.  Exits the program on query failure, on an empty result set,
 *	or on out-of-memory.
 */
void
load_lolist(LODumpMaster * pgLO)
{
	LOlist	   *ll;
	int			i;
	int			n;

	/*
	 * Now find any candidate tables who have columns of type oid.
	 *
	 * NOTE: System tables including pg_largeobject will be ignored.
	 * Otherwise we'd end up dumping all LOs, referenced or not.
	 *
	 * NOTE: the system oid column is ignored, as it has attnum < 1. This
	 * shouldn't matter for correctness, but it saves time.
	 */
	pgLO->res = PQexec(pgLO->conn, "SELECT c.relname, a.attname, n.nspname "
					   "FROM pg_catalog.pg_class c, pg_catalog.pg_attribute a, "
					   "     pg_catalog.pg_type t, pg_catalog.pg_namespace n "
					   "WHERE a.attnum > 0 "
					   "    AND a.attrelid = c.oid "
					   "    AND a.atttypid = t.oid "
					   "    AND t.typname = 'oid' "
					   "    AND c.relkind = 'r' "
					   "    AND c.relname NOT LIKE 'pg_%' "
					   "    AND n.oid = c.relnamespace");

	if (PQresultStatus(pgLO->res) != PGRES_TUPLES_OK)
	{
		fprintf(stderr, "%s: Failed to get LO OIDs:\n%s", progname,
				PQerrorMessage(pgLO->conn));
		exit(RE_ERROR);
	}

	if ((n = PQntuples(pgLO->res)) == 0)
	{
		fprintf(stderr, "%s: No OID columns in the database.\n", progname);
		exit(RE_ERROR);
	}

	/*
	 * BUGFIX: check the malloc() result *before* touching the buffer.
	 * The old code ran memset() first and would dereference a NULL
	 * pointer on out-of-memory instead of reporting the failure.
	 */
	pgLO->lolist = (LOlist *) malloc((n + 1) * sizeof(LOlist));
	if (!pgLO->lolist)
	{
		fprintf(stderr, "%s: can't allocate memory\n", progname);
		exit(RE_ERROR);
	}
	/* zero-fill so the final (n-th) entry acts as the NULL terminator */
	memset(pgLO->lolist, 0, (n + 1) * sizeof(LOlist));

	for (i = 0, ll = pgLO->lolist; i < n; i++, ll++)
	{
		ll->lo_table = strdup(PQgetvalue(pgLO->res, i, 0));
		ll->lo_attr = strdup(PQgetvalue(pgLO->res, i, 1));
		ll->lo_schema = strdup(PQgetvalue(pgLO->res, i, 2));
	}
	PQclear(pgLO->res);
}
/*
 * pglo_export --
 *
 *	For every candidate column in pgLO->lolist, find the large objects it
 *	references and either just print them (ACTION_SHOW) or lo_export()
 *	each one into the dump tree <space>/<db>/<schema>/<table>/<attr>/<oid>,
 *	appending one line per exported object to the index file.
 */
void
pglo_export(LODumpMaster * pgLO)
{
	LOlist	   *ll;
	int			tuples;
	char		path[BUFSIZ],
				Qbuff[QUERY_BUFSIZ];

	/* Write the index-file header (a mere listing needs no index) */
	if (pgLO->action != ACTION_SHOW)
	{
		time_t		t;

		time(&t);
		fprintf(pgLO->index, "#\n# This is the PostgreSQL large object dump index\n#\n");
		fprintf(pgLO->index, "#\tDate: %s", ctime(&t));
		fprintf(pgLO->index, "#\tHost: %s\n", pgLO->host);
		fprintf(pgLO->index, "#\tDatabase: %s\n", pgLO->db);
		fprintf(pgLO->index, "#\tUser: %s\n", pgLO->user);
		fprintf(pgLO->index, "#\n# oid\ttable\tattribut\tinfile\tschema\n#\n");
	}

	pgLO->counter = 0;

	/* lolist is terminated by an entry whose lo_table is NULL */
	for (ll = pgLO->lolist; ll->lo_table != NULL; ll++)
	{
		/*
		 * Query: find the LOs referenced by this column
		 */
		snprintf(Qbuff, QUERY_BUFSIZ,
				 "SELECT DISTINCT l.loid FROM \"%s\".\"%s\" x, pg_catalog.pg_largeobject l "
				 "WHERE x.\"%s\" = l.loid",
				 ll->lo_schema, ll->lo_table, ll->lo_attr);

		/* puts(Qbuff); */

		pgLO->res = PQexec(pgLO->conn, Qbuff);

		if (PQresultStatus(pgLO->res) != PGRES_TUPLES_OK)
		{
			/* a failed per-column query is reported but does not abort */
			fprintf(stderr, "%s: Failed to get LO OIDs:\n%s", progname,
					PQerrorMessage(pgLO->conn));
		}
		else if ((tuples = PQntuples(pgLO->res)) == 0)
		{
			if (!pgLO->quiet && pgLO->action == ACTION_EXPORT_ATTR)
				printf("%s: no large objects in \"%s\".\"%s\".\"%s\"\n",
					   progname, ll->lo_schema, ll->lo_table, ll->lo_attr);
		}
		else
		{
			int			t;
			char	   *val;

			/*
			 * Create DIR/FILE: build <space>/<db>/<schema>/<table>/<attr>
			 * one level at a time; an already-existing directory (EEXIST)
			 * is fine, any other mkdir failure is fatal.
			 */
			if (pgLO->action != ACTION_SHOW)
			{
				snprintf(path, BUFSIZ, "%s/%s/%s", pgLO->space, pgLO->db,
						 ll->lo_schema);

				if (mkdir(path, DIR_UMASK) == -1)
				{
					if (errno != EEXIST)
					{
						perror(path);
						exit(RE_ERROR);
					}
				}

				snprintf(path, BUFSIZ, "%s/%s/%s/%s", pgLO->space, pgLO->db,
						 ll->lo_schema, ll->lo_table);

				if (mkdir(path, DIR_UMASK) == -1)
				{
					if (errno != EEXIST)
					{
						perror(path);
						exit(RE_ERROR);
					}
				}

				snprintf(path, BUFSIZ, "%s/%s/%s/%s/%s", pgLO->space, pgLO->db,
						 ll->lo_schema, ll->lo_table, ll->lo_attr);

				if (mkdir(path, DIR_UMASK) == -1)
				{
					if (errno != EEXIST)
					{
						perror(path);
						exit(RE_ERROR);
					}
				}

				if (!pgLO->quiet)
					printf("dump %s.%s.%s (%d large obj)\n",
						   ll->lo_schema, ll->lo_table, ll->lo_attr, tuples);
			}

			pgLO->counter += tuples;

			for (t = 0; t < tuples; t++)
			{
				Oid			lo;

				val = PQgetvalue(pgLO->res, t, 0);
				lo = atooid(val);

				if (pgLO->action == ACTION_SHOW)
				{
					printf("%s.%s.%s: %u\n", ll->lo_schema, ll->lo_table, ll->lo_attr, lo);
					continue;
				}

				snprintf(path, BUFSIZ, "%s/%s/%s/%s/%s/%s", pgLO->space,
						 pgLO->db, ll->lo_schema, ll->lo_table, ll->lo_attr, val);

				if (lo_export(pgLO->conn, lo, path) < 0)
					fprintf(stderr, "%s: lo_export failed:\n%s", progname,
							PQerrorMessage(pgLO->conn));
				else
					/* index line: oid, table, attr, relative infile, schema */
					fprintf(pgLO->index, "%s\t%s\t%s\t%s/%s/%s/%s/%s\t%s\n",
							val, ll->lo_table, ll->lo_attr, pgLO->db,
							ll->lo_schema, ll->lo_table, ll->lo_attr,
							val, ll->lo_schema);
			}
		}
		PQclear(pgLO->res);
	}
}
/* -------------------------------------------------------------------------
* pg_dumplo
*
* $PostgreSQL: pgsql/contrib/pg_dumplo/lo_import.c,v 1.11 2004/11/28 23:49:49 tgl Exp $
*
* Karel Zak 1999-2004
* -------------------------------------------------------------------------
*/
#include "postgres_fe.h"
#include <fcntl.h>
#include <errno.h>
#include <time.h>
#include <unistd.h>
#include <sys/stat.h>
#include "libpq-fe.h"
#include "libpq/libpq-fs.h"
#include "pg_dumplo.h"
extern int errno;
/*
 * pglo_import --
 *
 *	Read the lo_dump.index file line by line, lo_import() each listed
 *	large object into the database, optionally lo_unlink() the old one
 *	(pgLO->remove), and UPDATE the referencing column from the old OID
 *	to the newly assigned one.  Any failure rolls back and exits.
 */
void
pglo_import(LODumpMaster * pgLO)
{
	LOlist		loa;
	Oid			new_oid;
	int			ret,
				line = 0;
	char		tab[MAX_TABLE_NAME],
				attr[MAX_ATTR_NAME],
				sch[MAX_SCHEMA_NAME],
				path[BUFSIZ],
				lo_path[BUFSIZ],
				Qbuff[QUERY_BUFSIZ];

	while (fgets(Qbuff, QUERY_BUFSIZ, pgLO->index))
	{
		line++;

		/* '#' lines in the index file are comments */
		if (*Qbuff == '#')
			continue;

		if (!pgLO->remove && !pgLO->quiet)

			/*
			 * BUGFIX: this was printf(Qbuff).  Passing file contents as a
			 * printf format string misbehaves (or crashes) whenever a
			 * dumped path contains a '%' character.
			 */
			fputs(Qbuff, stdout);

		/*
		 * NOTE(review): the %s conversions are unbounded; an index line
		 * with over-long fields could overflow these fixed buffers.
		 */
		if ((ret = sscanf(Qbuff, "%u\t%s\t%s\t%s\t%s\n", &loa.lo_oid, tab, attr, path, sch)) < 5)
		{
			/* backward compatible mode: no schema column, assume public */
			ret = sscanf(Qbuff, "%u\t%s\t%s\t%s\n", &loa.lo_oid, tab, attr, path);
			strcpy(sch, "public");
		}
		if (ret < 4)
		{
			fprintf(stderr, "%s: index file reading failed at line %d\n", progname, line);
			PQexec(pgLO->conn, "ROLLBACK");
			fprintf(stderr, "\n%s: ROLLBACK\n", progname);
			exit(RE_ERROR);
		}
		loa.lo_schema = sch;
		loa.lo_table = tab;
		loa.lo_attr = attr;

		/*
		 * path is an array, so the old "path &&" test was always true;
		 * only the leading-'/' check is meaningful.
		 */
		if (*path == '/')
			/* absolute path */
			snprintf(lo_path, BUFSIZ, "%s", path);
		else
			snprintf(lo_path, BUFSIZ, "%s/%s", pgLO->space, path);

		/*
		 * Import LO
		 */
		if ((new_oid = lo_import(pgLO->conn, lo_path)) == 0)
		{
			fprintf(stderr, "%s: %s\n", progname, PQerrorMessage(pgLO->conn));
			PQexec(pgLO->conn, "ROLLBACK");
			fprintf(stderr, "\n%s: ROLLBACK\n", progname);
			exit(RE_ERROR);
		}

		if (pgLO->remove)
		{
			/* suppress notices while unlinking the old LO */
			notice(pgLO, FALSE);
			if (lo_unlink(pgLO->conn, loa.lo_oid) < 0)
				fprintf(stderr, "%s: can't remove LO %u:\n%s",
						progname, loa.lo_oid, PQerrorMessage(pgLO->conn));
			else if (!pgLO->quiet)
				printf("remove old %u and create new %u\n",
					   loa.lo_oid, new_oid);
			notice(pgLO, TRUE);
		}

		pgLO->counter++;

		/*
		 * UPDATE oid in tab: repoint the referencing column at the new LO
		 */
		snprintf(Qbuff, QUERY_BUFSIZ,
				 "UPDATE \"%s\".\"%s\" SET \"%s\"=%u WHERE \"%s\"=%u",
				 loa.lo_schema, loa.lo_table, loa.lo_attr, new_oid, loa.lo_attr, loa.lo_oid);

		/* fprintf(stderr, "%s", Qbuff); */

		pgLO->res = PQexec(pgLO->conn, Qbuff);

		if (PQresultStatus(pgLO->res) != PGRES_COMMAND_OK)
		{
			fprintf(stderr, "%s: %s\n", progname, PQerrorMessage(pgLO->conn));
			PQclear(pgLO->res);
			PQexec(pgLO->conn, "ROLLBACK");
			fprintf(stderr, "\n%s: ROLLBACK\n", progname);
			exit(RE_ERROR);
		}
		PQclear(pgLO->res);
	}
}
/* -------------------------------------------------------------------------
* pg_dumplo
*
* $PostgreSQL: pgsql/contrib/pg_dumplo/main.c,v 1.22 2004/11/28 23:49:49 tgl Exp $
*
* Karel Zak 1999-2000
* -------------------------------------------------------------------------
*/
#include "postgres_fe.h"
#include <errno.h>
#include <unistd.h>
#include "libpq-fe.h"
#include "libpq/libpq-fs.h"
#include "pg_dumplo.h"
#ifndef HAVE_STRDUP
#include "strdup.h"
#endif
#include "getopt_long.h"
#ifndef HAVE_INT_OPTRESET
int optreset;
#endif
char *progname = NULL;
int main(int argc, char **argv);
static void usage(void);
static void parse_lolist(LODumpMaster * pgLO);
/*-----
* The mother of all C functions
*-----
*/
/*
 * main --
 *
 *	pg_dumplo entry point: parse the command line, connect to the
 *	database, open the index file, then run the requested action
 *	(export, import or show) inside a single transaction.
 */
int
main(int argc, char **argv)
{
	LODumpMaster _pgLO,
			   *pgLO = &_pgLO;
	char	   *pwd = NULL;

	pgLO->argv = argv;
	pgLO->argc = argc;
	pgLO->action = 0;
	pgLO->lolist = NULL;
	pgLO->user = NULL;
	pgLO->db = NULL;
	pgLO->host = NULL;
	pgLO->port = NULL;
	pgLO->space = NULL;
	pgLO->index = NULL;
	pgLO->remove = FALSE;
	pgLO->quiet = FALSE;
	pgLO->counter = 0;
	pgLO->lolist_start = 0;

	progname = argv[0];

	/*
	 * Parse ARGV
	 */
	if (argc > 1)
	{
		int			arg;
		extern int	optind;
		int			l_index = 0;
		static struct option l_opt[] = {
			{"help", no_argument, 0, 'h'},
			{"user", required_argument, 0, 'u'},

			/*
			 * NOTE(review): usage() documents this option as --password,
			 * but the long form actually accepted here is --pwd.
			 */
			{"pwd", required_argument, 0, 'p'},
			{"db", required_argument, 0, 'd'},

			/*
			 * BUGFIX: --host used to map to 'h' -- the same value as
			 * --help -- so it printed the help text and exited instead
			 * of setting the host.  It must map to 't', the matching
			 * short option handled below.
			 */
			{"host", required_argument, 0, 't'},
			{"port", required_argument, 0, 'o'},
			{"space", required_argument, 0, 's'},
			{"import", no_argument, 0, 'i'},
			{"export", no_argument, 0, 'e'},
			{"remove", no_argument, 0, 'r'},
			{"quiet", no_argument, 0, 'q'},
			{"all", no_argument, 0, 'a'},
			{"show", no_argument, 0, 'w'},
			{NULL, 0, 0, 0}
		};

		while ((arg = getopt_long(argc, argv, "?aeho:u:p:qd:l:t:irs:w", l_opt, &l_index)) != -1)
		{
			switch (arg)
			{
				case '?':
				case 'h':
					usage();
					exit(RE_OK);
				case 'u':
					pgLO->user = strdup(optarg);
					break;
				case 't':
					pgLO->host = strdup(optarg);
					break;
				case 'o':
					pgLO->port = strdup(optarg);
					break;
				case 'p':
					pwd = strdup(optarg);
					break;
				case 'd':
					pgLO->db = strdup(optarg);
					break;
				case 's':
					pgLO->space = strdup(optarg);
					break;
				case 'i':
					pgLO->action = ACTION_IMPORT;
					break;
				case 'l':
					/* -l must come last: it consumes all remaining args */
					pgLO->action = ACTION_EXPORT_ATTR;
					pgLO->lolist_start = optind - 1;
					parse_lolist(pgLO);
					break;
				case 'e':
				case 'a':
					pgLO->action = ACTION_EXPORT_ALL;
					break;
				case 'w':
					pgLO->action = ACTION_SHOW;
					break;
				case 'r':
					pgLO->remove = TRUE;
					break;
				case 'q':
					pgLO->quiet = TRUE;
					break;
				default:
					fprintf(stderr, "%s: bad arg -%c\n", progname, arg);
					usage();
					exit(RE_ERROR);
			}
		}
	}
	else
	{
		usage();
		exit(RE_ERROR);
	}

	/*
	 * Check space: default the dump-tree root to $PWD, or "." as a
	 * last resort (not needed for a mere listing).
	 */
	if (pgLO->space == NULL && pgLO->action != ACTION_SHOW)
	{
		if (!(pgLO->space = getenv("PWD")))
			pgLO->space = ".";
	}

	if (!pgLO->action)
	{
		fprintf(stderr, "%s: What do you want - export or import?\n", progname);
		exit(RE_ERROR);
	}

	/*
	 * Make connection
	 */
	pgLO->conn = PQsetdbLogin(pgLO->host, pgLO->port, NULL, NULL, pgLO->db,
							  pgLO->user, pwd);
	if (PQstatus(pgLO->conn) == CONNECTION_BAD)
	{
		fprintf(stderr, "%s (connection): %s\n", progname, PQerrorMessage(pgLO->conn));
		exit(RE_ERROR);
	}
	/* adopt the effective connection parameters for later messages */
	pgLO->host = PQhost(pgLO->conn) ? PQhost(pgLO->conn) : "localhost";
	pgLO->db = PQdb(pgLO->conn);
	pgLO->user = PQuser(pgLO->conn);

	/*
	 * Init index file
	 */
	if (pgLO->action != ACTION_SHOW)
		index_file(pgLO);

	PQexec(pgLO->conn, "SET search_path = public");
	PQexec(pgLO->conn, "BEGIN");

	switch (pgLO->action)
	{
		case ACTION_SHOW:
		case ACTION_EXPORT_ALL:
			load_lolist(pgLO);
			/* FALL THROUGH */

		case ACTION_EXPORT_ATTR:
			pglo_export(pgLO);
			if (!pgLO->quiet)
			{
				if (pgLO->action == ACTION_SHOW)
					printf("\nDatabase '%s' contains %d large objects.\n\n", pgLO->db, pgLO->counter);
				else
					printf("\nExported %d large objects.\n\n", pgLO->counter);
			}
			break;

		case ACTION_IMPORT:
			pglo_import(pgLO);
			if (!pgLO->quiet)
				printf("\nImported %d large objects.\n\n", pgLO->counter);
			break;
	}

	PQexec(pgLO->conn, "COMMIT");
	PQfinish(pgLO->conn);

	if (pgLO->action != ACTION_SHOW)
		fclose(pgLO->index);

	exit(RE_OK);
}
/*
 * parse_lolist --
 *
 *	Parse the "-l" arguments ("table.attr" or "schema.table.attr",
 *	starting at argv[pgLO->lolist_start]) into pgLO->lolist.  A missing
 *	schema defaults to "public".  The list is terminated by an entry
 *	whose lo_table and lo_attr are NULL.  Exits on a malformed argument
 *	or out-of-memory.
 */
static void
parse_lolist(LODumpMaster * pgLO)
{
	LOlist	   *ll;
	char	  **d,
			   *loc,
			   *loc2,
				buff[MAX_SCHEMA_NAME + MAX_TABLE_NAME + MAX_ATTR_NAME + 3];

	pgLO->lolist = (LOlist *) malloc(pgLO->argc * sizeof(LOlist));

	if (!pgLO->lolist)
	{
		fprintf(stderr, "%s: can't allocate memory\n", progname);
		exit(RE_ERROR);
	}

	for (d = pgLO->argv + pgLO->lolist_start, ll = pgLO->lolist;
		 *d != NULL;
		 d++, ll++)
	{
		/*
		 * NOTE(review): strncpy does not NUL-terminate when the argument
		 * is longer than the buffer; over-long arguments are unsafe here.
		 */
		strncpy(buff, *d, MAX_SCHEMA_NAME + MAX_TABLE_NAME + MAX_ATTR_NAME + 2);

		if ((loc = strchr(buff, '.')) == NULL || *(loc + 1) == '\0')
		{
			fprintf(stderr, "%s: '%s' is bad 'table.attr' or 'schema.table.attr'\n", progname, buff);
			exit(RE_ERROR);
		}
		loc2 = strchr(loc + 1, '.');
		*loc = '\0';
		if (loc2)
		{
			/* "schema.table.attr" */
			*loc2 = '\0';
			ll->lo_schema = strdup(buff);
			ll->lo_table = strdup(loc + 1);
			ll->lo_attr = strdup(loc2 + 1);
		}
		else
		{
			/* "table.attr": default the schema to public */
			ll->lo_schema = strdup("public");
			ll->lo_table = strdup(buff);
			ll->lo_attr = strdup(loc + 1);
		}
	}

	/*
	 * BUGFIX: the loop increment already leaves ll pointing at the first
	 * unused slot, so the extra "ll++" that used to be here skipped one
	 * element and left an uninitialized (garbage) entry in front of the
	 * terminator; pglo_export() would then read that garbage while
	 * scanning for the NULL lo_table.
	 */
	ll->lo_table = ll->lo_attr = (char *) NULL;
}
/*
 * usage --
 *
 *	Print the command-line help text and usage examples to stdout.
 */
static void
usage(void)
{
	printf("\npg_dumplo %s - PostgreSQL large objects dump\n", PG_VERSION);
	puts("pg_dumplo [option]\n\n"
		 "-h --help this help\n"
		 "-u --user=<username> username for connection to server\n"
		 "-p --password=<password> password for connection to server\n"
		 "-d --db=<database> database name\n"
		 "-t --host=<hostname> server hostname\n"
		 "-o --port=<port> database server port (default: 5432)\n"
		 "-s --space=<dir> directory with dump tree (for export/import)\n"
		 "-i --import import large obj dump tree to DB\n"
		 "-e --export export (dump) large obj to dump tree\n"
		 "-l <schema.table.attr ...> dump attribute (columns) with LO to dump tree\n"
		 "-a --all dump all LO in DB (default)\n"
		 "-r --remove if is set '-i' try remove old LO\n"
		 "-q --quiet run quietly\n"
		 "-w --show not dump, but show all LO in DB\n"
		 "\n"
		 "Example (dump): pg_dumplo -d my_db -s /my_dump/dir -l t1.a t1.b t2.a\n"
		 " pg_dumplo -a -d my_db -s /my_dump/dir\n"
		 "Example (import): pg_dumplo -i -d my_db -s /my_dump/dir\n"
		 "Example (show): pg_dumplo -w -d my_db\n\n"
		 "Note: * option '-l' must be last option!\n"
		 " * default schema is \"public\"\n"
		 " * option '-i' without option '-r' make new large obj in DB\n"
		 " not rewrite old, the '-i' UPDATE oid numbers in table.attr only!\n"
		 " * if option -s is not set, pg_dumplo uses $PWD or \".\"\n"
		);						/* puts() */
}
/* -------------------------------------------------------------------------
* pg_dumplo.h
*
* $PostgreSQL: pgsql/contrib/pg_dumplo/pg_dumplo.h,v 1.11 2004/11/28 23:49:49 tgl Exp $
*
* Karel Zak 1999-2004
* -------------------------------------------------------------------------
*/
#ifndef PG_DUMPLO_H
#define PG_DUMPLO_H

#include "postgres_ext.h"

/* ----------
 * Define
 * ----------
 */
#define QUERY_BUFSIZ	(8*1024)	/* size of SQL/line work buffers */
#define DIR_UMASK	0755			/* mode for created dump directories */
#define FILE_UMASK	0644			/* mode for created dump files */

#define TRUE		1
#define FALSE		0

#define RE_OK		0				/* process exit code: success */
#define RE_ERROR	1				/* process exit code: failure */

#define MAX_SCHEMA_NAME 128
#define MAX_TABLE_NAME	128
#define MAX_ATTR_NAME	128

/* parse an OID back from its decimal string form */
#define atooid(x)  ((Oid) strtoul((x), NULL, 10))

/* ----------
 * LO struct
 * ----------
 */

/* One candidate column (schema.table.attr); lo_oid is used on import */
typedef struct
{
	char	   *lo_schema,
			   *lo_table,
			   *lo_attr;
	Oid			lo_oid;
} LOlist;

/* Global state shared by all pg_dumplo phases */
typedef struct
{
	int			action;			/* a PGLODUMP_ACTIONS value */
	LOlist	   *lolist;			/* list terminated by NULL lo_table */
	char	  **argv,			/* saved command line */
			   *user,
			   *db,
			   *host,
			   *port,
			   *space;			/* root directory of the dump tree */
	FILE	   *index;			/* lo_dump.index file handle */
	int			counter,		/* number of LOs processed so far */
				argc,
				lolist_start,	/* argv index of the first -l argument */
				remove,			/* boolean: unlink old LOs on import */
				quiet;			/* boolean: suppress progress output */
	PGresult   *res;
	PGconn	   *conn;
} LODumpMaster;

/* What the program has been asked to do */
typedef enum
{
	ACTION_NONE,
	ACTION_SHOW,
	ACTION_EXPORT_ATTR,
	ACTION_EXPORT_ALL,
	ACTION_IMPORT
} PGLODUMP_ACTIONS;

extern char *progname;

extern void notice(LODumpMaster * pgLO, int set);
extern void index_file(LODumpMaster * pgLO);
extern void load_lolist(LODumpMaster * pgLO);
extern void pglo_export(LODumpMaster * pgLO);
extern void pglo_import(LODumpMaster * pgLO);

#endif   /* PG_DUMPLO_H */
/* -------------------------------------------------------------------------
* pg_dumplo
*
* $PostgreSQL: pgsql/contrib/pg_dumplo/utils.c,v 1.9 2004/08/29 05:06:36 momjian Exp $
*
* Karel Zak 1999-2000
* -------------------------------------------------------------------------
*/
#include "postgres_fe.h"
#include <fcntl.h>
#include <errno.h>
#include <time.h>
#include <unistd.h>
#include <sys/stat.h>
#include "libpq-fe.h"
#include "libpq/libpq-fs.h"
#include "pg_dumplo.h"
extern int errno;
static void Dummy_NoticeProcessor(void *arg, const char *message);
static void Default_NoticeProcessor(void *arg, const char *message);
/*
 * index_file --
 *
 *	Open the dump-index file <space>/<db>/lo_dump.index into pgLO->index:
 *	for export actions it is created for writing (the <space>/<db>
 *	directory is created first if needed); for any other action it is
 *	opened for reading.  ACTION_SHOW needs no index file at all.
 *	Exits on any filesystem error.
 */
void
index_file(LODumpMaster * pgLO)
{
	char		path[BUFSIZ];
	int			sz;

	if (pgLO->action == ACTION_SHOW)
		return;

	snprintf(path, BUFSIZ, "%s/%s", pgLO->space, pgLO->db);

	if (pgLO->action == ACTION_EXPORT_ATTR ||
		pgLO->action == ACTION_EXPORT_ALL)
	{
		/* an already-existing dump directory (EEXIST) is not an error */
		if (mkdir(path, DIR_UMASK) == -1)
		{
			if (errno != EEXIST)
			{
				perror(path);
				exit(RE_ERROR);
			}
		}

		sz = strlen(path);
		strncat(path, "/lo_dump.index", BUFSIZ - sz);

		if ((pgLO->index = fopen(path, "w")) == NULL)
		{
			perror(path);
			exit(RE_ERROR);
		}
	}
	else if (pgLO->action != ACTION_NONE)
	{
		sz = strlen(path);
		strncat(path, "/lo_dump.index", BUFSIZ - sz);

		if ((pgLO->index = fopen(path, "r")) == NULL)
		{
			perror(path);
			exit(RE_ERROR);
		}
	}
}
/* No-op libpq notice processor: swallows server notices entirely. */
static void
Dummy_NoticeProcessor(void *arg, const char *message)
{
	(void) arg;
	(void) message;
}
/* Default libpq notice processor: forward the message verbatim to stderr. */
static void
Default_NoticeProcessor(void *arg, const char *message)
{
	(void) arg;
	fputs(message, stderr);
}
/*
 * notice --
 *
 *	Toggle libpq server-notice output for the connection: a nonzero
 *	"set" restores the default processor (print to stderr); zero
 *	installs the no-op processor, suppressing notices.
 */
void
notice(LODumpMaster * pgLO, int set)
{
	if (set)
		PQsetNoticeProcessor(pgLO->conn, Default_NoticeProcessor, NULL);
	else
		PQsetNoticeProcessor(pgLO->conn, Dummy_NoticeProcessor, NULL);
}
pg_upgrade
PG_UPGRADE IS NOT AVAILABLE FOR UPGRADES FROM 7.2.X.
This is a version of pg_upgrade which will allow a 7.3 to 7.3 migration
if you need to perform an initdb. It has been only lightly tested.
Please report any problems to the PostgreSQL lists.
Read the manual page for more information. To view it:
nroff -man pg_upgrade.1 | more
Bruce Momjian
2002-07-03
This diff is collapsed.
.\"
.TH PG_UPGRADE 1 "PG_UPGRADE(1)" "14 Jan 2002" "PostgreSQL Client Applications" ""
.SH NAME
pg_upgrade \- upgrading from a previous release without reloading
.SH SYNOPSIS
pg_upgrade [-D \fIdata_dir\fP] -1 | -2
.SH DESCRIPTION
\fBpg_upgrade\fP is a utility for upgrading from a previous PostgreSQL
release without reloading all the data. It can also be used as a data
recovery tool.
.LP
\fBpg_upgrade\fP must be run in two stages. In phase one you must run
\fBpg_upgrade\fP with your old database installation in place. In phase
two, \fBpg_upgrade\fP must be run on a freshly \fBinitdb\fP'ed server.
In both phases, the same newly installed \fBpg_upgrade\fP script must be
used.
.SH Upgrading PostgreSQL with pg_upgrade
.LP
1) Back up your existing data directory, preferably using
\fBpg_dumpall.\fP
.LP
2) Copy the program \fIpgsql/contrib/pg_upgrade/pg_upgrade\fP from the
current PostgreSQL distribution somewhere into your path.
.LP
3) Run phase one of \fBpg_upgrade:\fP
.LP
.B $ pg_upgrade -1
.sp
to collect information about the old database needed for the upgrade.
You may use \fI-D\fP to specify the data directory. By default it uses
the environment variable \fIPGDATA.\fP
.LP
4) Do:
.LP
.B $ cd pgsql/src
.br
.B $ make install
.sp
to install the PostgreSQL binaries for the new release.
.LP
5) Do:
.LP
.B $ cd pgsql/contrib/pg_resetxlog
.br
.B $ make install
.sp
to install the \fIpg_resetxlog\fP utility, which is needed during phase
2 of \fBpg_upgrade\fP.
.LP
6) Run initdb to create a new template1 database containing the system
tables for the new release. Make sure you use settings similar to those
used in your previous version.
.LP
7) Start the new \fIpostmaster.\fP (Note: it is critical that no users
connect to the server until the upgrade is complete. You may wish to
start the postmaster without -i or alter pg_hba.conf temporarily.)
.LP
8) Run phase two of \fBpg_upgrade:\fP
.LP
.B $ pg_upgrade -2
.sp
The program will do some checking to make sure everything is properly
configured, and will then recreate all your databases and tables,
but with no data. It will then physically move the data files
containing non-system tables and indexes into the proper
subdirectories.
.LP
9) Restore your old \fIpostmaster\fP flags or \fIpg_hba.conf\fP if
needed to allow user logins.
.sp
.LP
10) Carefully examine the contents of the upgraded databases. If you
detect problems, you'll need to recover by restoring from your full
\fBpg_dumpall\fP backup. You can delete the \fIpg_upgrade_info/\fP
directory when you are satisfied.
.LP
The upgraded databases will be in an un-vacuumed state. You will
probably want to run a \fIVACUUM ANALYZE\fP before beginning production
work.
.SH NOTES
While \fBpg_upgrade\fP is primarily an upgrade tool, it can also be used
for data recovery. During phase 1, \fBpg_upgrade\fP creates database
name / oid and database name / table name / oid mapping files in
\fIpg_upgrade_info/.\fP These files are tab-delimited. If your system is
too damaged, you may need to manually pull this information out of
\fBpg_database\fP and \fBpg_class\fP and create the files manually.
(Keep in mind most tables have \fBpg_toast_OID\fP and
\fBpg_toast_OID_idx\fP files that store very long values. These must be
recorded as well.) It also creates a schema-only \fBpg_dumpall.\fP In a
damaged installation, you may be able to make one of these from a recent
full \fBpg_dumpall.\fP
.LP
Phase 2 rebuilds each database with the schema from the old
installation. It then moves the physical data files from the old
installation and makes some modifications so the old data files work
properly in the new installation.
.SH SEE ALSO
initdb(1), postmaster(1), pg_dump(1), pg_dumpall(1), vacuumdb(1)
# $PostgreSQL: pgsql/contrib/string/Makefile,v 1.18 2004/08/20 20:13:08 momjian Exp $

# Build the string_io module (C-style escaped text I/O functions).
MODULES = string_io
DATA_built = string_io.sql
DOCS = README.string_io

# Standard contrib boilerplate: build either against an installed server
# (USE_PGXS set) or from inside the source tree.
ifdef USE_PGXS
PGXS = $(shell pg_config --pgxs)
include $(PGXS)
else
subdir = contrib/string
top_builddir = ../..
include $(top_builddir)/src/Makefile.global
include $(top_srcdir)/contrib/contrib-global.mk
endif
String io module for postgresql.
Copyright (C) 1999, Massimo Dal Zotto <dz@cs.unitn.it>
This software is distributed under the GNU General Public License
either version 2, or (at your option) any later version.
These output functions can be used as substitution of the standard text
output functions to get the value of text fields printed in the format
used for C strings. This allows the output of queries or the exported
files to be processed more easily using standard unix filter programs
like perl or awk.
If you use the standard functions instead you could find a single tuple
split into many lines, and the tabs embedded in the values could be
confused with those used as field delimiters.
My function translates all non-printing characters into corresponding
escape sequences as defined by the C syntax. All you need to reconstruct
the exact value in your application is a corresponding unescape function
like the string_input defined in the source code.
Massimo Dal Zotto <dz@cs.unitn.it>
/*
* string_io.c --
*
* This file defines C-like input/output conversion routines for strings.
*
* Copyright (C) 1999, Massimo Dal Zotto <dz@cs.unitn.it>
*
* This software is distributed under the GNU General Public License
* either version 2, or (at your option) any later version.
*/
#include "postgres.h"
#include <ctype.h>
#include "utils/builtins.h"
#include "string_io.h"
/* define this if you want to see iso-8859 characters */
#define ISO8859
#define VALUE(char) ((char) - '0')
#define DIGIT(val) ((val) + '0')
#define ISOCTAL(c) (((c) >= '0') && ((c) <= '7'))
#ifndef ISO8859
#define NOTPRINTABLE(c) (!isprint((unsigned char) (c)))
#else
#define NOTPRINTABLE(c) (!isprint((unsigned char) (c)) && \
((unsigned char) (c) < (unsigned char) 0xa0))
#endif
/*
 * string_output() --
 *
 * This function takes a pointer to a string data and an optional
 * data size and returns a printable representation of the string
 * translating all escape sequences to C-like \nnn or \c escapes.
 * The function is used by output methods of various string types.
 *
 * Arguments:
 *	data -		input data (can be NULL)
 *	size -		optional size of data. A negative value indicates
 *				that data is a null terminated string.
 *
 * Returns:
 *	a pointer to a new string containing the printable
 *	representation of data.
 *
 * A NULL input is rendered as the single character "-".  The result is
 * allocated with palloc().
 *
 * NOTE(review): the (char *) casts on palloc results and the return
 * value clash with the declared unsigned char * types; harmless, but
 * they draw compiler warnings.
 */
unsigned char *
string_output(unsigned char *data, int size)
{
	register unsigned char c,
			   *p,
			   *r,
			   *result;
	register int l,
				len;

	if (data == NULL)
	{
		result = (char *) palloc(2);
		result[0] = '-';
		result[1] = '\0';
		return (result);
	}

	if (size < 0)
		size = strlen(data);

	/*
	 * First pass: compute the output length, adding room for each escape
	 * we will emit (1 extra byte for \c forms, 3 extra for \nnn octal).
	 */
	/* adjust string length for escapes */
	len = size;
	for (p = data, l = size; l > 0; p++, l--)
	{
		switch (*p)
		{
			case '\\':
			case '"':
			case '\b':
			case '\f':
			case '\n':
			case '\r':
			case '\t':
			case '\v':
				len++;
				break;
			case '{':
				/* Escape beginning of string, to distinguish from arrays */
				if (p == data)
					len++;
				break;
			default:
				if (NOTPRINTABLE(*p))
					len += 3;
		}
	}
	len++;						/* room for the terminating NUL */

	result = (char *) palloc(len);

	/*
	 * Second pass: copy the data, emitting escapes.  Note the loop also
	 * stops at an embedded NUL byte even when an explicit size was
	 * given, so only bytes up to the first NUL are converted (the first
	 * pass may thus over-allocate; never under-allocate).
	 */
	for (p = data, r = result, l = size; (l > 0) && (c = *p); p++, l--)
	{
		switch (c)
		{
			case '\\':
			case '"':
				*r++ = '\\';
				*r++ = c;
				break;
			case '\b':
				*r++ = '\\';
				*r++ = 'b';
				break;
			case '\f':
				*r++ = '\\';
				*r++ = 'f';
				break;
			case '\n':
				*r++ = '\\';
				*r++ = 'n';
				break;
			case '\r':
				*r++ = '\\';
				*r++ = 'r';
				break;
			case '\t':
				*r++ = '\\';
				*r++ = 't';
				break;
			case '\v':
				*r++ = '\\';
				*r++ = 'v';
				break;
			case '{':
				/* Escape beginning of string, to distinguish from arrays */
				if (p == data)
					*r++ = '\\';
				*r++ = c;
				break;
			default:
				if (NOTPRINTABLE(c))
				{
					/*
					 * Emit \nnn: the backslash is written first, then the
					 * three octal digits are written backwards (least
					 * significant first) while shifting c right.
					 */
					*r = '\\';
					r += 3;
					*r-- = DIGIT(c & 07);
					c >>= 3;
					*r-- = DIGIT(c & 07);
					c >>= 3;
					*r = DIGIT(c & 03);
					r += 3;
				}
				else
					*r++ = c;
		}
	}
	*r = '\0';
	return ((char *) result);
}
/*
 * string_input() --
 *
 * This function accepts a C string in input and copies it into a new
 * object allocated with palloc() translating all escape sequences.
 * An optional header can be allocated before the string, for example
 * to hold the length of a varlena object.
 * This function is not necessary for input from sql commands because
 * the parser already does escape translation, all data input routines
 * receive strings in internal form.
 *
 * Arguments:
 *	str -		input string possibly with escapes
 *	size -		the required size of new data. A value of 0
 *				indicates a variable size string, while a
 *				negative value indicates a variable size string
 *				of size not greater than this absolute value.
 *	hdrsize -	size of an optional header to be allocated before
 *				the data. It must then be filled by the caller.
 *	rtn_size -	an optional pointer to an int variable where the
 *				size of the new string is stored back.
 *
 * Returns:
 *	a pointer to the new string or the header.
 *
 * NOTE(review): the decode loop below gathers octal digits with
 * isdigit(), so "\89" is (wrongly) treated as an octal escape, while
 * the sizing pass uses ISOCTAL; the mismatch only over-allocates.
 * More seriously, when size < 0 the decode loop still writes the full
 * decoded string even if it exceeds the clamped allocation -- a
 * potential buffer overrun for over-long inputs.
 */
unsigned char *
string_input(unsigned char *str, int size, int hdrsize, int *rtn_size)
{
	register unsigned char *p,
			   *r;
	unsigned char *result;
	int			len;

	if ((str == NULL) || (hdrsize < 0))
		return (char *) NULL;

	/* Compute result size: each escape sequence shrinks by 1-3 bytes */
	len = strlen(str);
	for (p = str; *p;)
	{
		if (*p++ == '\\')
		{
			if (ISOCTAL(*p))
			{
				if (ISOCTAL(*(p + 1)))
				{
					p++;
					len--;
				}
				if (ISOCTAL(*(p + 1)))
				{
					p++;
					len--;
				}
			}
			if (*p)
				p++;
			len--;
		}
	}

	/* result has variable length */
	if (size == 0)
		size = len + 1;
	else
	/* result has variable length with maximum size */
	if (size < 0)
		size = Min(len, -size) + 1;

	result = (char *) palloc(hdrsize + size);
	memset(result, 0, hdrsize + size);
	if (rtn_size)
		*rtn_size = size;

	/* Decode pass: translate \nnn octal and \c single-char escapes */
	r = result + hdrsize;
	for (p = str; *p;)
	{
		register unsigned char c;

		if ((c = *p++) == '\\')
		{
			switch (c = *p++)
			{
				case '\0':
					/* trailing lone backslash: back up so the loop ends */
					p--;
					break;
				case '0':
				case '1':
				case '2':
				case '3':
				case '4':
				case '5':
				case '6':
				case '7':
					c = VALUE(c);
					if (isdigit(*p))
						c = (c << 3) + VALUE(*p++);
					if (isdigit(*p))
						c = (c << 3) + VALUE(*p++);
					*r++ = c;
					break;
				case 'b':
					*r++ = '\b';
					break;
				case 'f':
					*r++ = '\f';
					break;
				case 'n':
					*r++ = '\n';
					break;
				case 'r':
					*r++ = '\r';
					break;
				case 't':
					*r++ = '\t';
					break;
				case 'v':
					*r++ = '\v';
					break;
				default:
					*r++ = c;
			}
		}
		else
			*r++ = c;
	}

	return ((char *) result);
}
/*
 * Output a single "char" value (passed in as an int32) as an escaped
 * C-style string, delegating the escaping to string_output().
 */
unsigned char *
c_charout(int32 c)
{
	char		buf[2];

	buf[0] = (char) c;
	buf[1] = '\0';
	return string_output(buf, 1);
}
/*
 * Output routine usable for SET, bytea, text and unknown data types:
 * escape the varlena payload and return it as a C string.
 * A NULL input is passed through to string_output as an empty payload.
 */
unsigned char *
c_textout(struct varlena * vlena)
{
	char	   *data = NULL;
	int			nbytes = 0;

	if (vlena)
	{
		data = VARDATA(vlena);
		nbytes = VARSIZE(vlena) - VARHDRSZ;
	}
	return string_output(data, nbytes);
}
/*
* This can be used for varchar and bpchar strings
*/
unsigned char *
c_varcharout(unsigned char *s)
{
int len = 0;
if (s)
{
len = *(int32 *) s - 4;
s += 4;
}
return (string_output(s, len));
}
#if 0
/*
 * Disabled input routines: escape translation on SQL literals is already
 * performed by the parser (see the note at the top of this file), so
 * these conversions are unnecessary in normal operation.
 */
struct varlena *
c_textin(unsigned char *str)
{
	struct varlena *result;
	int			len;

	if (str == NULL)
		return ((struct varlena *) NULL);
	/* string_input allocates VARHDRSZ extra header bytes for us */
	result = (struct varlena *) string_input(str, 0, VARHDRSZ, &len);
	/* len as returned already includes the VARHDRSZ header */
	VARSIZE(result) = len;
	return (result);
}

int32 *
c_charin(unsigned char *str)
{
	/* fixed size 1, no header, caller does not need the length back */
	return (string_input(str, 1, 0, NULL));
}
#endif
/* end of file */
/*
* Local Variables:
* tab-width: 4
* c-indent-level: 4
* c-basic-offset: 4
* End:
*/
#ifndef STRING_IO_H
#define STRING_IO_H

/* Escape non-printing characters; see string_io.c for details. */
unsigned char *string_output(unsigned char *data, int size);
/* Translate backslash escapes into bytes; see string_io.c for the
 * meaning of size/hdrsize/rtn_size. */
unsigned char *string_input(unsigned char *str, int size, int hdrsize,
				int *rtn_size);
unsigned char *c_charout(int32 c);
unsigned char *c_textout(struct varlena * vlena);
unsigned char *c_varcharout(unsigned char *s);

#if 0
/* Input routines are disabled: the SQL parser already handles escapes. */
struct varlena *c_textin(unsigned char *str);
/* Bug fix: the prototype below was missing its terminating semicolon. */
int32 *c_charin(unsigned char *str);
#endif

#endif							/* STRING_IO_H */
/*
* Local Variables:
* tab-width: 4
* c-indent-level: 4
* c-basic-offset: 4
* End:
*/
-- string_io.sql --
--
-- SQL code to define the new string I/O functions
--
-- Copyright (c) 1998, Massimo Dal Zotto <dz@cs.unitn.it>
--
-- This file is distributed under the GNU General Public License
-- either version 2, or (at your option) any later version.

-- Define the new output functions.
--
-- Each function wraps the C routine of the same name in string_io.c.
-- MODULE_PATHNAME is replaced with the shared-library path when this
-- script is installed.
--
CREATE FUNCTION c_charout(bpchar)
RETURNS cstring
AS 'MODULE_PATHNAME'
LANGUAGE 'C';

CREATE FUNCTION c_textout(text)
RETURNS cstring
AS 'MODULE_PATHNAME'
LANGUAGE 'C';

CREATE FUNCTION c_varcharout(varchar)
RETURNS cstring
AS 'MODULE_PATHNAME'
LANGUAGE 'C';
-- This is not needed because escapes are handled by the parser
--
-- CREATE FUNCTION c_textin(cstring)
-- RETURNS text
-- AS 'MODULE_PATHNAME'
-- LANGUAGE 'c';
-- Define a function which sets the new output routines for char types.
--
-- SELECT c_mode();
--
-- WARNING: this updates the pg_type system catalog directly, replacing
-- the output function of the listed types for the entire database (all
-- sessions).  It requires the privilege to update system catalogs;
-- pg_mode() below restores the standard routines.
--
CREATE FUNCTION c_mode()
RETURNS text
AS ' UPDATE pg_type SET typoutput=''c_textout'' WHERE typname=''SET'';
UPDATE pg_type SET typoutput=''c_varcharout'' WHERE typname=''bpchar'';
UPDATE pg_type SET typoutput=''c_textout'' WHERE typname=''bytea'';
UPDATE pg_type SET typoutput=''c_charout'' WHERE typname=''char'';
UPDATE pg_type SET typoutput=''c_textout'' WHERE typname=''text'';
UPDATE pg_type SET typoutput=''c_textout'' WHERE typname=''unknown'';
UPDATE pg_type SET typoutput=''c_varcharout'' WHERE typname=''varchar'';
select ''c_mode''::text;'
LANGUAGE 'SQL';
-- Define a function which restores the standard routines for char types.
--
-- SELECT pg_mode();
--
-- WARNING: like c_mode(), this updates the pg_type system catalog
-- directly and affects the whole database, not just the current
-- session.  It resets the types changed by c_mode() to the built-in
-- output functions.
--
CREATE FUNCTION pg_mode()
RETURNS text
AS ' UPDATE pg_type SET typoutput=''textout'' WHERE typname=''SET'';
UPDATE pg_type SET typoutput=''varcharout'' WHERE typname=''bpchar'';
UPDATE pg_type SET typoutput=''textout'' WHERE typname=''bytea'';
UPDATE pg_type SET typoutput=''charout'' WHERE typname=''char'';
UPDATE pg_type SET typoutput=''textout'' WHERE typname=''text'';
UPDATE pg_type SET typoutput=''textout'' WHERE typname=''unknown'';
UPDATE pg_type SET typoutput=''varcharout'' WHERE typname=''varchar'';
select ''pg_mode''::text;'
LANGUAGE 'SQL';
-- Use these to do the changes manually.
--
-- UPDATE pg_type SET typoutput='textout' WHERE typname='SET';
-- UPDATE pg_type SET typoutput='varcharout' WHERE typname='bpchar';
-- UPDATE pg_type SET typoutput='textout' WHERE typname='bytea';
-- UPDATE pg_type SET typoutput='charout' WHERE typname='char';
-- UPDATE pg_type SET typoutput='textout' WHERE typname='text';
-- UPDATE pg_type SET typoutput='textout' WHERE typname='unknown';
-- UPDATE pg_type SET typoutput='varcharout' WHERE typname='varchar';
--
-- UPDATE pg_type SET typoutput='c_textout' WHERE typname='SET';
-- UPDATE pg_type SET typoutput='c_varcharout' WHERE typname='bpchar';
-- UPDATE pg_type SET typoutput='c_textout' WHERE typname='bytea';
-- UPDATE pg_type SET typoutput='c_charout' WHERE typname='char';
-- UPDATE pg_type SET typoutput='c_textout' WHERE typname='text';
-- UPDATE pg_type SET typoutput='c_textout' WHERE typname='unknown';
-- UPDATE pg_type SET typoutput='c_varcharout' WHERE typname='varchar';
-- end of file
#!/bin/bash
#
# Add local variables to C sources files to set emacs C style to 4-space tabs.
#
# Usage: cd $PG_HOME && add-emacs-variables `find . -name \*.[chy] -print`

# Scratch file used to save and restore each file's modification time.
stamp="/tmp/.add-local-variables.$$"

# Bug fixes versus the previous version:
#  * the heredoc delimiter was written as ' EOF' (quoted, with a leading
#    space) and could never match the terminating EOF line, so the
#    heredoc swallowed the rest of the loop body;
#  * "$@" and quoted "$f" handle filenames containing whitespace, which
#    $* and bare $f silently mangled.
for f in "$@"; do
	# Skip symlinks and files that already carry an emacs variables block.
	if [ -L "$f" ] || grep -q '^ \* Local Variables:' "$f"; then
		continue
	fi
	echo "$f"
	# Remember the file's mtime so appending the block does not change it.
	touch -r "$f" "$stamp"
	cat <<'EOF' >> "$f"
/*
 * Local Variables:
 * tab-width: 4
 * c-indent-level: 4
 * c-basic-offset: 4
 * End:
 */
EOF
	touch -r "$stamp" "$f"
done
rm -f "$stamp"
# end of file
#!/bin/echo Usage: source
#
# Set the shell variables files, cfiles, hfiles, yfiles and sfiles with
# the names of all .c, .h, .y, and .S files in current directory tree.
# Define also some shell functions to grep the files. Typical usage is:
#
#	$ cd src/
#	$ source ../contrib/tools/find-sources
#	$ gh BLCKSZ		# grep BLCKSZ in .h files
#	$ gcl MAXTUPLEN		# list all .c files containing MAXTUPLEN
#
# THIS SCRIPT MUST BE SOURCED FROM BASH.
#
# Copyright (C) 1999 Massimo Dal Zotto <dz@cs.unitn.it>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.

# Build the file lists.  Default to the current directory when no
# argument is given.  "$dir" is quoted so a path containing spaces is
# passed to find as a single argument (the resulting word-split lists
# still cannot represent filenames with embedded whitespace, as before).
dir=${1-$(pwd)}/
cfiles=$(find "$dir" -name \*.c | sort)
hfiles=$(find "$dir" -name \*.h | sort)
yfiles=$(find "$dir" -name \*.y | sort)
sfiles=$(find "$dir" -name \*.S | sort)
files="$hfiles $cfiles $yfiles $sfiles"

# Define some functions to grep the files in the lists.  /dev/null is
# always appended so grep prints the filename even for a single match.
function g()   { grep -- "$*" $files /dev/null; }
function gc()  { grep -- "$*" $cfiles /dev/null; }
function gh()  { grep -- "$*" $hfiles /dev/null; }
function gy()  { grep -- "$*" $yfiles /dev/null; }
function gS()  { grep -- "$*" $sfiles /dev/null; }
function gl()  { grep -l -- "$*" $files /dev/null; }
function gcl() { grep -l -- "$*" $cfiles /dev/null; }
function ghl() { grep -l -- "$*" $hfiles /dev/null; }
function gyl() { grep -l -- "$*" $yfiles /dev/null; }
function gSl() { grep -l -- "$*" $sfiles /dev/null; }
# end of file
#!/bin/bash
#
# Makes an emacs tagfile for all .[chS] and .el files in the current
# directory tree.

# The first etags run (no -a) creates TAGS from scratch; the later runs
# append to it.  Errors (e.g. no files matching a pattern) are ignored.
etags    $(find . -name '*.h')  2>/dev/null || true
etags -a $(find . -name '*.c')  2>/dev/null || true
etags -a $(find . -name '*.S')  2>/dev/null || true
etags -a $(find . -name '*.el') 2>/dev/null || true
# $PostgreSQL: pgsql/contrib/tsearch/Makefile,v 1.5 2004/08/20 20:13:08 momjian Exp $

# Build rules for contrib/tsearch (full-text search module).

PG_CPPFLAGS = -I.
MODULE_big = tsearch
OBJS = crc32.o morph.o txtidx.o query.o gistidx.o rewrite.o
DATA_built = tsearch.sql
DOCS = README.tsearch
REGRESS = tsearch
# parser.c is generated from parser.l; remove it on 'make clean'.
EXTRA_CLEAN = parser.c

# Build either against an installed server (PGXS) or inside the
# PostgreSQL source tree.
ifdef USE_PGXS
PGXS = $(shell pg_config --pgxs)
include $(PGXS)
else
subdir = contrib/tsearch
top_builddir = ../..
include $(top_builddir)/src/Makefile.global
include $(top_srcdir)/contrib/contrib-global.mk
endif

# parser is compiled as part of query
query.o: parser.c

# Regenerate the lexer with flex when available; otherwise print the
# standard "missing tool" message.
parser.c: parser.l
ifdef FLEX
	$(FLEX) $(FLEXFLAGS) -8 -Ptsearch_yy -o'$@' $<
else
	@$(missing) flex $< $@
endif

# DO NOT DELETE
# DO NOT DELETE
This diff is collapsed.
This diff is collapsed.
#ifndef _CRC32_H
#define _CRC32_H

/* Returns crc32 of data block */
extern unsigned int crc32_sz(char *buf, int size);

/* Returns crc32 of null-terminated string */
/* NOTE(review): this macro evaluates its argument twice; do not pass
 * an expression with side effects (e.g. crc32(p++)). */
#define crc32(buf) crc32_sz((buf),strlen(buf))

#endif
This diff is collapsed.
#ifndef __DEFLEX_H__
#define __DEFLEX_H__

/* Token type codes emitted by the tsearch lexer. */

/* reminder: keep LASTNUM equal to the highest token code below */
#define LASTNUM 23

#define LATWORD 1
#define CYRWORD 2
#define UWORD 3
#define EMAIL 4
#define FURL 5
#define HOST 6
#define SCIENTIFIC 7
#define VERSIONNUMBER 8
#define PARTHYPHENWORD 9
#define CYRPARTHYPHENWORD 10
#define LATPARTHYPHENWORD 11
#define SPACE 12
#define TAG 13
#define HTTP 14
#define HYPHENWORD 15
#define LATHYPHENWORD 16
#define CYRHYPHENWORD 17
#define URI 18
#define FILEPATH 19
#define DECIMAL 20
#define SIGNEDINT 21
#define UNSIGNEDINT 22
#define HTMLENTITY 23

/* Human-readable names for the token codes, indexed by code. */
extern const char *descr[];

#endif
/*
 * Pull in the built-in dictionary tables.  TABLE_DICT_START/END expand
 * to the comma/brace punctuation expected by the array initializer that
 * includes this file; they are undefined again afterwards so they do
 * not leak into other code.
 */
#define TABLE_DICT_START ,{
#define TABLE_DICT_END }
#include "dict/porter_english.dct"
#include "dict/russian_stemming.dct"
#undef TABLE_DICT_START
#undef TABLE_DICT_END
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
#ifndef __MORPH_H__
#define __MORPH_H__

/* One-time initialization of the morphology tables (see morph.c). */
void initmorph(void);
/* Normalize 'word' of lexer token 'type'; presumably returns the base
 * form and updates *len -- confirm against morph.c. */
char *lemmatize(char *word, int *len, int type);
/* True if tokens of the given lexer type are treated as stop words. */
bool is_stoptype(int type);

#endif
#ifndef __PARSER_H__
#define __PARSER_H__

/* Current token text and length, set by tsearch_yylex(). */
/* NOTE(review): these are tentative definitions, not 'extern'
 * declarations, so every translation unit including this header emits a
 * common symbol.  Traditional linkers merge them, but this is not
 * strictly conforming C; adding 'extern' would require a definition in
 * exactly one .c file -- confirm before changing. */
char *token;
int tokenlen;

/* Lexer entry point: returns the next token's type code. */
int tsearch_yylex(void);
/* Begin lexing the given buffer of the given length. */
void start_parse_str(char *, int);
/* Release lexer state set up by start_parse_str(). */
void end_parse(void);

#endif
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
#ifndef __REWRITE_H__
#define __REWRITE_H__

/* Query-tree cleanup helpers (see rewrite.c).  Each takes an ITEM array
 * with *len elements; presumably both return the cleaned array and
 * update *len -- confirm against rewrite.c. */
ITEM *clean_NOT(ITEM * ptr, int4 *len);
ITEM *clean_fakeval(ITEM * ptr, int4 *len);

#endif
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
-- SQL for XML parser
-- Adjust this setting to control where the objects get created.
SET search_path TO public;

-- Returns true when the argument is well-formed XML.
CREATE OR REPLACE FUNCTION pgxml_parse(text) RETURNS boolean
AS 'MODULE_PATHNAME' LANGUAGE c STRICT;

-- Evaluates an XPath expression against a document.  The extra text
-- arguments presumably control result formatting (top/separator tags)
-- -- confirm against the C implementation.
CREATE OR REPLACE FUNCTION pgxml_xpath(text, text, text, text) RETURNS text
AS 'MODULE_PATHNAME' LANGUAGE c STRICT;
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment