Commit 436a2956 authored by Bruce Momjian

Re-run pgindent, fixing a problem where comment lines after a blank
comment line were output as too long, and update typedefs for the /lib
directory.  Also fix a case where identifiers were used as variable names
in the backend, but as typedefs in ecpg (favor the backend for
indenting).

Backpatch to 8.1.X.
parent e196eedd
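
For example (a made-up comment, not taken from this diff; the real cases appear in the hunks below), the old pgindent could leave the paragraph after a blank comment line over-long:

/*
 * First paragraph of a comment, wrapped normally by pgindent.
 *
 * Second paragraph: before this fix, lines here could be emitted longer than the usual wrap width instead of being re-flowed.
 */

With the fix, the paragraph following the blank comment line is re-wrapped like the first one:

/*
 * First paragraph of a comment, wrapped normally by pgindent.
 *
 * Second paragraph: before this fix, lines here could be emitted longer
 * than the usual wrap width instead of being re-flowed.
 */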
......@@ -143,8 +143,8 @@ g_int_compress(PG_FUNCTION_ARGS)
CHECKARRVALID(r);
PREPAREARR(r);
if (ARRNELEMS(r)>= 2 * MAXNUMRANGE)
elog(NOTICE,"Input array is too big (%d maximum allowed, %d current), use gist__intbig_ops opclass instead",
if (ARRNELEMS(r) >= 2 * MAXNUMRANGE)
elog(NOTICE, "Input array is too big (%d maximum allowed, %d current), use gist__intbig_ops opclass instead",
2 * MAXNUMRANGE - 1, ARRNELEMS(r));
retval = palloc(sizeof(GISTENTRY));
......@@ -154,8 +154,10 @@ g_int_compress(PG_FUNCTION_ARGS)
PG_RETURN_POINTER(retval);
}
/* leaf entries never compress one more time, only when entry->leafkey ==true,
so now we work only with internal keys */
/*
* leaf entries never compress one more time, only when entry->leafkey
* ==true, so now we work only with internal keys
*/
r = (ArrayType *) PG_DETOAST_DATUM(entry->key);
CHECKARRVALID(r);
......
/*
* $PostgreSQL: pgsql/contrib/pgbench/pgbench.c,v 1.45 2005/10/29 19:38:07 tgl Exp $
* $PostgreSQL: pgsql/contrib/pgbench/pgbench.c,v 1.46 2005/11/22 18:17:04 momjian Exp $
*
* pgbench: a simple benchmark program for PostgreSQL
* written by Tatsuo Ishii
......@@ -1110,7 +1110,8 @@ main(int argc, char **argv)
fprintf(stderr, "Use limit/ulimt to increase the limit before using pgbench.\n");
exit(1);
}
#endif /* #if !(defined(__CYGWIN__) || defined(__MINGW32__)) */
#endif /* #if !(defined(__CYGWIN__) ||
* defined(__MINGW32__)) */
break;
case 'C':
is_connect = 1;
......
......@@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $PostgreSQL: pgsql/contrib/pgcrypto/pgp-decrypt.c,v 1.6 2005/10/15 02:49:06 momjian Exp $
* $PostgreSQL: pgsql/contrib/pgcrypto/pgp-decrypt.c,v 1.7 2005/11/22 18:17:04 momjian Exp $
*/
#include "postgres.h"
......@@ -269,14 +269,14 @@ prefix_init(void **priv_p, void *arg, PullFilter * src)
* The original purpose of the 2-byte check was to show user a
* friendly "wrong key" message. This made following possible:
*
* "An Attack on CFB Mode Encryption As Used By OpenPGP" by Serge Mister
* and Robert Zuccherato
* "An Attack on CFB Mode Encryption As Used By OpenPGP" by Serge
* Mister and Robert Zuccherato
*
* To avoid being 'oracle', we delay reporting, which basically means we
* prefer to run into corrupt packet header.
* To avoid being 'oracle', we delay reporting, which basically means
* we prefer to run into corrupt packet header.
*
* We _could_ throw PXE_PGP_CORRUPT_DATA here, but there is possibility
* of attack via timing, so we don't.
* We _could_ throw PXE_PGP_CORRUPT_DATA here, but there is
* possibility of attack via timing, so we don't.
*/
ctx->corrupt_prefix = 1;
}
......
......@@ -26,7 +26,7 @@
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* $PostgreSQL: pgsql/contrib/pgcrypto/pgp-pgsql.c,v 1.6 2005/10/15 02:49:06 momjian Exp $
* $PostgreSQL: pgsql/contrib/pgcrypto/pgp-pgsql.c,v 1.7 2005/11/22 18:17:04 momjian Exp $
*/
#include "postgres.h"
......@@ -125,8 +125,8 @@ add_entropy(text *data1, text *data2, text *data3)
/*
* Try to make the feeding unpredictable.
*
* Prefer data over keys, as it's rather likely that key is same in several
* calls.
* Prefer data over keys, as it's rather likely that key is same in
* several calls.
*/
/* chance: 7/8 */
......
......@@ -547,8 +547,8 @@ crosstab(PG_FUNCTION_ARGS)
* Get the next category item value, which is alway
* attribute number three.
*
* Be careful to sssign the value to the array index based on
* which category we are presently processing.
* Be careful to sssign the value to the array index based
* on which category we are presently processing.
*/
values[1 + i] = SPI_getvalue(spi_tuple, spi_tupdesc, 3);
......@@ -870,8 +870,8 @@ get_crosstab_tuplestore(char *sql,
/*
* The provided SQL query must always return at least three columns:
*
* 1. rowname the label for each row - column 1 in the final result 2.
* category the label for each value-column in the final result 3.
* 1. rowname the label for each row - column 1 in the final result
* 2. category the label for each value-column in the final result 3.
* value the values used to populate the value-columns
*
* If there are more than three columns, the last two are taken as
......
......@@ -178,7 +178,7 @@ gettoken_query(QPRS_STATE * state, int4 *val, int4 *lenval, char **strval, int2
state->state = WAITOPERATOR;
return VAL;
}
else if ( state->state == WAITFIRSTOPERAND )
else if (state->state == WAITFIRSTOPERAND)
return END;
else
ereport(ERROR,
......@@ -206,11 +206,11 @@ gettoken_query(QPRS_STATE * state, int4 *val, int4 *lenval, char **strval, int2
return ERR;
break;
case WAITSINGLEOPERAND:
if ( *(state->buf) == '\0' )
if (*(state->buf) == '\0')
return END;
*strval = state->buf;
*lenval = strlen( state->buf );
state->buf += strlen( state->buf );
*lenval = strlen(state->buf);
state->buf += strlen(state->buf);
state->count++;
return VAL;
default:
......@@ -600,7 +600,7 @@ findoprnd(ITEM * ptr, int4 *pos)
* input
*/
static QUERYTYPE *
queryin(char *buf, void (*pushval) (QPRS_STATE *, int, char *, int, int2), int cfg_id, bool isplain)
queryin(char *buf, void (*pushval) (QPRS_STATE *, int, char *, int, int2), int cfg_id, bool isplain)
{
QPRS_STATE state;
int4 i;
......@@ -637,9 +637,10 @@ queryin(char *buf, void (*pushval) (QPRS_STATE *, int, char *, int, int2), int c
/* parse query & make polish notation (postfix, but in reverse order) */
makepol(&state, pushval);
pfree(state.valstate.word);
if (!state.num) {
if (!state.num)
{
elog(NOTICE, "Query doesn't contain lexem(s)");
query = (QUERYTYPE*)palloc( HDRSIZEQT );
query = (QUERYTYPE *) palloc(HDRSIZEQT);
query->len = HDRSIZEQT;
query->size = 0;
return query;
......@@ -928,9 +929,9 @@ to_tsquery(PG_FUNCTION_ARGS)
str = text2char(in);
PG_FREE_IF_COPY(in, 1);
query = queryin(str, pushval_morph, PG_GETARG_INT32(0),false);
query = queryin(str, pushval_morph, PG_GETARG_INT32(0), false);
if ( query->size == 0 )
if (query->size == 0)
PG_RETURN_POINTER(query);
res = clean_fakeval_v2(GETQUERY(query), &len);
......@@ -985,7 +986,7 @@ plainto_tsquery(PG_FUNCTION_ARGS)
query = queryin(str, pushval_morph, PG_GETARG_INT32(0), true);
if ( query->size == 0 )
if (query->size == 0)
PG_RETURN_POINTER(query);
res = clean_fakeval_v2(GETQUERY(query), &len);
......@@ -1023,4 +1024,3 @@ plainto_tsquery_current(PG_FUNCTION_ARGS)
Int32GetDatum(get_currcfg()),
PG_GETARG_DATUM(0)));
}
......@@ -7,6 +7,7 @@
#include "query.h"
typedef uint64 TPQTGist;
#define SIGLEN (sizeof(TPQTGist)*BITS_PER_BYTE)
......@@ -19,13 +20,15 @@ PG_FUNCTION_INFO_V1(tsq_mcontained);
Datum tsq_mcontained(PG_FUNCTION_ARGS);
static TPQTGist
makesign(QUERYTYPE* a) {
makesign(QUERYTYPE * a)
{
int i;
ITEM *ptr = GETQUERY(a);
TPQTGist sign = 0;
for (i = 0; i < a->size; i++) {
if ( ptr->type == VAL )
for (i = 0; i < a->size; i++)
{
if (ptr->type == VAL)
sign |= 1 << (ptr->val % SIGLEN);
ptr++;
}
......@@ -34,57 +37,67 @@ makesign(QUERYTYPE* a) {
}
Datum
tsq_mcontains(PG_FUNCTION_ARGS) {
tsq_mcontains(PG_FUNCTION_ARGS)
{
QUERYTYPE *query = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(0)));
QUERYTYPE *ex = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(1)));
TPQTGist sq, se;
int i,j;
ITEM *iq, *ie;
if ( query->size < ex->size ) {
TPQTGist sq,
se;
int i,
j;
ITEM *iq,
*ie;
if (query->size < ex->size)
{
PG_FREE_IF_COPY(query, 0);
PG_FREE_IF_COPY(ex, 1);
PG_RETURN_BOOL( false );
PG_RETURN_BOOL(false);
}
sq = makesign(query);
se = makesign(ex);
if ( (sq&se)!=se ) {
if ((sq & se) != se)
{
PG_FREE_IF_COPY(query, 0);
PG_FREE_IF_COPY(ex, 1);
PG_RETURN_BOOL( false );
PG_RETURN_BOOL(false);
}
ie = GETQUERY(ex);
for(i=0;i<ex->size;i++) {
for (i = 0; i < ex->size; i++)
{
iq = GETQUERY(query);
if ( ie[i].type != VAL )
if (ie[i].type != VAL)
continue;
for(j=0;j<query->size;j++)
if ( iq[j].type == VAL && ie[i].val == iq[j].val ) {
j = query->size+1;
for (j = 0; j < query->size; j++)
if (iq[j].type == VAL && ie[i].val == iq[j].val)
{
j = query->size + 1;
break;
}
if ( j == query->size ) {
if (j == query->size)
{
PG_FREE_IF_COPY(query, 0);
PG_FREE_IF_COPY(ex, 1);
PG_RETURN_BOOL( false );
PG_RETURN_BOOL(false);
}
}
PG_FREE_IF_COPY(query, 0);
PG_FREE_IF_COPY(ex, 1);
PG_RETURN_BOOL( true );
PG_RETURN_BOOL(true);
}
Datum
tsq_mcontained(PG_FUNCTION_ARGS) {
tsq_mcontained(PG_FUNCTION_ARGS)
{
PG_RETURN_DATUM(
DirectFunctionCall2(
tsq_mcontains,
......@@ -123,26 +136,31 @@ Datum gtsq_picksplit(PG_FUNCTION_ARGS);
Datum
gtsq_in(PG_FUNCTION_ARGS) {
gtsq_in(PG_FUNCTION_ARGS)
{
elog(ERROR, "Not implemented");
PG_RETURN_DATUM(0);
}
Datum
gtsq_out(PG_FUNCTION_ARGS) {
gtsq_out(PG_FUNCTION_ARGS)
{
elog(ERROR, "Not implemented");
PG_RETURN_DATUM(0);
}
Datum
gtsq_compress(PG_FUNCTION_ARGS) {
gtsq_compress(PG_FUNCTION_ARGS)
{
GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
GISTENTRY *retval = entry;
if (entry->leafkey) {
TPQTGist *sign = (TPQTGist*)palloc( sizeof(TPQTGist) );
if (entry->leafkey)
{
TPQTGist *sign = (TPQTGist *) palloc(sizeof(TPQTGist));
retval = (GISTENTRY *) palloc(sizeof(GISTENTRY));
*sign = makesign( (QUERYTYPE*)DatumGetPointer(PG_DETOAST_DATUM(entry->key)) );
*sign = makesign((QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM(entry->key)));
gistentryinit(*retval, PointerGetDatum(sign),
entry->rel, entry->page,
......@@ -153,34 +171,37 @@ gtsq_compress(PG_FUNCTION_ARGS) {
}
Datum
gtsq_decompress(PG_FUNCTION_ARGS) {
gtsq_decompress(PG_FUNCTION_ARGS)
{
PG_RETURN_DATUM(PG_GETARG_DATUM(0));
}
Datum
gtsq_consistent(PG_FUNCTION_ARGS) {
gtsq_consistent(PG_FUNCTION_ARGS)
{
GISTENTRY *entry = (GISTENTRY *) PG_GETARG_POINTER(0);
TPQTGist *key = (TPQTGist*) DatumGetPointer(entry->key);
TPQTGist *key = (TPQTGist *) DatumGetPointer(entry->key);
QUERYTYPE *query = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM(PG_GETARG_DATUM(1)));
StrategyNumber strategy = (StrategyNumber) PG_GETARG_UINT16(2);
TPQTGist sq = makesign(query);
if ( GIST_LEAF(entry) )
PG_RETURN_BOOL( ( (*key) & sq ) == ((strategy==1) ? sq : *key) );
if (GIST_LEAF(entry))
PG_RETURN_BOOL(((*key) & sq) == ((strategy == 1) ? sq : *key));
else
PG_RETURN_BOOL( (*key) & sq );
PG_RETURN_BOOL((*key) & sq);
}
Datum
gtsq_union(PG_FUNCTION_ARGS) {
gtsq_union(PG_FUNCTION_ARGS)
{
GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0);
TPQTGist *sign = (TPQTGist*)palloc( sizeof(TPQTGist) );
TPQTGist *sign = (TPQTGist *) palloc(sizeof(TPQTGist));
int i;
int *size = (int *) PG_GETARG_POINTER(1);
memset( sign, 0, sizeof(TPQTGist) );
memset(sign, 0, sizeof(TPQTGist));
for (i = 0; i < entryvec->n;i++)
for (i = 0; i < entryvec->n; i++)
*sign |= *GETENTRY(entryvec, i);
*size = sizeof(TPQTGist);
......@@ -189,34 +210,39 @@ gtsq_union(PG_FUNCTION_ARGS) {
}
Datum
gtsq_same(PG_FUNCTION_ARGS) {
gtsq_same(PG_FUNCTION_ARGS)
{
TPQTGist *a = (TPQTGist *) PG_GETARG_POINTER(0);
TPQTGist *b = (TPQTGist *) PG_GETARG_POINTER(1);
PG_RETURN_POINTER( *a == *b );
PG_RETURN_POINTER(*a == *b);
}
static int
sizebitvec(TPQTGist sign) {
int size=0,i;
sizebitvec(TPQTGist sign)
{
int size = 0,
i;
for(i=0;i<SIGLEN;i++)
size += 0x01 & (sign>>i);
for (i = 0; i < SIGLEN; i++)
size += 0x01 & (sign >> i);
return size;
}
static int
hemdist(TPQTGist a, TPQTGist b) {
hemdist(TPQTGist a, TPQTGist b)
{
TPQTGist res = a ^ b;
return sizebitvec(res);
}
Datum
gtsq_penalty(PG_FUNCTION_ARGS) {
TPQTGist *origval = (TPQTGist*) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key);
TPQTGist *newval = (TPQTGist*) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key);
gtsq_penalty(PG_FUNCTION_ARGS)
{
TPQTGist *origval = (TPQTGist *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(0))->key);
TPQTGist *newval = (TPQTGist *) DatumGetPointer(((GISTENTRY *) PG_GETARG_POINTER(1))->key);
float *penalty = (float *) PG_GETARG_POINTER(2);
*penalty = hemdist(*origval, *newval);
......@@ -225,13 +251,15 @@ gtsq_penalty(PG_FUNCTION_ARGS) {
}
typedef struct {
typedef struct
{
OffsetNumber pos;
int4 cost;
} SPLITCOST;
static int
comparecost(const void *a, const void *b) {
comparecost(const void *a, const void *b)
{
if (((SPLITCOST *) a)->cost == ((SPLITCOST *) b)->cost)
return 0;
else
......@@ -241,18 +269,25 @@ comparecost(const void *a, const void *b) {
#define WISH_F(a,b,c) (double)( -(double)(((a)-(b))*((a)-(b))*((a)-(b)))*(c) )
Datum
gtsq_picksplit(PG_FUNCTION_ARGS) {
gtsq_picksplit(PG_FUNCTION_ARGS)
{
GistEntryVector *entryvec = (GistEntryVector *) PG_GETARG_POINTER(0);
GIST_SPLITVEC *v = (GIST_SPLITVEC *) PG_GETARG_POINTER(1);
OffsetNumber maxoff = entryvec->n - 2;
OffsetNumber k,j;
TPQTGist *datum_l, *datum_r;
int4 size_alpha, size_beta;
int4 size_waste, waste = -1;
OffsetNumber k,
j;
TPQTGist *datum_l,
*datum_r;
int4 size_alpha,
size_beta;
int4 size_waste,
waste = -1;
int4 nbytes;
OffsetNumber seed_1 = 0, seed_2 = 0;
OffsetNumber *left, *right;
OffsetNumber seed_1 = 0,
seed_2 = 0;
OffsetNumber *left,
*right;
SPLITCOST *costvector;
......@@ -262,9 +297,11 @@ gtsq_picksplit(PG_FUNCTION_ARGS) {
v->spl_nleft = v->spl_nright = 0;
for (k = FirstOffsetNumber; k < maxoff; k = OffsetNumberNext(k))
for (j = OffsetNumberNext(k); j <= maxoff; j = OffsetNumberNext(j)) {
size_waste = hemdist( *GETENTRY(entryvec,j), *GETENTRY(entryvec,k) );
if (size_waste > waste) {
for (j = OffsetNumberNext(k); j <= maxoff; j = OffsetNumberNext(j))
{
size_waste = hemdist(*GETENTRY(entryvec, j), *GETENTRY(entryvec, k));
if (size_waste > waste)
{
waste = size_waste;
seed_1 = k;
seed_2 = j;
......@@ -272,47 +309,56 @@ gtsq_picksplit(PG_FUNCTION_ARGS) {
}
if (seed_1 == 0 || seed_2 == 0) {
if (seed_1 == 0 || seed_2 == 0)
{
seed_1 = 1;
seed_2 = 2;
}
datum_l = (TPQTGist*)palloc( sizeof(TPQTGist) );
*datum_l=*GETENTRY(entryvec,seed_1);
datum_r = (TPQTGist*)palloc( sizeof(TPQTGist) );
*datum_r=*GETENTRY(entryvec,seed_2);
datum_l = (TPQTGist *) palloc(sizeof(TPQTGist));
*datum_l = *GETENTRY(entryvec, seed_1);
datum_r = (TPQTGist *) palloc(sizeof(TPQTGist));
*datum_r = *GETENTRY(entryvec, seed_2);
maxoff = OffsetNumberNext(maxoff);
costvector = (SPLITCOST *) palloc(sizeof(SPLITCOST) * maxoff);
for (j = FirstOffsetNumber; j <= maxoff; j = OffsetNumberNext(j)) {
for (j = FirstOffsetNumber; j <= maxoff; j = OffsetNumberNext(j))
{
costvector[j - 1].pos = j;
size_alpha = hemdist( *GETENTRY(entryvec,seed_1), *GETENTRY(entryvec,j) );
size_beta = hemdist( *GETENTRY(entryvec,seed_2), *GETENTRY(entryvec,j) );
size_alpha = hemdist(*GETENTRY(entryvec, seed_1), *GETENTRY(entryvec, j));
size_beta = hemdist(*GETENTRY(entryvec, seed_2), *GETENTRY(entryvec, j));
costvector[j - 1].cost = abs(size_alpha - size_beta);
}
qsort((void *) costvector, maxoff, sizeof(SPLITCOST), comparecost);
for (k = 0; k < maxoff; k++) {
for (k = 0; k < maxoff; k++)
{
j = costvector[k].pos;
if ( j == seed_1 ) {
if (j == seed_1)
{
*left++ = j;
v->spl_nleft++;
continue;
} else if ( j == seed_2 ) {
}
else if (j == seed_2)
{
*right++ = j;
v->spl_nright++;
continue;
}
size_alpha = hemdist( *datum_l, *GETENTRY(entryvec,j) );
size_beta = hemdist( *datum_r, *GETENTRY(entryvec,j) );
size_alpha = hemdist(*datum_l, *GETENTRY(entryvec, j));
size_beta = hemdist(*datum_r, *GETENTRY(entryvec, j));
if (size_alpha < size_beta + WISH_F(v->spl_nleft, v->spl_nright, 0.05)) {
*datum_l |= *GETENTRY(entryvec,j);
if (size_alpha < size_beta + WISH_F(v->spl_nleft, v->spl_nright, 0.05))
{
*datum_l |= *GETENTRY(entryvec, j);
*left++ = j;
v->spl_nleft++;
} else {
*datum_r |= *GETENTRY(entryvec,j);
}
else
{
*datum_r |= *GETENTRY(entryvec, j);
*right++ = j;
v->spl_nright++;
}
......@@ -324,5 +370,3 @@ gtsq_picksplit(PG_FUNCTION_ARGS) {
PG_RETURN_POINTER(v);
}
......@@ -7,25 +7,28 @@ PG_FUNCTION_INFO_V1(tsquery_numnode);
Datum tsquery_numnode(PG_FUNCTION_ARGS);
Datum
tsquery_numnode(PG_FUNCTION_ARGS) {
tsquery_numnode(PG_FUNCTION_ARGS)
{
QUERYTYPE *query = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM_COPY(PG_GETARG_DATUM(0)));
int nnode = query->size;
PG_FREE_IF_COPY(query,0);
PG_FREE_IF_COPY(query, 0);
PG_RETURN_INT32(nnode);
}
static QTNode*
join_tsqueries(QUERYTYPE *a, QUERYTYPE *b) {
QTNode *res=(QTNode*)palloc0( sizeof(QTNode) );
static QTNode *
join_tsqueries(QUERYTYPE * a, QUERYTYPE * b)
{
QTNode *res = (QTNode *) palloc0(sizeof(QTNode));
res->flags |= QTN_NEEDFREE;
res->valnode = (ITEM*)palloc0( sizeof(ITEM) );
res->valnode = (ITEM *) palloc0(sizeof(ITEM));
res->valnode->type = OPR;
res->child = (QTNode**)palloc0( sizeof(QTNode*)*2 );
res->child[0] = QT2QTN( GETQUERY(b), GETOPERAND(b) );
res->child[1] = QT2QTN( GETQUERY(a), GETOPERAND(a) );
res->child = (QTNode **) palloc0(sizeof(QTNode *) * 2);
res->child[0] = QT2QTN(GETQUERY(b), GETOPERAND(b));
res->child[1] = QT2QTN(GETQUERY(a), GETOPERAND(a));
res->nchild = 2;
return res;
......@@ -35,17 +38,21 @@ PG_FUNCTION_INFO_V1(tsquery_and);
Datum tsquery_and(PG_FUNCTION_ARGS);
Datum
tsquery_and(PG_FUNCTION_ARGS) {
tsquery_and(PG_FUNCTION_ARGS)
{
QUERYTYPE *a = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM_COPY(PG_GETARG_DATUM(0)));
QUERYTYPE *b = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM_COPY(PG_GETARG_DATUM(1)));
QTNode *res;
QUERYTYPE *query;
if ( a->size == 0 ) {
PG_FREE_IF_COPY(a,1);
if (a->size == 0)
{
PG_FREE_IF_COPY(a, 1);
PG_RETURN_POINTER(b);
} else if ( b->size == 0 ) {
PG_FREE_IF_COPY(b,1);
}
else if (b->size == 0)
{
PG_FREE_IF_COPY(b, 1);
PG_RETURN_POINTER(a);
}
......@@ -53,11 +60,11 @@ tsquery_and(PG_FUNCTION_ARGS) {
res->valnode->val = '&';
query = QTN2QT( res, PlainMemory );
query = QTN2QT(res, PlainMemory);
QTNFree(res);
PG_FREE_IF_COPY(a,0);
PG_FREE_IF_COPY(b,1);
PG_FREE_IF_COPY(a, 0);
PG_FREE_IF_COPY(b, 1);
PG_RETURN_POINTER(query);
}
......@@ -66,17 +73,21 @@ PG_FUNCTION_INFO_V1(tsquery_or);
Datum tsquery_or(PG_FUNCTION_ARGS);
Datum
tsquery_or(PG_FUNCTION_ARGS) {
tsquery_or(PG_FUNCTION_ARGS)
{
QUERYTYPE *a = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM_COPY(PG_GETARG_DATUM(0)));
QUERYTYPE *b = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM_COPY(PG_GETARG_DATUM(1)));
QTNode *res;
QUERYTYPE *query;
if ( a->size == 0 ) {
PG_FREE_IF_COPY(a,1);
if (a->size == 0)
{
PG_FREE_IF_COPY(a, 1);
PG_RETURN_POINTER(b);
} else if ( b->size == 0 ) {
PG_FREE_IF_COPY(b,1);
}
else if (b->size == 0)
{
PG_FREE_IF_COPY(b, 1);
PG_RETURN_POINTER(a);
}
......@@ -84,11 +95,11 @@ tsquery_or(PG_FUNCTION_ARGS) {
res->valnode->val = '|';
query = QTN2QT( res, PlainMemory );
query = QTN2QT(res, PlainMemory);
QTNFree(res);
PG_FREE_IF_COPY(a,0);
PG_FREE_IF_COPY(b,1);
PG_FREE_IF_COPY(a, 0);
PG_FREE_IF_COPY(b, 1);
PG_RETURN_POINTER(query);
}
......@@ -97,43 +108,50 @@ PG_FUNCTION_INFO_V1(tsquery_not);
Datum tsquery_not(PG_FUNCTION_ARGS);
Datum
tsquery_not(PG_FUNCTION_ARGS) {
tsquery_not(PG_FUNCTION_ARGS)
{
QUERYTYPE *a = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM_COPY(PG_GETARG_DATUM(0)));
QTNode *res;
QUERYTYPE *query;
if ( a->size == 0 )
if (a->size == 0)
PG_RETURN_POINTER(a);
res=(QTNode*)palloc0( sizeof(QTNode) );
res = (QTNode *) palloc0(sizeof(QTNode));
res->flags |= QTN_NEEDFREE;
res->valnode = (ITEM*)palloc0( sizeof(ITEM) );
res->valnode = (ITEM *) palloc0(sizeof(ITEM));
res->valnode->type = OPR;
res->valnode->val = '!';
res->child = (QTNode**)palloc0( sizeof(QTNode*) );
res->child[0] = QT2QTN( GETQUERY(a), GETOPERAND(a) );
res->child = (QTNode **) palloc0(sizeof(QTNode *));
res->child[0] = QT2QTN(GETQUERY(a), GETOPERAND(a));
res->nchild = 1;
query = QTN2QT( res, PlainMemory );
query = QTN2QT(res, PlainMemory);
QTNFree(res);
PG_FREE_IF_COPY(a,0);
PG_FREE_IF_COPY(a, 0);
PG_RETURN_POINTER(query);
}
static int
CompareTSQ( QUERYTYPE *a, QUERYTYPE *b ) {
if ( a->size != b->size ) {
return ( a->size < b->size ) ? -1 : 1;
} else if ( a->len != b->len ) {
return ( a->len < b->len ) ? -1 : 1;
} else {
QTNode *an = QT2QTN( GETQUERY(a), GETOPERAND(a) );
QTNode *bn = QT2QTN( GETQUERY(b), GETOPERAND(b) );
CompareTSQ(QUERYTYPE * a, QUERYTYPE * b)
{
if (a->size != b->size)
{
return (a->size < b->size) ? -1 : 1;
}
else if (a->len != b->len)
{
return (a->len < b->len) ? -1 : 1;
}
else
{
QTNode *an = QT2QTN(GETQUERY(a), GETOPERAND(a));
QTNode *bn = QT2QTN(GETQUERY(b), GETOPERAND(b));
int res = QTNodeCompare(an, bn);
QTNFree(an);
......@@ -145,17 +163,19 @@ CompareTSQ( QUERYTYPE *a, QUERYTYPE *b ) {
return 0;
}
PG_FUNCTION_INFO_V1(tsquery_cmp); \
PG_FUNCTION_INFO_V1(tsquery_cmp);
\
Datum tsquery_cmp(PG_FUNCTION_ARGS);
Datum
tsquery_cmp(PG_FUNCTION_ARGS) {
tsquery_cmp(PG_FUNCTION_ARGS)
{
QUERYTYPE *a = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM_COPY(PG_GETARG_DATUM(0)));
QUERYTYPE *b = (QUERYTYPE *) DatumGetPointer(PG_DETOAST_DATUM_COPY(PG_GETARG_DATUM(1)));
int res = CompareTSQ(a,b);
int res = CompareTSQ(a, b);
PG_FREE_IF_COPY(a,0);
PG_FREE_IF_COPY(b,1);
PG_FREE_IF_COPY(a, 0);
PG_FREE_IF_COPY(b, 1);
PG_RETURN_INT32(res);
}
......@@ -176,12 +196,9 @@ NAME(PG_FUNCTION_ARGS) { \
PG_RETURN_BOOL( ACTION ); \
}
CMPFUNC( tsquery_lt, res <0 );
CMPFUNC( tsquery_le, res<=0 );
CMPFUNC( tsquery_eq, res==0 );
CMPFUNC( tsquery_ge, res>=0 );
CMPFUNC( tsquery_gt, res >0 );
CMPFUNC( tsquery_ne, res!=0 );
CMPFUNC(tsquery_lt, res < 0);
CMPFUNC(tsquery_le, res <= 0);
CMPFUNC(tsquery_eq, res == 0);
CMPFUNC(tsquery_ge, res >= 0);
CMPFUNC(tsquery_gt, res > 0);
CMPFUNC(tsquery_ne, res != 0);
......@@ -6,7 +6,8 @@
#include "query.h"
typedef struct QTNode {
typedef struct QTNode
{
ITEM *valnode;
uint32 flags;
int4 nchild;
......@@ -19,21 +20,22 @@ typedef struct QTNode {
#define QTN_NOCHANGE 0x02
#define QTN_WORDFREE 0x04
typedef enum {
typedef enum
{
PlainMemory,
SPIMemory,
AggMemory
} MemoryType;
QTNode* QT2QTN( ITEM *in, char *operand );
QUERYTYPE* QTN2QT( QTNode* in, MemoryType memtype );
void QTNFree( QTNode* in );
void QTNSort( QTNode* in );
void QTNTernary( QTNode* in );
void QTNBinary( QTNode* in );
int QTNodeCompare( QTNode *an, QTNode *bn );
QTNode* QTNCopy( QTNode* in, MemoryType memtype);
bool QTNEq( QTNode* a, QTNode* b );
QTNode *QT2QTN(ITEM * in, char *operand);
QUERYTYPE *QTN2QT(QTNode * in, MemoryType memtype);
void QTNFree(QTNode * in);
void QTNSort(QTNode * in);
void QTNTernary(QTNode * in);
void QTNBinary(QTNode * in);
int QTNodeCompare(QTNode * an, QTNode * bn);
QTNode *QTNCopy(QTNode * in, MemoryType memtype);
bool QTNEq(QTNode * a, QTNode * b);
extern MemoryContext AggregateContext;
......
......@@ -266,8 +266,10 @@ calc_rank_or(float *w, tsvector * t, QUERYTYPE * q)
for (i = 0; i < size; i++)
{
float resj,wjm;
float resj,
wjm;
int4 jm;
entry = find_wordentry(t, q, item[i]);
if (!entry)
continue;
......@@ -288,8 +290,9 @@ calc_rank_or(float *w, tsvector * t, QUERYTYPE * q)
jm = 0;
for (j = 0; j < dimt; j++)
{
resj = resj + wpos(post[j])/((j+1)*(j+1));
if ( wpos(post[j]) > wjm ) {
resj = resj + wpos(post[j]) / ((j + 1) * (j + 1));
if (wpos(post[j]) > wjm)
{
wjm = wpos(post[j]);
jm = j;
}
......@@ -301,10 +304,10 @@ calc_rank_or(float *w, tsvector * t, QUERYTYPE * q)
don't sort for now, just choose maximum weight. This should be corrected
Oleg Bartunov
*/
res = res + ( wjm + resj - wjm/((jm+1)*(jm+1)))/1.64493406685;
res = res + (wjm + resj - wjm / ((jm + 1) * (jm + 1))) / 1.64493406685;
}
if ( size > 0 )
res = res /size;
if (size > 0)
res = res / size;
pfree(item);
return res;
}
......@@ -429,18 +432,21 @@ compareDocR(const void *a, const void *b)
}
static bool
checkcondition_ITEM(void *checkval, ITEM * val) {
return (bool)(val->istrue);
checkcondition_ITEM(void *checkval, ITEM * val)
{
return (bool) (val->istrue);
}
static void
reset_istrue_flag(QUERYTYPE *query) {
reset_istrue_flag(QUERYTYPE * query)
{
ITEM *item = GETQUERY(query);
int i;
/* reset istrue flag */
for(i = 0; i < query->size; i++) {
if ( item->type == VAL )
for (i = 0; i < query->size; i++)
{
if (item->type == VAL)
item->istrue = 0;
item++;
}
......@@ -452,7 +458,7 @@ Cover(DocRepresentation * doc, int len, QUERYTYPE * query, int *pos, int *p, int
DocRepresentation *ptr;
int lastpos = *pos;
int i;
bool found=false;
bool found = false;
reset_istrue_flag(query);
......@@ -461,11 +467,14 @@ Cover(DocRepresentation * doc, int len, QUERYTYPE * query, int *pos, int *p, int
ptr = doc + *pos;
/* find upper bound of cover from current position, move up */
while (ptr - doc < len) {
for(i=0;i<ptr->nitem;i++)
while (ptr - doc < len)
{
for (i = 0; i < ptr->nitem; i++)
ptr->item[i]->istrue = 1;
if ( TS_execute(GETQUERY(query), NULL, false, checkcondition_ITEM) ) {
if (ptr->pos > *q) {
if (TS_execute(GETQUERY(query), NULL, false, checkcondition_ITEM))
{
if (ptr->pos > *q)
{
*q = ptr->pos;
lastpos = ptr - doc;
found = true;
......@@ -483,10 +492,12 @@ Cover(DocRepresentation * doc, int len, QUERYTYPE * query, int *pos, int *p, int
ptr = doc + lastpos;
/* find lower bound of cover from founded upper bound, move down */
while (ptr >= doc ) {
for(i=0;i<ptr->nitem;i++)
while (ptr >= doc)
{
for (i = 0; i < ptr->nitem; i++)
ptr->item[i]->istrue = 1;
if ( TS_execute(GETQUERY(query), NULL, true, checkcondition_ITEM) ) {
if (TS_execute(GETQUERY(query), NULL, true, checkcondition_ITEM))
{
if (ptr->pos < *p)
*p = ptr->pos;
break;
......@@ -494,14 +505,18 @@ Cover(DocRepresentation * doc, int len, QUERYTYPE * query, int *pos, int *p, int
ptr--;
}
if ( *p <= *q ) {
/* set position for next try to next lexeme after begining of founded cover */
*pos= (ptr-doc) + 1;
if (*p <= *q)
{
/*
* set position for next try to next lexeme after begining of founded
* cover
*/
*pos = (ptr - doc) + 1;
return true;
}
(*pos)++;
return Cover( doc, len, query, pos, p, q );
return Cover(doc, len, query, pos, p, q);
}
static DocRepresentation *
......@@ -550,26 +565,32 @@ get_docrep(tsvector * txt, QUERYTYPE * query, int *doclen)
for (j = 0; j < dimt; j++)
{
if ( j == 0 ) {
ITEM *kptr, *iptr = item+i;
if (j == 0)
{
ITEM *kptr,
*iptr = item + i;
int k;
doc[cur].needfree = false;
doc[cur].nitem = 0;
doc[cur].item = (ITEM**)palloc( sizeof(ITEM*) * query->size );
doc[cur].item = (ITEM **) palloc(sizeof(ITEM *) * query->size);
for(k=0; k < query->size; k++) {
kptr = item+k;
if ( k==i || ( item[k].type == VAL && compareITEM( &kptr, &iptr ) == 0 ) ) {
doc[cur].item[ doc[cur].nitem ] = item+k;
for (k = 0; k < query->size; k++)
{
kptr = item + k;
if (k == i || (item[k].type == VAL && compareITEM(&kptr, &iptr) == 0))
{
doc[cur].item[doc[cur].nitem] = item + k;
doc[cur].nitem++;
kptr->istrue = 1;
}
}
} else {
}
else
{
doc[cur].needfree = false;
doc[cur].nitem = doc[cur-1].nitem;
doc[cur].item = doc[cur-1].item;
doc[cur].nitem = doc[cur - 1].nitem;
doc[cur].item = doc[cur - 1].item;
}
doc[cur].pos = WEP_GETPOS(post[j]);
cur++;
......@@ -604,7 +625,7 @@ rank_cd(PG_FUNCTION_ARGS)
len,
cur,
i,
doclen=0;
doclen = 0;
doc = get_docrep(txt, query, &doclen);
if (!doc)
......@@ -640,9 +661,9 @@ rank_cd(PG_FUNCTION_ARGS)
elog(ERROR, "unrecognized normalization method: %d", method);
}
for(i=0;i<doclen;i++)
if ( doc[i].needfree )
pfree( doc[i].item );
for (i = 0; i < doclen; i++)
if (doc[i].needfree)
pfree(doc[i].item);
pfree(doc);
PG_FREE_IF_COPY(txt, 1);
PG_FREE_IF_COPY(query, 2);
......@@ -784,9 +805,9 @@ get_covers(PG_FUNCTION_ARGS)
VARATT_SIZEP(out) = cptr - ((char *) out);
pfree(dw);
for(i=0;i<rlen;i++)
if ( doc[i].needfree )
pfree( doc[i].item );
for (i = 0; i < rlen; i++)
if (doc[i].needfree)
pfree(doc[i].item);
pfree(doc);
PG_FREE_IF_COPY(txt, 0);
......
......@@ -1609,12 +1609,14 @@ lab0:
return 1;
}
extern struct SN_env *english_ISO_8859_1_create_env(void)
extern struct SN_env *
english_ISO_8859_1_create_env(void)
{
return SN_create_env(0, 2, 1);
}
extern void english_ISO_8859_1_close_env(struct SN_env * z)
extern void
english_ISO_8859_1_close_env(struct SN_env * z)
{
SN_close_env(z);
}
......@@ -6,10 +6,10 @@ extern "C"
{
#endif
extern struct SN_env *english_ISO_8859_1_create_env(void);
extern void english_ISO_8859_1_close_env(struct SN_env * z);
extern struct SN_env *english_ISO_8859_1_create_env(void);
extern void english_ISO_8859_1_close_env(struct SN_env * z);
extern int english_ISO_8859_1_stem(struct SN_env * z);
extern int english_ISO_8859_1_stem(struct SN_env * z);
#ifdef __cplusplus
}
......
......@@ -915,12 +915,14 @@ lab0:
return 1;
}
extern struct SN_env *russian_KOI8_R_create_env(void)
extern struct SN_env *
russian_KOI8_R_create_env(void)
{
return SN_create_env(0, 2, 0);
}
extern void russian_KOI8_R_close_env(struct SN_env * z)
extern void
russian_KOI8_R_close_env(struct SN_env * z)
{
SN_close_env(z);
}
......@@ -6,10 +6,10 @@ extern "C"
{
#endif
extern struct SN_env *russian_KOI8_R_create_env(void);
extern void russian_KOI8_R_close_env(struct SN_env * z);
extern struct SN_env *russian_KOI8_R_create_env(void);
extern void russian_KOI8_R_close_env(struct SN_env * z);
extern int russian_KOI8_R_stem(struct SN_env * z);
extern int russian_KOI8_R_stem(struct SN_env * z);
#ifdef __cplusplus
}
......
......@@ -8,16 +8,19 @@
#if defined(TS_USE_WIDE) && defined(WIN32)
size_t
wchar2char( char *to, const wchar_t *from, size_t len ) {
if (GetDatabaseEncoding() == PG_UTF8) {
int r, nbytes;
if (len==0)
wchar2char(char *to, const wchar_t *from, size_t len)
{
if (GetDatabaseEncoding() == PG_UTF8)
{
int r,
nbytes;
if (len == 0)
return 0;
/* in any case, *to should be allocated with enough space */
nbytes = WideCharToMultiByte(CP_UTF8, 0, from, len, NULL, 0, NULL, NULL);
if ( nbytes==0 )
if (nbytes == 0)
ereport(ERROR,
(errcode(ERRCODE_CHARACTER_NOT_IN_REPERTOIRE),
errmsg("UTF-16 to UTF-8 translation failed: %lu",
......@@ -26,7 +29,7 @@ wchar2char( char *to, const wchar_t *from, size_t len ) {
r = WideCharToMultiByte(CP_UTF8, 0, from, len, to, nbytes,
NULL, NULL);
if ( r==0 )
if (r == 0)
ereport(ERROR,
(errcode(ERRCODE_CHARACTER_NOT_IN_REPERTOIRE),
errmsg("UTF-16 to UTF-8 translation failed: %lu",
......@@ -38,16 +41,19 @@ wchar2char( char *to, const wchar_t *from, size_t len ) {
}
size_t
char2wchar( wchar_t *to, const char *from, size_t len ) {
if (GetDatabaseEncoding() == PG_UTF8) {
char2wchar(wchar_t *to, const char *from, size_t len)
{
if (GetDatabaseEncoding() == PG_UTF8)
{
int r;
if (len==0)
if (len == 0)
return 0;
r = MultiByteToWideChar(CP_UTF8, 0, from, len, to, len);
if (!r) {
if (!r)
{
pg_verifymbstr(from, len, false);
ereport(ERROR,
(errcode(ERRCODE_CHARACTER_NOT_IN_REPERTOIRE),
......@@ -55,7 +61,7 @@ char2wchar( wchar_t *to, const char *from, size_t len ) {
errhint("The server's LC_CTYPE locale is probably incompatible with the database encoding.")));
}
Assert( r <= len );
Assert(r <= len);
return r;
}
......
......@@ -22,17 +22,15 @@
#ifdef WIN32
size_t wchar2char( char *to, const wchar_t *from, size_t len );
size_t char2wchar( wchar_t *to, const char *from, size_t len );
size_t wchar2char(char *to, const wchar_t *from, size_t len);
size_t char2wchar(wchar_t *to, const char *from, size_t len);
#else /* WIN32 */
/* correct mbstowcs */
#define char2wchar mbstowcs
#define wchar2char wcstombs
#endif /* WIN32 */
#endif /* defined(HAVE_WCSTOMBS) && defined(HAVE_TOWLOWER) */
#endif /* defined(HAVE_WCSTOMBS) &&
* defined(HAVE_TOWLOWER) */
#endif /* __TSLOCALE_H__ */
......@@ -5,7 +5,8 @@
#include <limits.h>
#include "ts_locale.h"
typedef enum {
typedef enum
{
TPS_Base = 0,
TPS_InUWord,
TPS_InLatWord,
......@@ -85,10 +86,13 @@ typedef enum {
struct TParser;
typedef int (*TParserCharTest)(struct TParser*); /* any p_is* functions except p_iseq */
typedef void (*TParserSpecial)(struct TParser*); /* special handler for special cases... */
typedef int (*TParserCharTest) (struct TParser *); /* any p_is* functions
* except p_iseq */
typedef void (*TParserSpecial) (struct TParser *); /* special handler for
* special cases... */
typedef struct {
typedef struct
{
TParserCharTest isclass;
char c;
uint16 flags;
......@@ -97,12 +101,14 @@ typedef struct {
TParserSpecial special;
} TParserStateActionItem;
typedef struct {
typedef struct
{
TParserState state;
TParserStateActionItem *action;
} TParserStateAction;
typedef struct TParserPosition {
typedef struct TParserPosition
{
int posbyte; /* position of parser in bytes */
int poschar; /* osition of parser in characters */
int charlen; /* length of current char */
......@@ -114,7 +120,8 @@ typedef struct TParserPosition {
TParserStateActionItem *pushedAtAction;
} TParserPosition;
typedef struct TParser {
typedef struct TParser
{
/* string and position information */
char *str; /* multibyte string */
int lenstr; /* length of mbstring */
......@@ -140,8 +147,8 @@ typedef struct TParser {
} TParser;
TParser* TParserInit( char *, int );
bool TParserGet( TParser* );
void TParserClose( TParser* );
TParser *TParserInit(char *, int);
bool TParserGet(TParser *);
void TParserClose(TParser *);
#endif
......@@ -39,7 +39,7 @@ Datum prsd_start(PG_FUNCTION_ARGS);
Datum
prsd_start(PG_FUNCTION_ARGS)
{
PG_RETURN_POINTER(TParserInit( (char *) PG_GETARG_POINTER(0), PG_GETARG_INT32(1)));
PG_RETURN_POINTER(TParserInit((char *) PG_GETARG_POINTER(0), PG_GETARG_INT32(1)));
}
PG_FUNCTION_INFO_V1(prsd_getlexeme);
......@@ -47,11 +47,11 @@ Datum prsd_getlexeme(PG_FUNCTION_ARGS);
Datum
prsd_getlexeme(PG_FUNCTION_ARGS)
{
TParser *p=(TParser*)PG_GETARG_POINTER(0);
TParser *p = (TParser *) PG_GETARG_POINTER(0);
char **t = (char **) PG_GETARG_POINTER(1);
int *tlen = (int *) PG_GETARG_POINTER(2);
if ( !TParserGet(p) )
if (!TParserGet(p))
PG_RETURN_INT32(0);
*t = p->lexeme;
......@@ -65,7 +65,8 @@ Datum prsd_end(PG_FUNCTION_ARGS);
Datum
prsd_end(PG_FUNCTION_ARGS)
{
TParser *p=(TParser*)PG_GETARG_POINTER(0);
TParser *p = (TParser *) PG_GETARG_POINTER(0);
TParserClose(p);
PG_RETURN_VOID();
}
......
......@@ -16,7 +16,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.103 2005/11/20 19:49:06 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/common/heaptuple.c,v 1.104 2005/11/22 18:17:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
......@@ -512,11 +512,11 @@ nocachegetattr(HeapTuple tuple,
/*
* Now we know that we have to walk the tuple CAREFULLY.
*
* Note - This loop is a little tricky. For each non-null attribute, we
* have to first account for alignment padding before the attr, then
* advance over the attr based on its length. Nulls have no storage
* and no alignment padding either. We can use/set attcacheoff until
* we pass either a null or a var-width attribute.
* Note - This loop is a little tricky. For each non-null attribute,
* we have to first account for alignment padding before the attr,
* then advance over the attr based on its length. Nulls have no
* storage and no alignment padding either. We can use/set
* attcacheoff until we pass either a null or a var-width attribute.
*/
for (i = 0; i < attnum; i++)
......
......@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/common/tupdesc.c,v 1.112 2005/10/15 02:49:08 momjian Exp $
* $PostgreSQL: pgsql/src/backend/access/common/tupdesc.c,v 1.113 2005/11/22 18:17:05 momjian Exp $
*
* NOTES
* some of the executor utility code such as "ExecTypeFromTL" should be
......@@ -49,8 +49,8 @@ CreateTemplateTupleDesc(int natts, bool hasoid)
* Allocate enough memory for the tuple descriptor, including the
* attribute rows, and set up the attribute row pointers.
*
* Note: we assume that sizeof(struct tupleDesc) is a multiple of the struct
* pointer alignment requirement, and hence we don't need to insert
* Note: we assume that sizeof(struct tupleDesc) is a multiple of the
* struct pointer alignment requirement, and hence we don't need to insert
* alignment padding between the struct and the array of attribute row
* pointers.
*/
......
......@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.53 2005/11/06 22:39:20 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/gist/gistget.c,v 1.54 2005/11/22 18:17:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
......@@ -396,8 +396,8 @@ gistindex_keytest(IndexTuple tuple,
* are the index datum (as a GISTENTRY*), the comparison datum, and
* the comparison operator's strategy number and subtype from pg_amop.
*
* (Presently there's no need to pass the subtype since it'll always be
* zero, but might as well pass it for possible future use.)
* (Presently there's no need to pass the subtype since it'll always
* be zero, but might as well pass it for possible future use.)
*/
test = FunctionCall4(&key->sk_func,
PointerGetDatum(&de),
......
......@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/gist/gistutil.c,v 1.8 2005/11/06 22:39:20 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/gist/gistutil.c,v 1.9 2005/11/22 18:17:05 momjian Exp $
*-------------------------------------------------------------------------
*/
#include "postgres.h"
......@@ -877,9 +877,10 @@ gistcheckpage(Relation rel, Buffer buf)
Page page = BufferGetPage(buf);
/*
* ReadBuffer verifies that every newly-read page passes PageHeaderIsValid,
* which means it either contains a reasonably sane page header or is
* all-zero. We have to defend against the all-zero case, however.
* ReadBuffer verifies that every newly-read page passes
* PageHeaderIsValid, which means it either contains a reasonably sane
* page header or is all-zero. We have to defend against the all-zero
* case, however.
*/
if (PageIsNew(page))
ereport(ERROR,
......@@ -925,6 +926,7 @@ gistNewBuffer(Relation r)
break; /* nothing left in FSM */
buffer = ReadBuffer(r, blkno);
/*
* We have to guard against the possibility that someone else already
* recycled this page; the buffer may be locked if so.
......
......@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/gist/gistvacuum.c,v 1.10 2005/11/06 22:39:20 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/gist/gistvacuum.c,v 1.11 2005/11/22 18:17:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
......@@ -65,6 +65,7 @@ gistVacuumUpdate(GistVacuum *gv, BlockNumber blkno, bool needunion)
lencompleted = 16;
buffer = ReadBuffer(gv->index, blkno);
/*
* This is only used during VACUUM FULL, so we need not bother to lock
* individual index pages
......
......@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.48 2005/11/06 19:29:00 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/hash/hashovfl.c,v 1.49 2005/11/22 18:17:05 momjian Exp $
*
* NOTES
* Overflow pages look like ordinary relation pages.
......@@ -488,9 +488,9 @@ _hash_initbitmap(Relation rel, HashMetaPage metap, BlockNumber blkno)
* It is okay to write-lock the new bitmap page while holding metapage
* write lock, because no one else could be contending for the new page.
*
* There is some loss of concurrency in possibly doing I/O for the new page
* while holding the metapage lock, but this path is taken so seldom that
* it's not worth worrying about.
* There is some loss of concurrency in possibly doing I/O for the new
* page while holding the metapage lock, but this path is taken so seldom
* that it's not worth worrying about.
*/
buf = _hash_getbuf(rel, blkno, HASH_WRITE);
pg = BufferGetPage(buf);
......
......@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.53 2005/11/06 19:29:00 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/hash/hashpage.c,v 1.54 2005/11/22 18:17:05 momjian Exp $
*
* NOTES
* Postgres hash pages look like ordinary relation pages. The opaque
......@@ -402,8 +402,8 @@ _hash_expandtable(Relation rel, Buffer metabuf)
* The lock protects us against other backends, but not against our own
* backend. Must check for active scans separately.
*
* Ideally we would lock the new bucket too before proceeding, but if we are
* about to cross a splitpoint then the BUCKET_TO_BLKNO mapping isn't
* Ideally we would lock the new bucket too before proceeding, but if we
* are about to cross a splitpoint then the BUCKET_TO_BLKNO mapping isn't
* correct yet. For simplicity we update the metapage first and then
* lock. This should be okay because no one else should be trying to lock
* the new bucket yet...
......@@ -422,11 +422,11 @@ _hash_expandtable(Relation rel, Buffer metabuf)
/*
* Okay to proceed with split. Update the metapage bucket mapping info.
*
* Since we are scribbling on the metapage data right in the shared buffer,
* any failure in this next little bit leaves us with a big problem: the
* metapage is effectively corrupt but could get written back to disk. We
* don't really expect any failure, but just to be sure, establish a
* critical section.
* Since we are scribbling on the metapage data right in the shared
* buffer, any failure in this next little bit leaves us with a big
* problem: the metapage is effectively corrupt but could get written back
* to disk. We don't really expect any failure, but just to be sure,
* establish a critical section.
*/
START_CRIT_SECTION();
......
......@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/hash/hashutil.c,v 1.43 2005/11/06 19:29:00 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/hash/hashutil.c,v 1.44 2005/11/22 18:17:05 momjian Exp $
*
*-------------------------------------------------------------------------
*/
......@@ -115,9 +115,10 @@ _hash_checkpage(Relation rel, Buffer buf, int flags)
Page page = BufferGetPage(buf);
/*
* ReadBuffer verifies that every newly-read page passes PageHeaderIsValid,
* which means it either contains a reasonably sane page header or is
* all-zero. We have to defend against the all-zero case, however.
* ReadBuffer verifies that every newly-read page passes
* PageHeaderIsValid, which means it either contains a reasonably sane
* page header or is all-zero. We have to defend against the all-zero
* case, however.
*/
if (PageIsNew(page))
ereport(ERROR,
......
......@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.202 2005/11/20 19:49:07 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/heap/heapam.c,v 1.203 2005/11/22 18:17:06 momjian Exp $
*
*
* INTERFACE ROUTINES
......@@ -1127,8 +1127,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
* If the new tuple is too big for storage or contains already toasted
* out-of-line attributes from some other relation, invoke the toaster.
*
* Note: below this point, heaptup is the data we actually intend to
* store into the relation; tup is the caller's original untoasted data.
* Note: below this point, heaptup is the data we actually intend to store
* into the relation; tup is the caller's original untoasted data.
*/
if (HeapTupleHasExternal(tup) ||
(MAXALIGN(tup->t_len) > TOAST_TUPLE_THRESHOLD))
......@@ -1215,8 +1215,8 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
/*
* If tuple is cachable, mark it for invalidation from the caches in case
* we abort. Note it is OK to do this after WriteBuffer releases the
* buffer, because the heaptup data structure is all in local memory,
* not in the shared buffer.
* buffer, because the heaptup data structure is all in local memory, not
* in the shared buffer.
*/
CacheInvalidateHeapTuple(relation, heaptup);
......@@ -1323,8 +1323,8 @@ l1:
* heap_lock_tuple). LockTuple will release us when we are
* next-in-line for the tuple.
*
* If we are forced to "start over" below, we keep the tuple lock; this
* arranges that we stay at the head of the line while rechecking
* If we are forced to "start over" below, we keep the tuple lock;
* this arranges that we stay at the head of the line while rechecking
* tuple state.
*/
if (!have_tuple_lock)
......@@ -1638,8 +1638,8 @@ l2:
* heap_lock_tuple). LockTuple will release us when we are
* next-in-line for the tuple.
*
* If we are forced to "start over" below, we keep the tuple lock; this
* arranges that we stay at the head of the line while rechecking
* If we are forced to "start over" below, we keep the tuple lock;
* this arranges that we stay at the head of the line while rechecking
* tuple state.
*/
if (!have_tuple_lock)
......@@ -1771,8 +1771,8 @@ l2:
* show that it's already being updated, else other processes may try to
* update it themselves.
*
* We need to invoke the toaster if there are already any out-of-line toasted
* values present, or if the new tuple is over-threshold.
* We need to invoke the toaster if there are already any out-of-line
* toasted values present, or if the new tuple is over-threshold.
*/
newtupsize = MAXALIGN(newtup->t_len);
......@@ -2111,8 +2111,8 @@ l3:
* LockTuple will release us when we are next-in-line for the tuple.
* We must do this even if we are share-locking.
*
* If we are forced to "start over" below, we keep the tuple lock; this
* arranges that we stay at the head of the line while rechecking
* If we are forced to "start over" below, we keep the tuple lock;
* this arranges that we stay at the head of the line while rechecking
* tuple state.
*/
if (!have_tuple_lock)
......
......@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/heap/hio.c,v 1.58 2005/10/15 02:49:08 momjian Exp $
* $PostgreSQL: pgsql/src/backend/access/heap/hio.c,v 1.59 2005/11/22 18:17:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
......@@ -296,11 +296,11 @@ RelationGetBufferForTuple(Relation relation, Size len,
/*
* Remember the new page as our target for future insertions.
*
* XXX should we enter the new page into the free space map immediately, or
* just keep it for this backend's exclusive use in the short run (until
* VACUUM sees it)? Seems to depend on whether you expect the current
* backend to make more insertions or not, which is probably a good bet
* most of the time. So for now, don't add it to FSM yet.
* XXX should we enter the new page into the free space map immediately,
* or just keep it for this backend's exclusive use in the short run
* (until VACUUM sees it)? Seems to depend on whether you expect the
* current backend to make more insertions or not, which is probably a
* good bet most of the time. So for now, don't add it to FSM yet.
*/
relation->rd_targblock = BufferGetBlockNumber(buffer);
......
......@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.55 2005/11/20 19:49:07 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/heap/tuptoaster.c,v 1.56 2005/11/22 18:17:06 momjian Exp $
*
*
* INTERFACE ROUTINES
......@@ -1074,8 +1074,8 @@ toast_save_datum(Relation rel, Datum value)
* FormIndexDatum: this relies on the knowledge that the index columns
* are the same as the initial columns of the table.
*
* Note also that there had better not be any user-created index on the
* TOAST table, since we don't bother to update anything else.
* Note also that there had better not be any user-created index on
* the TOAST table, since we don't bother to update anything else.
*/
index_insert(toastidx, t_values, t_isnull,
&(toasttup->t_self),
......@@ -1213,9 +1213,9 @@ toast_fetch_datum(varattrib *attr)
/*
* Read the chunks by index
*
* Note that because the index is actually on (valueid, chunkidx) we will see
* the chunks in chunkidx order, even though we didn't explicitly ask for
* it.
* Note that because the index is actually on (valueid, chunkidx) we will
* see the chunks in chunkidx order, even though we didn't explicitly ask
* for it.
*/
nextidx = 0;
......
......@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/index/genam.c,v 1.50 2005/11/20 19:49:07 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/index/genam.c,v 1.51 2005/11/22 18:17:06 momjian Exp $
*
* NOTES
* many of the old access method routines have been turned into
......@@ -202,8 +202,8 @@ systable_beginscan(Relation heapRelation,
/*
* Change attribute numbers to be index column numbers.
*
* This code could be generalized to search for the index key numbers to
* substitute, but for now there's no need.
* This code could be generalized to search for the index key numbers
* to substitute, but for now there's no need.
*/
for (i = 0; i < nkeys; i++)
{
......
......@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.128 2005/11/06 19:29:00 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtinsert.c,v 1.129 2005/11/22 18:17:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
......@@ -104,8 +104,8 @@ top:
* If we're not allowing duplicates, make sure the key isn't already in
* the index.
*
* NOTE: obviously, _bt_check_unique can only detect keys that are already in
* the index; so it cannot defend against concurrent insertions of the
* NOTE: obviously, _bt_check_unique can only detect keys that are already
* in the index; so it cannot defend against concurrent insertions of the
* same key. We protect against that by means of holding a write lock on
* the target page. Any other would-be inserter of the same key must
* acquire a write lock on the same target page, so only one would-be
......@@ -114,8 +114,8 @@ top:
* our insertion, so no later inserter can fail to see our insertion.
* (This requires some care in _bt_insertonpg.)
*
* If we must wait for another xact, we release the lock while waiting, and
* then must start over completely.
* If we must wait for another xact, we release the lock while waiting,
* and then must start over completely.
*/
if (index_is_unique)
{
......@@ -193,8 +193,8 @@ _bt_check_unique(Relation rel, BTItem btitem, Relation heapRel,
/*
* We can skip items that are marked killed.
*
* Formerly, we applied _bt_isequal() before checking the kill flag,
* so as to fall out of the item loop as soon as possible.
* Formerly, we applied _bt_isequal() before checking the kill
* flag, so as to fall out of the item loop as soon as possible.
* However, in the presence of heavy update activity an index may
* contain many killed items with the same key; running
* _bt_isequal() on each killed item gets expensive. Furthermore
......@@ -431,11 +431,11 @@ _bt_insertonpg(Relation rel,
/*
* step right to next non-dead page
*
* must write-lock that page before releasing write lock on current
* page; else someone else's _bt_check_unique scan could fail to
* see our insertion. write locks on intermediate dead pages
* won't do because we don't know when they will get de-linked
* from the tree.
* must write-lock that page before releasing write lock on
* current page; else someone else's _bt_check_unique scan could
* fail to see our insertion. write locks on intermediate dead
* pages won't do because we don't know when they will get
* de-linked from the tree.
*/
Buffer rbuf = InvalidBuffer;
......@@ -471,9 +471,9 @@ _bt_insertonpg(Relation rel,
/*
* Do we need to split the page to fit the item on it?
*
* Note: PageGetFreeSpace() subtracts sizeof(ItemIdData) from its result, so
* this comparison is correct even though we appear to be accounting only
* for the item and not for its line pointer.
* Note: PageGetFreeSpace() subtracts sizeof(ItemIdData) from its result,
* so this comparison is correct even though we appear to be accounting
* only for the item and not for its line pointer.
*/
if (PageGetFreeSpace(page) < itemsz)
{
......@@ -1158,10 +1158,10 @@ _bt_insert_parent(Relation rel,
* the next higher level that someone constructed meanwhile, and find the
* right place to insert as for the normal case.
*
* If we have to search for the parent level, we do so by re-descending from
* the root. This is not super-efficient, but it's rare enough not to
* matter. (This path is also taken when called from WAL recovery --- we
* have no stack in that case.)
* If we have to search for the parent level, we do so by re-descending
* from the root. This is not super-efficient, but it's rare enough not
* to matter. (This path is also taken when called from WAL recovery ---
* we have no stack in that case.)
*/
if (is_root)
{
......
......@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.89 2005/11/06 19:29:00 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtpage.c,v 1.90 2005/11/22 18:17:06 momjian Exp $
*
* NOTES
* Postgres btree pages look like ordinary relation pages. The opaque
......@@ -412,9 +412,10 @@ _bt_checkpage(Relation rel, Buffer buf)
Page page = BufferGetPage(buf);
/*
* ReadBuffer verifies that every newly-read page passes PageHeaderIsValid,
* which means it either contains a reasonably sane page header or is
* all-zero. We have to defend against the all-zero case, however.
* ReadBuffer verifies that every newly-read page passes
* PageHeaderIsValid, which means it either contains a reasonably sane
* page header or is all-zero. We have to defend against the all-zero
* case, however.
*/
if (PageIsNew(page))
ereport(ERROR,
......@@ -475,21 +476,21 @@ _bt_getbuf(Relation rel, BlockNumber blkno, int access)
* have been re-used between the time the last VACUUM scanned it and
* the time the VACUUM made its FSM updates.)
*
* In fact, it's worse than that: we can't even assume that it's safe to
* take a lock on the reported page. If somebody else has a lock on
* it, or even worse our own caller does, we could deadlock. (The
* In fact, it's worse than that: we can't even assume that it's safe
* to take a lock on the reported page. If somebody else has a lock
* on it, or even worse our own caller does, we could deadlock. (The
* own-caller scenario is actually not improbable. Consider an index
* on a serial or timestamp column. Nearly all splits will be at the
* rightmost page, so it's entirely likely that _bt_split will call us
* while holding a lock on the page most recently acquired from FSM.
* A VACUUM running concurrently with the previous split could well
* have placed that page back in FSM.)
* while holding a lock on the page most recently acquired from FSM. A
* VACUUM running concurrently with the previous split could well have
* placed that page back in FSM.)
*
* To get around that, we ask for only a conditional lock on the reported
* page. If we fail, then someone else is using the page, and we may
* reasonably assume it's not free. (If we happen to be wrong, the
* worst consequence is the page will be lost to use till the next
* VACUUM, which is no big problem.)
* To get around that, we ask for only a conditional lock on the
* reported page. If we fail, then someone else is using the page,
* and we may reasonably assume it's not free. (If we happen to be
* wrong, the worst consequence is the page will be lost to use till
* the next VACUUM, which is no big problem.)
*/
for (;;)
{
......@@ -839,12 +840,12 @@ _bt_pagedel(Relation rel, Buffer buf, bool vacuum_full)
* We have to lock the pages we need to modify in the standard order:
* moving right, then up. Else we will deadlock against other writers.
*
* So, we need to find and write-lock the current left sibling of the target
* page. The sibling that was current a moment ago could have split, so
* we may have to move right. This search could fail if either the
* sibling or the target page was deleted by someone else meanwhile; if
* so, give up. (Right now, that should never happen, since page deletion
* is only done in VACUUM and there shouldn't be multiple VACUUMs
* So, we need to find and write-lock the current left sibling of the
* target page. The sibling that was current a moment ago could have
* split, so we may have to move right. This search could fail if either
* the sibling or the target page was deleted by someone else meanwhile;
* if so, give up. (Right now, that should never happen, since page
* deletion is only done in VACUUM and there shouldn't be multiple VACUUMs
* concurrently on the same table.)
*/
if (leftsib != P_NONE)
......
......@@ -12,7 +12,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.133 2005/11/06 19:29:00 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtree.c,v 1.134 2005/11/22 18:17:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
......@@ -307,8 +307,8 @@ btgettuple(PG_FUNCTION_ARGS)
* Save heap TID to use it in _bt_restscan. Then release the read lock on
* the buffer so that we aren't blocking other backends.
*
* NOTE: we do keep the pin on the buffer! This is essential to ensure that
* someone else doesn't delete the index entry we are stopped on.
* NOTE: we do keep the pin on the buffer! This is essential to ensure
* that someone else doesn't delete the index entry we are stopped on.
*/
if (res)
{
......@@ -774,8 +774,8 @@ btvacuumcleanup(PG_FUNCTION_ARGS)
/*
* We can't use _bt_getbuf() here because it always applies
* _bt_checkpage(), which will barf on an all-zero page.
* We want to recycle all-zero pages, not fail.
* _bt_checkpage(), which will barf on an all-zero page. We want to
* recycle all-zero pages, not fail.
*/
buf = ReadBuffer(rel, blkno);
LockBuffer(buf, BT_READ);
......
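The btvacuumcleanup comment above says an all-zero page must be recycled rather than rejected as corrupt. A minimal sketch of such an all-zero test, with a hypothetical page size and function name (this is not the real _bt_checkpage logic):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 8192

/*
 * Return true if the page was never initialized (every byte is zero).
 * A cleanup scan would recycle such a page instead of reporting an error.
 * If page[0] == 0 and each byte equals its successor, all bytes are zero.
 */
static bool page_is_all_zero(const unsigned char *page)
{
	return page[0] == 0 &&
		memcmp(page, page + 1, PAGE_SIZE - 1) == 0;
}

int main(void)
{
	static unsigned char page[PAGE_SIZE];		/* zero-initialized */

	printf("all zero? %d\n", page_is_all_zero(page));	/* 1 */
	return 0;
}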
......@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.96 2005/10/18 01:06:23 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsearch.c,v 1.97 2005/11/22 18:17:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
......@@ -164,10 +164,11 @@ _bt_moveright(Relation rel,
*
* When nextkey = true: move right if the scan key is >= page's high key.
*
* The page could even have split more than once, so scan as far as needed.
* The page could even have split more than once, so scan as far as
* needed.
*
* We also have to move right if we followed a link that brought us to a dead
* page.
* We also have to move right if we followed a link that brought us to a
* dead page.
*/
cmpval = nextkey ? 0 : 1;
......@@ -255,8 +256,8 @@ _bt_binsrch(Relation rel,
* For nextkey=false (cmpval=1), the loop invariant is: all slots before
* 'low' are < scan key, all slots at or after 'high' are >= scan key.
*
* For nextkey=true (cmpval=0), the loop invariant is: all slots before 'low'
* are <= scan key, all slots at or after 'high' are > scan key.
* For nextkey=true (cmpval=0), the loop invariant is: all slots before
* 'low' are <= scan key, all slots at or after 'high' are > scan key.
*
* We can fall out when high == low.
*/
......@@ -282,8 +283,8 @@ _bt_binsrch(Relation rel,
* At this point we have high == low, but be careful: they could point
* past the last slot on the page.
*
* On a leaf page, we always return the first key >= scan key (resp. > scan
* key), which could be the last slot + 1.
* On a leaf page, we always return the first key >= scan key (resp. >
* scan key), which could be the last slot + 1.
*/
if (P_ISLEAF(opaque))
return low;
......@@ -350,8 +351,8 @@ _bt_compare(Relation rel,
* you think about how multi-key ordering works, you'll understand why
* this is.
*
* We don't test for violation of this condition here, however. The initial
* setup for the index scan had better have gotten it right (see
* We don't test for violation of this condition here, however. The
* initial setup for the index scan had better have gotten it right (see
* _bt_first).
*/
......@@ -692,9 +693,9 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* where we need to start the scan, and set flag variables to control the
* code below.
*
* If nextkey = false, _bt_search and _bt_binsrch will locate the first item
* >= scan key. If nextkey = true, they will locate the first item > scan
* key.
* If nextkey = false, _bt_search and _bt_binsrch will locate the first
* item >= scan key. If nextkey = true, they will locate the first item >
* scan key.
*
* If goback = true, we will then step back one item, while if goback =
* false, we will start the scan on the located item.
......@@ -819,9 +820,9 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
* than or equal to the scan key and we know that everything on later
* pages is greater than scan key.
*
* The actually desired starting point is either this item or the prior one,
* or in the end-of-page case it's the first item on the next page or the
* last item on this page. We apply _bt_step if needed to get to the
* The actually desired starting point is either this item or the prior
* one, or in the end-of-page case it's the first item on the next page or
* the last item on this page. We apply _bt_step if needed to get to the
* right place.
*
* If _bt_step fails (meaning we fell off the end of the index in one
......@@ -1044,9 +1045,9 @@ _bt_walk_left(Relation rel, Buffer buf)
* the original page got deleted and isn't in the sibling chain at all
* anymore, not that its left sibling got split more than four times.
*
* Note that it is correct to test P_ISDELETED not P_IGNORE here, because
* half-dead pages are still in the sibling chain. Caller must reject
* half-dead pages if wanted.
* Note that it is correct to test P_ISDELETED not P_IGNORE here,
* because half-dead pages are still in the sibling chain. Caller
* must reject half-dead pages if wanted.
*/
tries = 0;
for (;;)
......
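The _bt_binsrch invariants reflowed above reduce to a single binary search whose boundary flips between "first slot >= key" and "first slot > key" via the cmpval trick. A self-contained sketch over a plain int array, with hypothetical names standing in for the real page and scan-key machinery:

#include <stdbool.h>
#include <stdio.h>

/* three-way comparison of the scan key against an item */
static int cmp_key(int key, int item)
{
	return (key > item) - (key < item);
}

/*
 * nextkey = false: return the first slot whose item is >= key.
 * nextkey = true:  return the first slot whose item is >  key.
 * The result may be n, i.e. "last slot + 1".
 */
static int bt_binsrch(const int *items, int n, int key, bool nextkey)
{
	int low = 0;
	int high = n;
	int cmpval = nextkey ? 0 : 1;	/* same trick as in _bt_binsrch */

	while (high > low)
	{
		int mid = low + (high - low) / 2;

		/*
		 * Invariant: items before 'low' compare below the target boundary,
		 * items at or after 'high' compare at or above it.
		 */
		if (cmp_key(key, items[mid]) >= cmpval)
			low = mid + 1;
		else
			high = mid;
	}
	return low;
}

int main(void)
{
	int items[] = {10, 20, 20, 30};

	printf("%d %d\n",
		   bt_binsrch(items, 4, 20, false),		/* 1: first slot >= 20 */
		   bt_binsrch(items, 4, 20, true));		/* 3: first slot >  20 */
	return 0;
}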
......@@ -56,7 +56,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.95 2005/10/15 02:49:09 momjian Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtsort.c,v 1.96 2005/11/22 18:17:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
......@@ -487,9 +487,9 @@ _bt_buildadd(BTWriteState *wstate, BTPageState *state, BTItem bti)
* the per-page available space. Note that at this point, btisz doesn't
* include the ItemId.
*
* NOTE: similar code appears in _bt_insertonpg() to defend against oversize
* items being inserted into an already-existing index. But during
* creation of an index, we don't go through there.
* NOTE: similar code appears in _bt_insertonpg() to defend against
* oversize items being inserted into an already-existing index. But
* during creation of an index, we don't go through there.
*/
if (btisz > BTMaxItemSize(npage))
ereport(ERROR,
......
......@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.65 2005/10/18 01:06:23 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/nbtree/nbtutils.c,v 1.66 2005/11/22 18:17:06 momjian Exp $
*
*-------------------------------------------------------------------------
*/
......@@ -549,8 +549,8 @@ _bt_checkkeys(IndexScanDesc scan, IndexTuple tuple,
* able to conclude no further tuples will pass, either. We have
* to look at the scan direction and the qual type.
*
* Note: the only case in which we would keep going after failing a
* required qual is if there are partially-redundant quals that
* Note: the only case in which we would keep going after failing
* a required qual is if there are partially-redundant quals that
* _bt_preprocess_keys() was unable to eliminate. For example,
* given "x > 4 AND x > 10" where both are cross-type comparisons
* and so not removable, we might start the scan at the x = 4
......
......@@ -42,7 +42,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/backend/access/transam/multixact.c,v 1.12 2005/11/05 21:19:47 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/multixact.c,v 1.13 2005/11/22 18:17:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
......@@ -129,22 +129,23 @@ typedef struct MultiXactStateData
* member of a MultiXact, and that MultiXact would have to be created
* during or after the lock acquisition.)
*
* OldestVisibleMXactId[k] is the oldest MultiXactId each backend's current
* transaction(s) think is potentially live, or InvalidMultiXactId when
* not in a transaction or not in a transaction that's paid any attention
* to MultiXacts yet. This is computed when first needed in a given
* transaction, and cleared at transaction end. We can compute it as the
* minimum of the valid OldestMemberMXactId[] entries at the time we
* compute it (using nextMXact if none are valid). Each backend is
* OldestVisibleMXactId[k] is the oldest MultiXactId each backend's
* current transaction(s) think is potentially live, or InvalidMultiXactId
* when not in a transaction or not in a transaction that's paid any
* attention to MultiXacts yet. This is computed when first needed in a
* given transaction, and cleared at transaction end. We can compute it
* as the minimum of the valid OldestMemberMXactId[] entries at the time
* we compute it (using nextMXact if none are valid). Each backend is
* required not to attempt to access any SLRU data for MultiXactIds older
* than its own OldestVisibleMXactId[] setting; this is necessary because
* the checkpointer could truncate away such data at any instant.
*
* The checkpointer can compute the safe truncation point as the oldest valid
* value among all the OldestMemberMXactId[] and OldestVisibleMXactId[]
* entries, or nextMXact if none are valid. Clearly, it is not possible
* for any later-computed OldestVisibleMXactId value to be older than
* this, and so there is no risk of truncating data that is still needed.
* The checkpointer can compute the safe truncation point as the oldest
* valid value among all the OldestMemberMXactId[] and
* OldestVisibleMXactId[] entries, or nextMXact if none are valid.
* Clearly, it is not possible for any later-computed OldestVisibleMXactId
* value to be older than this, and so there is no risk of truncating data
* that is still needed.
*/
MultiXactId perBackendXactIds[1]; /* VARIABLE LENGTH ARRAY */
} MultiXactStateData;
......@@ -631,8 +632,8 @@ CreateMultiXactId(int nxids, TransactionId *xids)
}
/*
* Assign the MXID and offsets range to use, and make sure there is
* space in the OFFSETs and MEMBERs files. NB: this routine does
* Assign the MXID and offsets range to use, and make sure there is space
* in the OFFSETs and MEMBERs files. NB: this routine does
* START_CRIT_SECTION().
*/
multi = GetNewMultiXactId(nxids, &offset);
......@@ -788,9 +789,9 @@ GetNewMultiXactId(int nxids, MultiXactOffset *offset)
ExtendMultiXactOffset(result);
/*
* Reserve the members space, similarly to above. Also, be
* careful not to return zero as the starting offset for any multixact.
* See GetMultiXactIdMembers() for motivation.
* Reserve the members space, similarly to above. Also, be careful not to
* return zero as the starting offset for any multixact. See
* GetMultiXactIdMembers() for motivation.
*/
nextOffset = MultiXactState->nextOffset;
if (nextOffset == 0)
......@@ -804,8 +805,8 @@ GetNewMultiXactId(int nxids, MultiXactOffset *offset)
ExtendMultiXactMember(nextOffset, nxids);
/*
* Critical section from here until caller has written the data into
* the just-reserved SLRU space; we don't want to error out with a partly
* Critical section from here until caller has written the data into the
* just-reserved SLRU space; we don't want to error out with a partly
* written MultiXact structure. (In particular, failing to write our
* start offset after advancing nextMXact would effectively corrupt the
* previous MultiXact.)
......@@ -819,8 +820,8 @@ GetNewMultiXactId(int nxids, MultiXactOffset *offset)
* We don't care about MultiXactId wraparound here; it will be handled by
* the next iteration. But note that nextMXact may be InvalidMultiXactId
* after this routine exits, so anyone else looking at the variable must
* be prepared to deal with that. Similarly, nextOffset may be zero,
* but we won't use that as the actual start offset of the next multixact.
* be prepared to deal with that. Similarly, nextOffset may be zero, but
* we won't use that as the actual start offset of the next multixact.
*/
(MultiXactState->nextMXact)++;
......@@ -915,26 +916,26 @@ GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids)
/*
* Find out the offset at which we need to start reading MultiXactMembers
* and the number of members in the multixact. We determine the latter
* as the difference between this multixact's starting offset and the
* next one's. However, there are some corner cases to worry about:
* and the number of members in the multixact. We determine the latter as
* the difference between this multixact's starting offset and the next
* one's. However, there are some corner cases to worry about:
*
* 1. This multixact may be the latest one created, in which case there
* is no next one to look at. In this case the nextOffset value we just
* 1. This multixact may be the latest one created, in which case there is
* no next one to look at. In this case the nextOffset value we just
* saved is the correct endpoint.
*
* 2. The next multixact may still be in process of being filled in:
* that is, another process may have done GetNewMultiXactId but not yet
* written the offset entry for that ID. In that scenario, it is
* guaranteed that the offset entry for that multixact exists (because
* GetNewMultiXactId won't release MultiXactGenLock until it does)
* but contains zero (because we are careful to pre-zero offset pages).
* Because GetNewMultiXactId will never return zero as the starting offset
* for a multixact, when we read zero as the next multixact's offset, we
* know we have this case. We sleep for a bit and try again.
* 2. The next multixact may still be in process of being filled in: that
* is, another process may have done GetNewMultiXactId but not yet written
* the offset entry for that ID. In that scenario, it is guaranteed that
* the offset entry for that multixact exists (because GetNewMultiXactId
* won't release MultiXactGenLock until it does) but contains zero
* (because we are careful to pre-zero offset pages). Because
* GetNewMultiXactId will never return zero as the starting offset for a
* multixact, when we read zero as the next multixact's offset, we know we
* have this case. We sleep for a bit and try again.
*
* 3. Because GetNewMultiXactId increments offset zero to offset one
* to handle case #2, there is an ambiguity near the point of offset
* 3. Because GetNewMultiXactId increments offset zero to offset one to
* handle case #2, there is an ambiguity near the point of offset
* wraparound. If we see next multixact's offset is one, is that our
* multixact's actual endpoint, or did it end at zero with a subsequent
* increment? We handle this using the knowledge that if the zero'th
......
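The GetMultiXactIdMembers comment above determines the member count as the difference between this multixact's starting offset and the next one's, with special handling for the latest multixact and for a not-yet-written (zero) next offset. A toy sketch of just that arithmetic, with stand-in arrays rather than the real SLRU structures:

#include <stdint.h>
#include <stdio.h>

static uint32_t offsets[]   = {1, 3, 6};	/* starting offset per MultiXactId */
static uint32_t next_mxact  = 3;			/* next MultiXactId to be assigned */
static uint32_t next_offset = 9;			/* value saved under MultiXactGenLock */

static int multixact_nmembers(uint32_t multi)
{
	uint32_t	start = offsets[multi];
	uint32_t	end;

	if (multi + 1 == next_mxact)
		end = next_offset;			/* case 1: we are the latest multixact */
	else
		end = offsets[multi + 1];

	/*
	 * Case 2: a zero next-offset means the entry is not written yet (offset
	 * pages are pre-zeroed and real offsets are never zero), so the caller
	 * would sleep briefly and retry rather than trust it.
	 */
	if (end == 0)
		return -1;					/* "try again later" in this toy version */

	return (int) (end - start);
}

int main(void)
{
	printf("multixact 1 has %d members\n", multixact_nmembers(1));	/* 3 */
	printf("multixact 2 has %d members\n", multixact_nmembers(2));	/* 3 */
	return 0;
}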
......@@ -37,7 +37,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/backend/access/transam/slru.c,v 1.30 2005/11/05 21:19:47 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/slru.c,v 1.31 2005/11/22 18:17:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
......@@ -236,13 +236,14 @@ SimpleLruWaitIO(SlruCtl ctl, int slotno)
LWLockAcquire(shared->buffer_locks[slotno], LW_SHARED);
LWLockRelease(shared->buffer_locks[slotno]);
LWLockAcquire(shared->ControlLock, LW_EXCLUSIVE);
/*
* If the slot is still in an io-in-progress state, then either someone
* already started a new I/O on the slot, or a previous I/O failed and
* neglected to reset the page state. That shouldn't happen, really,
* but it seems worth a few extra cycles to check and recover from it.
* We can cheaply test for failure by seeing if the buffer lock is still
* held (we assume that transaction abort would release the lock).
* neglected to reset the page state. That shouldn't happen, really, but
* it seems worth a few extra cycles to check and recover from it. We can
* cheaply test for failure by seeing if the buffer lock is still held (we
* assume that transaction abort would release the lock).
*/
if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS ||
shared->page_status[slotno] == SLRU_PAGE_WRITE_IN_PROGRESS)
......@@ -252,7 +253,8 @@ SimpleLruWaitIO(SlruCtl ctl, int slotno)
/* indeed, the I/O must have failed */
if (shared->page_status[slotno] == SLRU_PAGE_READ_IN_PROGRESS)
shared->page_status[slotno] = SLRU_PAGE_EMPTY;
else /* write_in_progress */
else
/* write_in_progress */
{
shared->page_status[slotno] = SLRU_PAGE_VALID;
shared->page_dirty[slotno] = true;
......@@ -375,8 +377,8 @@ SimpleLruWritePage(SlruCtl ctl, int slotno, SlruFlush fdata)
}
/*
* Do nothing if page is not dirty, or if buffer no longer contains
* the same page we were called for.
* Do nothing if page is not dirty, or if buffer no longer contains the
* same page we were called for.
*/
if (!shared->page_dirty[slotno] ||
shared->page_status[slotno] != SLRU_PAGE_VALID ||
......@@ -384,8 +386,8 @@ SimpleLruWritePage(SlruCtl ctl, int slotno, SlruFlush fdata)
return;
/*
* Mark the slot write-busy, and clear the dirtybit. After this point,
* a transaction status update on this page will mark it dirty again.
* Mark the slot write-busy, and clear the dirtybit. After this point, a
* transaction status update on this page will mark it dirty again.
*/
shared->page_status[slotno] = SLRU_PAGE_WRITE_IN_PROGRESS;
shared->page_dirty[slotno] = false;
......
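The SimpleLruWaitIO comment above describes recovering from a failed I/O by noticing that a slot is still marked in-progress while nobody holds its buffer lock. A hedged standalone sketch of that recovery step, using a pthread mutex and trylock as a stand-in for the per-buffer LWLock test (the struct and names are illustrative only); compile with -pthread:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

typedef enum
{
	SLOT_EMPTY, SLOT_READ_IN_PROGRESS,
	SLOT_WRITE_IN_PROGRESS, SLOT_VALID
} SlotStatus;

typedef struct
{
	SlotStatus	status;
	bool		dirty;
	pthread_mutex_t buffer_lock;	/* held for the duration of any I/O */
} Slot;

/*
 * If the slot is still marked in-progress but nobody holds its buffer lock,
 * the I/O must have failed without resetting the state; recover it.
 */
static void recover_failed_io(Slot *slot)
{
	if (slot->status != SLOT_READ_IN_PROGRESS &&
		slot->status != SLOT_WRITE_IN_PROGRESS)
		return;

	if (pthread_mutex_trylock(&slot->buffer_lock) == 0)
	{
		if (slot->status == SLOT_READ_IN_PROGRESS)
			slot->status = SLOT_EMPTY;	/* the read never completed */
		else
		{
			slot->status = SLOT_VALID;	/* old contents are still good ... */
			slot->dirty = true;			/* ... but still need writing out */
		}
		pthread_mutex_unlock(&slot->buffer_lock);
	}
}

int main(void)
{
	Slot		slot = {SLOT_READ_IN_PROGRESS, false, PTHREAD_MUTEX_INITIALIZER};

	recover_failed_io(&slot);
	printf("status after recovery: %d\n", slot.status);	/* SLOT_EMPTY */
	return 0;
}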
......@@ -22,7 +22,7 @@
* Portions Copyright (c) 1996-2005, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
* $PostgreSQL: pgsql/src/backend/access/transam/subtrans.c,v 1.12 2005/11/05 21:19:47 tgl Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/subtrans.c,v 1.13 2005/11/22 18:17:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
......@@ -261,8 +261,8 @@ ShutdownSUBTRANS(void)
/*
* Flush dirty SUBTRANS pages to disk
*
* This is not actually necessary from a correctness point of view. We do it
* merely as a debugging aid.
* This is not actually necessary from a correctness point of view. We do
* it merely as a debugging aid.
*/
SimpleLruFlush(SubTransCtl, false);
}
......@@ -276,9 +276,9 @@ CheckPointSUBTRANS(void)
/*
* Flush dirty SUBTRANS pages to disk
*
* This is not actually necessary from a correctness point of view. We do it
* merely to improve the odds that writing of dirty pages is done by the
* checkpoint process and not by backends.
* This is not actually necessary from a correctness point of view. We do
* it merely to improve the odds that writing of dirty pages is done by
* the checkpoint process and not by backends.
*/
SimpleLruFlush(SubTransCtl, true);
}
......
......@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/transam/transam.c,v 1.66 2005/10/15 02:49:09 momjian Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/transam.c,v 1.67 2005/11/22 18:17:07 momjian Exp $
*
* NOTES
* This file contains the high level access-method interface to the
......@@ -174,9 +174,9 @@ TransactionIdDidCommit(TransactionId transactionId)
* pg_subtrans; instead assume that the parent crashed without cleaning up
* its children.
*
* Originally we Assert'ed that the result of SubTransGetParent was not zero.
* However with the introduction of prepared transactions, there can be a
* window just after database startup where we do not have complete
* Originally we Assert'ed that the result of SubTransGetParent was not
* zero. However with the introduction of prepared transactions, there can
* be a window just after database startup where we do not have complete
* knowledge in pg_subtrans of the transactions after TransactionXmin.
* StartupSUBTRANS() has ensured that any missing information will be
* zeroed. Since this case should not happen under normal conditions, it
......
......@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.16 2005/10/29 00:31:50 petere Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/twophase.c,v 1.17 2005/11/22 18:17:07 momjian Exp $
*
* NOTES
* Each global transaction is associated with a global transaction
......@@ -851,10 +851,10 @@ EndPrepare(GlobalTransaction gxact)
/*
* Create the 2PC state file.
*
* Note: because we use BasicOpenFile(), we are responsible for ensuring the
* FD gets closed in any error exit path. Once we get into the critical
* section, though, it doesn't matter since any failure causes PANIC
* anyway.
* Note: because we use BasicOpenFile(), we are responsible for ensuring
* the FD gets closed in any error exit path. Once we get into the
* critical section, though, it doesn't matter since any failure causes
* PANIC anyway.
*/
TwoPhaseFilePath(path, xid);
......@@ -911,8 +911,8 @@ EndPrepare(GlobalTransaction gxact)
* The state file isn't valid yet, because we haven't written the correct
* CRC yet. Before we do that, insert entry in WAL and flush it to disk.
*
* Between the time we have written the WAL entry and the time we write out
* the correct state file CRC, we have an inconsistency: the xact is
* Between the time we have written the WAL entry and the time we write
* out the correct state file CRC, we have an inconsistency: the xact is
* prepared according to WAL but not according to our on-disk state. We
* use a critical section to force a PANIC if we are unable to complete
* the write --- then, WAL replay should repair the inconsistency. The
......@@ -1344,11 +1344,11 @@ CheckPointTwoPhase(XLogRecPtr redo_horizon)
* it just long enough to make a list of the XIDs that require fsyncing,
* and then do the I/O afterwards.
*
* This approach creates a race condition: someone else could delete a GXACT
* between the time we release TwoPhaseStateLock and the time we try to
* open its state file. We handle this by special-casing ENOENT failures:
* if we see that, we verify that the GXACT is no longer valid, and if so
* ignore the failure.
* This approach creates a race condition: someone else could delete a
* GXACT between the time we release TwoPhaseStateLock and the time we try
* to open its state file. We handle this by special-casing ENOENT
* failures: if we see that, we verify that the GXACT is no longer valid,
* and if so ignore the failure.
*/
if (max_prepared_xacts <= 0)
return; /* nothing to do */
......
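The CheckPointTwoPhase comment above handles the race where a state file vanishes between dropping TwoPhaseStateLock and opening the file, by special-casing ENOENT. A minimal sketch of that pattern; the path, the xid, and the gxact_still_valid() helper are hypothetical stand-ins, not the backend's functions:

#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* toy stand-in: re-check, under the lock, whether the gxact still exists */
static bool gxact_still_valid(unsigned int xid)
{
	(void) xid;
	return false;				/* pretend it was concurrently removed */
}

/* fsync one state file; tolerate ENOENT only if the gxact is provably gone */
static int fsync_state_file(const char *path, unsigned int xid)
{
	int			fd = open(path, O_RDWR);

	if (fd < 0)
	{
		if (errno == ENOENT && !gxact_still_valid(xid))
			return 0;			/* lost the race: someone else finished it */
		return -1;				/* any other failure is a real error */
	}
	if (fsync(fd) != 0)
	{
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	/* a path that presumably does not exist, to exercise the ENOENT case */
	printf("result: %d\n", fsync_state_file("/tmp/pg_twophase_demo/000007d3", 2003));
	return 0;
}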
......@@ -6,7 +6,7 @@
* Copyright (c) 2000-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.68 2005/10/29 00:31:50 petere Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/varsup.c,v 1.69 2005/11/22 18:17:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
......@@ -56,8 +56,8 @@ GetNewTransactionId(bool isSubXact)
* (which gives an escape hatch to the DBA who ignored all those
* warnings).
*
* Test is coded to fall out as fast as possible during normal operation, ie,
* when the warn limit is set and we haven't violated it.
* Test is coded to fall out as fast as possible during normal operation,
* ie, when the warn limit is set and we haven't violated it.
*/
if (TransactionIdFollowsOrEquals(xid, ShmemVariableCache->xidWarnLimit) &&
TransactionIdIsValid(ShmemVariableCache->xidWarnLimit))
......@@ -268,8 +268,8 @@ GetNewObjectId(void)
* right after a wrap occurs, so as to avoid a possibly large number of
* iterations in GetNewOid.) Note we are relying on unsigned comparison.
*
* During initdb, we start the OID generator at FirstBootstrapObjectId, so we
* only enforce wrapping to that point when in bootstrap or standalone
* During initdb, we start the OID generator at FirstBootstrapObjectId, so
* we only enforce wrapping to that point when in bootstrap or standalone
* mode. The first time through this routine after normal postmaster
* start, the counter will be forced up to FirstNormalObjectId. This
* mechanism leaves the OIDs between FirstBootstrapObjectId and
......
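The GetNewTransactionId hunk above notes that the warn-limit test "falls out as fast as possible during normal operation". A small sketch of that ordering idea, assuming a hypothetical wraparound comparison and flag names; in the common case (limit set, not yet reached) the first operand is false and the && short-circuits immediately:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INVALID_XID 0

/* modulo-2^32 "follows or equals" comparison, since 32-bit XIDs wrap around */
static bool xid_follows_or_equals(uint32_t a, uint32_t b)
{
	return (int32_t) (a - b) >= 0;
}

static bool should_warn(uint32_t xid, uint32_t warn_limit)
{
	/* cheap, usually-false test first; validity check only if needed */
	return xid_follows_or_equals(xid, warn_limit) &&
		warn_limit != INVALID_XID;
}

int main(void)
{
	printf("%d\n", should_warn(100, 200));	/* 0: nowhere near the limit */
	printf("%d\n", should_warn(300, 200));	/* 1: time to warn */
	return 0;
}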
......@@ -10,7 +10,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.215 2005/10/15 02:49:09 momjian Exp $
* $PostgreSQL: pgsql/src/backend/access/transam/xact.c,v 1.216 2005/11/22 18:17:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
......@@ -750,8 +750,8 @@ RecordTransactionCommit(void)
* XLOG record generated by nextval will hit the disk before we report
* the transaction committed.
*
* Note: if we generated a commit record above, MyXactMadeXLogEntry will
* certainly be set now.
* Note: if we generated a commit record above, MyXactMadeXLogEntry
* will certainly be set now.
*/
if (MyXactMadeXLogEntry)
{
......@@ -762,8 +762,8 @@ RecordTransactionCommit(void)
* because on most Unixen, the minimum select() delay is 10msec or
* more, which is way too long.)
*
* We do not sleep if enableFsync is not turned on, nor if there are
* fewer than CommitSiblings other backends with active
* We do not sleep if enableFsync is not turned on, nor if there
* are fewer than CommitSiblings other backends with active
* transactions.
*/
if (CommitDelay > 0 && enableFsync &&
......@@ -993,10 +993,10 @@ RecordTransactionAbort(void)
* nowhere in permanent storage, so no one else will ever care if it
* committed.)
*
* We do not flush XLOG to disk unless deleting files, since the default
* assumption after a crash would be that we aborted, anyway. For the
* same reason, we don't need to worry about interlocking against
* checkpoint start.
* We do not flush XLOG to disk unless deleting files, since the
* default assumption after a crash would be that we aborted, anyway.
* For the same reason, we don't need to worry about interlocking
* against checkpoint start.
*/
if (MyLastRecPtr.xrecoff != 0 || nrels > 0)
{
......@@ -1042,8 +1042,8 @@ RecordTransactionAbort(void)
* Mark the transaction aborted in clog. This is not absolutely
* necessary but we may as well do it while we are here.
*
* The ordering here isn't critical but it seems best to mark the parent
* first. This assures an atomic transition of all the
* The ordering here isn't critical but it seems best to mark the
* parent first. This assures an atomic transition of all the
* subtransactions to aborted state from the point of view of
* concurrent TransactionIdDidAbort calls.
*/
......@@ -1520,11 +1520,11 @@ CommitTransaction(void)
* it's too late to abort the transaction. This should be just
* noncritical resource releasing.
*
* The ordering of operations is not entirely random. The idea is: release
* resources visible to other backends (eg, files, buffer pins); then
* release locks; then release backend-local resources. We want to release
* locks at the point where any backend waiting for us will see our
* transaction as being fully cleaned up.
* The ordering of operations is not entirely random. The idea is:
* release resources visible to other backends (eg, files, buffer pins);
* then release locks; then release backend-local resources. We want to
* release locks at the point where any backend waiting for us will see
* our transaction as being fully cleaned up.
*
* Resources that can be associated with individual queries are handled by
* the ResourceOwner mechanism. The other calls here are for backend-wide
......@@ -1630,9 +1630,9 @@ PrepareTransaction(void)
* Do pre-commit processing (most of this stuff requires database access,
* and in fact could still cause an error...)
*
* It is possible for PrepareHoldablePortals to invoke functions that queue
* deferred triggers, and it's also possible that triggers create holdable
* cursors. So we have to loop until there's nothing left to do.
* It is possible for PrepareHoldablePortals to invoke functions that
* queue deferred triggers, and it's also possible that triggers create
* holdable cursors. So we have to loop until there's nothing left to do.
*/
for (;;)
{
......@@ -1715,9 +1715,9 @@ PrepareTransaction(void)
/*
* Here is where we really truly prepare.
*
* We have to record transaction prepares even if we didn't make any updates,
* because the transaction manager might get confused if we lose a global
* transaction.
* We have to record transaction prepares even if we didn't make any
* updates, because the transaction manager might get confused if we lose
* a global transaction.
*/
EndPrepare(gxact);
......@@ -1868,10 +1868,11 @@ AbortTransaction(void)
* s->currentUser, since it may not be set yet; instead rely on internal
* state of miscinit.c.
*
* (Note: it is not necessary to restore session authorization here because
* that can only be changed via GUC, and GUC will take care of rolling it
* back if need be. However, an error within a SECURITY DEFINER function
* could send control here with the wrong current userid.)
* (Note: it is not necessary to restore session authorization here
* because that can only be changed via GUC, and GUC will take care of
* rolling it back if need be. However, an error within a SECURITY
* DEFINER function could send control here with the wrong current
* userid.)
*/
AtAbort_UserId();
......@@ -2353,8 +2354,8 @@ AbortCurrentTransaction(void)
/*
* Here, we are already in an aborted transaction state and are
* waiting for a ROLLBACK, but for some reason we failed again!
* So we just remain in the abort state.
* waiting for a ROLLBACK, but for some reason we failed again! So
* we just remain in the abort state.
*/
case TBLOCK_ABORT:
case TBLOCK_SUBABORT:
......
......@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/bootstrap/bootstrap.c,v 1.208 2005/10/20 20:05:44 tgl Exp $
* $PostgreSQL: pgsql/src/backend/bootstrap/bootstrap.c,v 1.209 2005/11/22 18:17:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
......@@ -466,8 +466,8 @@ BootstrapMain(int argc, char *argv[])
/*
* Process bootstrap input.
*
* the sed script boot.sed renamed yyparse to Int_yyparse for the bootstrap
* parser to avoid conflicts with the normal SQL parser
* the sed script boot.sed renamed yyparse to Int_yyparse for the
* bootstrap parser to avoid conflicts with the normal SQL parser
*/
Int_yyparse();
......
......@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.121 2005/11/21 12:49:30 alvherre Exp $
* $PostgreSQL: pgsql/src/backend/catalog/aclchk.c,v 1.122 2005/11/22 18:17:07 momjian Exp $
*
* NOTES
* See acl.h.
......@@ -113,7 +113,7 @@ merge_acl_with_grant(Acl *old_acl, bool is_grant,
AclItem aclitem;
Acl *newer_acl;
aclitem.ai_grantee = lfirst_oid(j);
aclitem. ai_grantee = lfirst_oid(j);
/*
* Grant options can only be granted to individual roles, not PUBLIC.
......@@ -172,8 +172,8 @@ ExecuteGrantStmt(GrantStmt *stmt)
* Convert the PrivGrantee list into an Oid list. Note that at this point
* we insert an ACL_ID_PUBLIC into the list if an empty role name is
* detected (which is what the grammar uses if PUBLIC is found), so
* downstream there shouldn't be any additional work needed to support this
* case.
* downstream there shouldn't be any additional work needed to support
* this case.
*/
foreach(cell, stmt->grantees)
{
......@@ -336,8 +336,8 @@ objectNamesToOids(GrantObjectType objtype, List *objnames)
relation = heap_open(DatabaseRelationId, AccessShareLock);
/*
* There's no syscache for pg_database, so we must
* look the hard way.
* There's no syscache for pg_database, so we must look the
* hard way.
*/
ScanKeyInit(&entry[0],
Anum_pg_database_datname,
......@@ -387,7 +387,7 @@ objectNamesToOids(GrantObjectType objtype, List *objnames)
}
break;
case ACL_OBJECT_NAMESPACE:
foreach (cell, objnames)
foreach(cell, objnames)
{
char *nspname = strVal(lfirst(cell));
HeapTuple tuple;
......@@ -406,7 +406,7 @@ objectNamesToOids(GrantObjectType objtype, List *objnames)
}
break;
case ACL_OBJECT_TABLESPACE:
foreach (cell, objnames)
foreach(cell, objnames)
{
char *spcname = strVal(lfirst(cell));
ScanKeyData entry[1];
......@@ -456,7 +456,7 @@ ExecGrant_Relation(bool is_grant, List *objects, bool all_privs,
relation = heap_open(RelationRelationId, RowExclusiveLock);
foreach (cell, objects)
foreach(cell, objects)
{
Oid relOid = lfirst_oid(cell);
Datum aclDatum;
......@@ -498,6 +498,7 @@ ExecGrant_Relation(bool is_grant, List *objects, bool all_privs,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
errmsg("\"%s\" is a composite type",
NameStr(pg_class_tuple->relname))));
/*
* Get owner ID and working copy of existing ACL. If there's no ACL,
* substitute the proper default.
......@@ -622,7 +623,7 @@ ExecGrant_Database(bool is_grant, List *objects, bool all_privs,
relation = heap_open(DatabaseRelationId, RowExclusiveLock);
foreach (cell, objects)
foreach(cell, objects)
{
Oid datId = lfirst_oid(cell);
Form_pg_database pg_database_tuple;
......@@ -786,7 +787,7 @@ ExecGrant_Function(bool is_grant, List *objects, bool all_privs,
relation = heap_open(ProcedureRelationId, RowExclusiveLock);
foreach (cell, objects)
foreach(cell, objects)
{
Oid funcId = lfirst_oid(cell);
Form_pg_proc pg_proc_tuple;
......@@ -941,7 +942,7 @@ ExecGrant_Language(bool is_grant, List *objects, bool all_privs,
relation = heap_open(LanguageRelationId, RowExclusiveLock);
foreach (cell, objects)
foreach(cell, objects)
{
Oid langid = lfirst_oid(cell);
Form_pg_language pg_language_tuple;
......@@ -982,8 +983,8 @@ ExecGrant_Language(bool is_grant, List *objects, bool all_privs,
* Get owner ID and working copy of existing ACL. If there's no ACL,
* substitute the proper default.
*
* Note: for now, languages are treated as owned by the bootstrap user.
* We should add an owner column to pg_language instead.
* Note: for now, languages are treated as owned by the bootstrap
* user. We should add an owner column to pg_language instead.
*/
ownerId = BOOTSTRAP_SUPERUSERID;
aclDatum = SysCacheGetAttr(LANGNAME, tuple, Anum_pg_language_lanacl,
......@@ -1887,8 +1888,8 @@ pg_namespace_aclmask(Oid nsp_oid, Oid roleid,
* the namespace. If we don't have CREATE TEMP, act as though we have
* only USAGE (and not CREATE) rights.
*
* This may seem redundant given the check in InitTempTableNamespace, but it
* really isn't since current user ID may have changed since then. The
* This may seem redundant given the check in InitTempTableNamespace, but
* it really isn't since current user ID may have changed since then. The
* upshot of this behavior is that a SECURITY DEFINER function can create
* temp tables that can then be accessed (if permission is granted) by
* code in the same session that doesn't have permissions to create temp
......
......@@ -8,7 +8,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/dependency.c,v 1.47 2005/10/15 02:49:12 momjian Exp $
* $PostgreSQL: pgsql/src/backend/catalog/dependency.c,v 1.48 2005/11/22 18:17:07 momjian Exp $
*
*-------------------------------------------------------------------------
*/
......@@ -276,8 +276,8 @@ findAutoDeletableObjects(const ObjectAddress *object,
* that depend on it. For each one that is AUTO or INTERNAL, visit the
* referencing object.
*
* When dropping a whole object (subId = 0), find pg_depend records for its
* sub-objects too.
* When dropping a whole object (subId = 0), find pg_depend records for
* its sub-objects too.
*/
ScanKeyInit(&key[0],
Anum_pg_depend_refclassid,
......@@ -411,8 +411,8 @@ recursiveDeletion(const ObjectAddress *object,
* avoid infinite recursion in the case of cycles. Also, some dependency
* types require extra processing here.
*
* When dropping a whole object (subId = 0), remove all pg_depend records for
* its sub-objects too.
* When dropping a whole object (subId = 0), remove all pg_depend records
* for its sub-objects too.
*/
ScanKeyInit(&key[0],
Anum_pg_depend_classid,
......
......@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/heap.c,v 1.292 2005/10/18 01:06:23 tgl Exp $
* $PostgreSQL: pgsql/src/backend/catalog/heap.c,v 1.293 2005/11/22 18:17:08 momjian Exp $
*
*
* INTERFACE ROUTINES
......@@ -697,8 +697,8 @@ heap_create_with_catalog(const char *relname,
/*
* Allocate an OID for the relation, unless we were told what to use.
*
* The OID will be the relfilenode as well, so make sure it doesn't collide
* with either pg_class OIDs or existing physical files.
* The OID will be the relfilenode as well, so make sure it doesn't
* collide with either pg_class OIDs or existing physical files.
*/
if (!OidIsValid(relid))
relid = GetNewRelFileNode(reltablespace, shared_relation,
......@@ -724,8 +724,8 @@ heap_create_with_catalog(const char *relname,
* since defining a relation also defines a complex type, we add a new
* system type corresponding to the new relation.
*
* NOTE: we could get a unique-index failure here, in case the same name has
* already been used for a type.
* NOTE: we could get a unique-index failure here, in case the same name
* has already been used for a type.
*/
new_type_oid = AddNewRelationType(relname,
relnamespace,
......@@ -778,9 +778,9 @@ heap_create_with_catalog(const char *relname,
/*
* store constraints and defaults passed in the tupdesc, if any.
*
* NB: this may do a CommandCounterIncrement and rebuild the relcache entry,
* so the relation must be valid and self-consistent at this point. In
* particular, there are not yet constraints and defaults anywhere.
* NB: this may do a CommandCounterIncrement and rebuild the relcache
* entry, so the relation must be valid and self-consistent at this point.
* In particular, there are not yet constraints and defaults anywhere.
*/
StoreConstraints(new_rel_desc, tupdesc);
......@@ -1329,8 +1329,9 @@ StoreRelCheck(Relation rel, char *ccname, char *ccbin)
/*
* Find columns of rel that are used in ccbin
*
* NB: pull_var_clause is okay here only because we don't allow subselects in
* check constraints; it would fail to examine the contents of subselects.
* NB: pull_var_clause is okay here only because we don't allow subselects
* in check constraints; it would fail to examine the contents of
* subselects.
*/
varList = pull_var_clause(expr, false);
keycount = list_length(varList);
......
......@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.261 2005/10/15 02:49:12 momjian Exp $
* $PostgreSQL: pgsql/src/backend/catalog/index.c,v 1.262 2005/11/22 18:17:08 momjian Exp $
*
*
* INTERFACE ROUTINES
......@@ -524,8 +524,8 @@ index_create(Oid heapRelationId,
/*
* Allocate an OID for the index, unless we were told what to use.
*
* The OID will be the relfilenode as well, so make sure it doesn't collide
* with either pg_class OIDs or existing physical files.
* The OID will be the relfilenode as well, so make sure it doesn't
* collide with either pg_class OIDs or existing physical files.
*/
if (!OidIsValid(indexRelationId))
indexRelationId = GetNewRelFileNode(tableSpaceId, shared_relation,
......@@ -600,16 +600,16 @@ index_create(Oid heapRelationId,
/*
* Register constraint and dependencies for the index.
*
* If the index is from a CONSTRAINT clause, construct a pg_constraint entry.
* The index is then linked to the constraint, which in turn is linked to
* the table. If it's not a CONSTRAINT, make the dependency directly on
* the table.
* If the index is from a CONSTRAINT clause, construct a pg_constraint
* entry. The index is then linked to the constraint, which in turn is
* linked to the table. If it's not a CONSTRAINT, make the dependency
* directly on the table.
*
* We don't need a dependency on the namespace, because there'll be an
* indirect dependency via our parent table.
*
* During bootstrap we can't register any dependencies, and we don't try to
* make a constraint either.
* During bootstrap we can't register any dependencies, and we don't try
* to make a constraint either.
*/
if (!IsBootstrapProcessingMode())
{
......@@ -737,8 +737,8 @@ index_create(Oid heapRelationId,
* delayed till later (ALTER TABLE can save work in some cases with this).
* Otherwise, we call the AM routine that constructs the index.
*
* In normal processing mode, the heap and index relations are closed, but we
* continue to hold the ShareLock on the heap and the exclusive lock on
* In normal processing mode, the heap and index relations are closed, but
* we continue to hold the ShareLock on the heap and the exclusive lock on
* the index that we acquired above, until end of transaction.
*/
if (IsBootstrapProcessingMode())
......@@ -1243,8 +1243,8 @@ UpdateStats(Oid relid, double reltuples)
* tuple in-place. (Note: as of PG 8.0 this isn't called during
* bootstrap, but leave the code here for possible future use.)
*
* We also must cheat if reindexing pg_class itself, because the target index
* may presently not be part of the set of indexes that
* We also must cheat if reindexing pg_class itself, because the target
* index may presently not be part of the set of indexes that
* CatalogUpdateIndexes would update (see reindex_relation). In this case
* the stats updates will not be WAL-logged and so could be lost in a
* crash. This seems OK considering VACUUM does the same thing.
......@@ -1745,9 +1745,10 @@ reindex_relation(Oid relid, bool toast_too)
* entry for its own pg_class row because we do setNewRelfilenode() before
* we do index_build().
*
* Note that we also clear pg_class's rd_oidindex until the loop is done, so
* that that index can't be accessed either. This means we cannot safely
* generate new relation OIDs while in the loop; shouldn't be a problem.
* Note that we also clear pg_class's rd_oidindex until the loop is done,
* so that that index can't be accessed either. This means we cannot
* safely generate new relation OIDs while in the loop; shouldn't be a
* problem.
*/
is_pg_class = (RelationGetRelid(rel) == RelationRelationId);
doneIndexes = NIL;
......
......@@ -13,7 +13,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/namespace.c,v 1.79 2005/10/15 02:49:14 momjian Exp $
* $PostgreSQL: pgsql/src/backend/catalog/namespace.c,v 1.80 2005/11/22 18:17:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
......@@ -958,10 +958,11 @@ OpclassGetCandidates(Oid amid)
* something we already accepted? If so, keep only the one that
* appears earlier in the search path.
*
* If we have an ordered list from SearchSysCacheList (the normal case),
* then any conflicting opclass must immediately adjoin this one in
* the list, so we only need to look at the newest result item. If we
* have an unordered list, we have to scan the whole result list.
* If we have an ordered list from SearchSysCacheList (the normal
* case), then any conflicting opclass must immediately adjoin this
* one in the list, so we only need to look at the newest result item.
* If we have an unordered list, we have to scan the whole result
* list.
*/
if (resultList)
{
......
......@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/pg_aggregate.c,v 1.76 2005/10/15 02:49:14 momjian Exp $
* $PostgreSQL: pgsql/src/backend/catalog/pg_aggregate.c,v 1.77 2005/11/22 18:17:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
......@@ -104,10 +104,10 @@ AggregateCreate(const char *aggName,
* enforce_generic_type_consistency, if transtype isn't polymorphic) must
* exactly match declared transtype.
*
* In the non-polymorphic-transtype case, it might be okay to allow a rettype
* that's binary-coercible to transtype, but I'm not quite convinced that
* it's either safe or useful. When transtype is polymorphic we *must*
* demand exact equality.
* In the non-polymorphic-transtype case, it might be okay to allow a
* rettype that's binary-coercible to transtype, but I'm not quite
* convinced that it's either safe or useful. When transtype is
* polymorphic we *must* demand exact equality.
*/
if (rettype != aggTransType)
ereport(ERROR,
......
......@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/pg_constraint.c,v 1.27 2005/10/15 02:49:14 momjian Exp $
* $PostgreSQL: pgsql/src/backend/catalog/pg_constraint.c,v 1.28 2005/11/22 18:17:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
......@@ -497,8 +497,8 @@ RemoveConstraintById(Oid conId)
/*
* XXX for now, do nothing special when dropping a domain constraint
*
* Probably there should be some form of locking on the domain type, but
* we have no such concept at the moment.
* Probably there should be some form of locking on the domain type,
* but we have no such concept at the moment.
*/
}
else
......
......@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/pg_depend.c,v 1.16 2005/11/21 12:49:30 alvherre Exp $
* $PostgreSQL: pgsql/src/backend/catalog/pg_depend.c,v 1.17 2005/11/22 18:17:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
......
......@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.136 2005/11/17 22:14:51 tgl Exp $
* $PostgreSQL: pgsql/src/backend/catalog/pg_proc.c,v 1.137 2005/11/22 18:17:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
......@@ -584,8 +584,8 @@ fmgr_sql_validator(PG_FUNCTION_ARGS)
* expression results will be unresolvable. The check will be done at
* runtime instead.
*
* We can run the text through the raw parser though; this will at least
* catch silly syntactic errors.
* We can run the text through the raw parser though; this will at
* least catch silly syntactic errors.
*/
if (!haspolyarg)
{
......@@ -654,8 +654,8 @@ function_parse_error_transpose(const char *prosrc)
* Nothing to do unless we are dealing with a syntax error that has a
* cursor position.
*
* Some PLs may prefer to report the error position as an internal error to
* begin with, so check that too.
* Some PLs may prefer to report the error position as an internal error
* to begin with, so check that too.
*/
origerrposition = geterrposition();
if (origerrposition <= 0)
......@@ -770,8 +770,8 @@ match_prosrc_to_literal(const char *prosrc, const char *literal,
* string literal. It does not handle the SQL syntax for literals
* continued across line boundaries.
*
* We do the comparison a character at a time, not a byte at a time, so that
* we can do the correct cursorpos math.
* We do the comparison a character at a time, not a byte at a time, so
* that we can do the correct cursorpos math.
*/
while (*prosrc)
{
......
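The match_prosrc_to_literal comment above stresses comparing character-at-a-time, not byte-at-a-time, so cursor-position math stays correct for multibyte encodings. A hedged sketch of the byte-offset-to-character-position part of that idea using standard mbrlen(); it assumes a UTF-8 locale is available and is not the backend's encoding machinery:

#include <locale.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <wchar.h>

/*
 * Map a byte offset in a (possibly multibyte) string to a 1-based character
 * position, counting whole characters rather than bytes.
 */
static int byte_to_char_pos(const char *s, size_t byteoff)
{
	mbstate_t	st;
	size_t		consumed = 0;
	int			charpos = 0;

	memset(&st, 0, sizeof st);
	while (consumed < byteoff && s[consumed] != '\0')
	{
		size_t		len = mbrlen(s + consumed, MB_CUR_MAX, &st);

		if (len == 0 || len == (size_t) -1 || len == (size_t) -2)
			break;				/* NUL, invalid, or truncated sequence: stop */
		consumed += len;
		charpos++;
	}
	return charpos + 1;			/* cursor positions are conventionally 1-based */
}

int main(void)
{
	setlocale(LC_CTYPE, "en_US.UTF-8");	/* assumes this locale exists */

	/* "héllo": the 'l' after the 2-byte 'é' starts at byte 3, character 3 */
	printf("%d\n", byte_to_char_pos("h\xc3\xa9llo", 3));
	return 0;
}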
......@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/catalog/pg_shdepend.c,v 1.4 2005/11/21 12:49:30 alvherre Exp $
* $PostgreSQL: pgsql/src/backend/catalog/pg_shdepend.c,v 1.5 2005/11/22 18:17:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
......@@ -415,8 +415,8 @@ updateAclDependencies(Oid classId, Oid objectId, Oid ownerId, bool isGrant,
/*
* Skip the owner: he has an OWNER shdep entry instead. (This is
* not just a space optimization; it makes ALTER OWNER easier.
* See notes in changeDependencyOnOwner.)
* not just a space optimization; it makes ALTER OWNER easier. See
* notes in changeDependencyOnOwner.)
*/
if (roleid == ownerId)
continue;
......@@ -585,8 +585,8 @@ checkSharedDependencies(Oid classId, Oid objectId)
/*
* Report seems unreasonably long, so reduce it to per-database info
*
* Note: we don't ever suppress per-database totals, which should be OK
* as long as there aren't too many databases ...
* Note: we don't ever suppress per-database totals, which should be
* OK as long as there aren't too many databases ...
*/
descs.len = 0; /* reset to empty */
descs.data[0] = '\0';
......@@ -1163,6 +1163,7 @@ shdepDropOwned(List *roleids, DropBehavior behavior)
false, DROP_CASCADE);
break;
case SHARED_DEPENDENCY_OWNER:
/*
* If there's a regular (non-shared) dependency on this
* object marked with DEPENDENCY_INTERNAL, skip this
......@@ -1221,6 +1222,7 @@ shdepReassignOwned(List *roleids, Oid newrole)
errmsg("cannot drop objects owned by %s because they are "
"required by the database system",
getObjectDescription(&obj))));
/*
* There's no need to tell the whole truth, which is that we
* didn't track these dependencies at all ...
......@@ -1256,9 +1258,9 @@ shdepReassignOwned(List *roleids, Oid newrole)
continue;
/*
* If there's a regular (non-shared) dependency on this
* object marked with DEPENDENCY_INTERNAL, skip this
* object. We will alter the referencer object instead.
* If there's a regular (non-shared) dependency on this object
* marked with DEPENDENCY_INTERNAL, skip this object. We will
* alter the referencer object instead.
*/
if (objectIsInternalDependency(sdepForm->classid, sdepForm->objid))
continue;
......
......@@ -9,7 +9,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/aggregatecmds.c,v 1.30 2005/10/15 02:49:14 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/aggregatecmds.c,v 1.31 2005/11/22 18:17:08 momjian Exp $
*
* DESCRIPTION
* The "DefineFoo" routines take the parse tree and pick out the
......@@ -119,8 +119,8 @@ DefineAggregate(List *names, List *parameters)
/*
* look up the aggregate's base type (input datatype) and transtype.
*
* We have historically allowed the command to look like basetype = 'ANY' so
* we must do a case-insensitive comparison for the name ANY. Ugh.
* We have historically allowed the command to look like basetype = 'ANY'
* so we must do a case-insensitive comparison for the name ANY. Ugh.
*
* basetype can be a pseudo-type, but transtype can't, since we need to be
* able to store values of the transtype. However, we can allow
......
......@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.89 2005/10/15 02:49:15 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/analyze.c,v 1.90 2005/11/22 18:17:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
......@@ -891,9 +891,9 @@ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
* If we didn't find as many tuples as we wanted then we're done. No sort
* is needed, since they're already in order.
*
* Otherwise we need to sort the collected tuples by position (itempointer).
* It's not worth worrying about corner cases where the tuples are already
* sorted.
* Otherwise we need to sort the collected tuples by position
* (itempointer). It's not worth worrying about corner cases where the
* tuples are already sorted.
*/
if (numrows == targrows)
qsort((void *) rows, numrows, sizeof(HeapTuple), compare_rows);
......@@ -1849,9 +1849,9 @@ compute_scalar_stats(VacAttrStatsP stats,
* Now scan the values in order, find the most common ones, and also
* accumulate ordering-correlation statistics.
*
* To determine which are most common, we first have to count the number
* of duplicates of each value. The duplicates are adjacent in the
* sorted list, so a brute-force approach is to compare successive
* To determine which are most common, we first have to count the
* number of duplicates of each value. The duplicates are adjacent in
* the sorted list, so a brute-force approach is to compare successive
* datum values until we find two that are not equal. However, that
* requires N-1 invocations of the datum comparison routine, which are
* completely redundant with work that was done during the sort. (The
......
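The compute_scalar_stats comment above relies on duplicates being adjacent once the sample is sorted, so one linear pass can count them. A small illustrative sketch of that counting pass over a sorted int array (names and data are invented, not the ANALYZE code):

#include <stdio.h>

/*
 * Scan an already-sorted sample once, counting runs of equal values, and
 * report the most common value.  Duplicates are necessarily adjacent, so a
 * single pass with no comparisons against earlier runs is enough.
 */
static void most_common(const int *vals, int n, int *value, int *count)
{
	int			best_val = vals[0];
	int			best_cnt = 1;
	int			run_cnt = 1;
	int			i;

	for (i = 1; i < n; i++)
	{
		if (vals[i] == vals[i - 1])
			run_cnt++;			/* still inside the same run */
		else
			run_cnt = 1;		/* a new value starts a new run */

		if (run_cnt > best_cnt)
		{
			best_cnt = run_cnt;
			best_val = vals[i];
		}
	}
	*value = best_val;
	*count = best_cnt;
}

int main(void)
{
	int			sample[] = {1, 2, 2, 2, 3, 3, 7};
	int			v,
				c;

	most_common(sample, 7, &v, &c);
	printf("most common: %d (x%d)\n", v, c);	/* 2 (x3) */
	return 0;
}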
......@@ -7,7 +7,7 @@
* Portions Copyright (c) 1994, Regents of the University of California
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/async.c,v 1.127 2005/11/03 17:11:34 alvherre Exp $
* $PostgreSQL: pgsql/src/backend/commands/async.c,v 1.128 2005/11/22 18:17:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
......@@ -820,18 +820,18 @@ EnableNotifyInterrupt(void)
* steps. (A very small time window, perhaps, but Murphy's Law says you
* can hit it...) Instead, we first set the enable flag, then test the
* occurred flag. If we see an unserviced interrupt has occurred, we
* re-clear the enable flag before going off to do the service work.
* (That prevents re-entrant invocation of ProcessIncomingNotify() if
* another interrupt occurs.) If an interrupt comes in between the setting
* and clearing of notifyInterruptEnabled, then it will have done the
* service work and left notifyInterruptOccurred zero, so we have to check
* again after clearing enable. The whole thing has to be in a loop in
* case another interrupt occurs while we're servicing the first. Once we
* get out of the loop, enable is set and we know there is no unserviced
* re-clear the enable flag before going off to do the service work. (That
* prevents re-entrant invocation of ProcessIncomingNotify() if another
* interrupt occurs.) If an interrupt comes in between the setting and
* clearing of notifyInterruptEnabled, then it will have done the service
* work and left notifyInterruptOccurred zero, so we have to check again
* after clearing enable. The whole thing has to be in a loop in case
* another interrupt occurs while we're servicing the first. Once we get
* out of the loop, enable is set and we know there is no unserviced
* interrupt.
*
* NB: an overenthusiastic optimizing compiler could easily break this code.
* Hopefully, they all understand what "volatile" means these days.
* NB: an overenthusiastic optimizing compiler could easily break this
* code. Hopefully, they all understand what "volatile" means these days.
*/
for (;;)
{
......
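The EnableNotifyInterrupt comment above describes a set-enable / test-occurred / clear-and-service loop that closes the race against an interrupt arriving between the two steps. A self-contained sketch of that control flow, with volatile sig_atomic_t flags standing in for notifyInterruptEnabled and notifyInterruptOccurred (the names and the toy service routine are hypothetical):

#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t notify_enabled = 0;
static volatile sig_atomic_t notify_occurred = 0;

/* stand-in for ProcessIncomingNotify(): service and clear the flag */
static void service_notify(void)
{
	notify_occurred = 0;
	/* ... read and deliver pending notifications here ... */
}

/*
 * Set the enable flag first, then test the occurred flag.  If an unserviced
 * interrupt slipped in, clear enable again (preventing re-entrant servicing)
 * and do the work ourselves; loop until enable is set with nothing pending.
 */
static void enable_notify_interrupt(void)
{
	for (;;)
	{
		notify_enabled = 1;
		if (!notify_occurred)
			break;				/* enabled, and nothing left to service */
		notify_enabled = 0;
		service_notify();
	}
}

int main(void)
{
	notify_occurred = 1;		/* pretend a signal arrived while disabled */
	enable_notify_interrupt();
	printf("enabled=%d occurred=%d\n",
		   (int) notify_enabled, (int) notify_occurred);	/* 1 0 */
	return 0;
}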
......@@ -7,7 +7,7 @@
* Copyright (c) 1996-2005, PostgreSQL Global Development Group
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.84 2005/10/15 02:49:15 momjian Exp $
* $PostgreSQL: pgsql/src/backend/commands/comment.c,v 1.85 2005/11/22 18:17:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
......@@ -445,8 +445,8 @@ CommentDatabase(List *qualname, char *comment)
* comment on a database other than the current one. Someday this might be
* improved, but it would take a redesigned infrastructure.
*
* When loading a dump, we may see a COMMENT ON DATABASE for the old name of
* the database. Erroring out would prevent pg_restore from completing
* When loading a dump, we may see a COMMENT ON DATABASE for the old name
* of the database. Erroring out would prevent pg_restore from completing
* (which is really pg_restore's fault, but for now we will work around
* the problem here). Consensus is that the best fix is to treat wrong
* database name as a WARNING not an ERROR.
......
......@@ -8,7 +8,7 @@
*
*
* IDENTIFICATION
* $PostgreSQL: pgsql/src/backend/commands/conversioncmds.c,v 1.25 2005/11/21 12:49:30 alvherre Exp $
* $PostgreSQL: pgsql/src/backend/commands/conversioncmds.c,v 1.26 2005/11/22 18:17:08 momjian Exp $
*
*-------------------------------------------------------------------------
*/
......@@ -107,7 +107,7 @@ DropConversionCommand(List *name, DropBehavior behavior, bool missing_ok)
conversionOid = FindConversionByName(name);
if (!OidIsValid(conversionOid))
{
if (! missing_ok)
if (!missing_ok)
{
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_OBJECT),
......