Commit 807b9e0d authored by Bruce Momjian

pgindent run for 9.5

parent 22589255
......@@ -113,12 +113,12 @@ gin_btree_compare_prefix(FunctionCallInfo fcinfo)
cmp;
cmp = DatumGetInt32(DirectFunctionCall2Coll(
data->typecmp,
PG_GET_COLLATION(),
(data->strategy == BTLessStrategyNumber ||
data->strategy == BTLessEqualStrategyNumber)
? data->datum : a,
b));
data->typecmp,
PG_GET_COLLATION(),
(data->strategy == BTLessStrategyNumber ||
data->strategy == BTLessEqualStrategyNumber)
? data->datum : a,
b));
switch (data->strategy)
{
......@@ -186,14 +186,14 @@ Datum \
gin_extract_value_##type(PG_FUNCTION_ARGS) \
{ \
return gin_btree_extract_value(fcinfo, is_varlena); \
} \
} \
PG_FUNCTION_INFO_V1(gin_extract_query_##type); \
Datum \
gin_extract_query_##type(PG_FUNCTION_ARGS) \
{ \
return gin_btree_extract_query(fcinfo, \
is_varlena, leftmostvalue, typecmp); \
} \
} \
PG_FUNCTION_INFO_V1(gin_compare_prefix_##type); \
Datum \
gin_compare_prefix_##type(PG_FUNCTION_ARGS) \
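The hunks above and below only re-indent the GIN_SUPPORT macro and its per-type invocations. For orientation, here is a hedged standalone sketch of the same token-pasting technique, stripped of the PostgreSQL plumbing (MAKE_GREETER and the type names are made up for illustration, not part of btree_gin):

#include <stdio.h>

/*
 * Same ## token-pasting idea as GIN_SUPPORT: one macro invocation per type
 * stamps out a family of functions with the type name in their identifiers.
 */
#define MAKE_GREETER(type)                      \
static void greet_##type(void)                  \
{                                               \
    printf("hello from greet_" #type "\n");     \
}

MAKE_GREETER(int4)
MAKE_GREETER(text)

int main(void)
{
    greet_int4();
    greet_text();
    return 0;
}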
......@@ -209,6 +209,7 @@ leftmostvalue_int2(void)
{
return Int16GetDatum(SHRT_MIN);
}
GIN_SUPPORT(int2, false, leftmostvalue_int2, btint2cmp)
static Datum
......@@ -216,6 +217,7 @@ leftmostvalue_int4(void)
{
return Int32GetDatum(INT_MIN);
}
GIN_SUPPORT(int4, false, leftmostvalue_int4, btint4cmp)
static Datum
......@@ -226,6 +228,7 @@ leftmostvalue_int8(void)
*/
return Int64GetDatum(SEQ_MINVALUE);
}
GIN_SUPPORT(int8, false, leftmostvalue_int8, btint8cmp)
static Datum
......@@ -233,6 +236,7 @@ leftmostvalue_float4(void)
{
return Float4GetDatum(-get_float4_infinity());
}
GIN_SUPPORT(float4, false, leftmostvalue_float4, btfloat4cmp)
static Datum
......@@ -240,6 +244,7 @@ leftmostvalue_float8(void)
{
return Float8GetDatum(-get_float8_infinity());
}
GIN_SUPPORT(float8, false, leftmostvalue_float8, btfloat8cmp)
static Datum
......@@ -250,6 +255,7 @@ leftmostvalue_money(void)
*/
return Int64GetDatum(SEQ_MINVALUE);
}
GIN_SUPPORT(money, false, leftmostvalue_money, cash_cmp)
static Datum
......@@ -257,6 +263,7 @@ leftmostvalue_oid(void)
{
return ObjectIdGetDatum(0);
}
GIN_SUPPORT(oid, false, leftmostvalue_oid, btoidcmp)
static Datum
......@@ -264,6 +271,7 @@ leftmostvalue_timestamp(void)
{
return TimestampGetDatum(DT_NOBEGIN);
}
GIN_SUPPORT(timestamp, false, leftmostvalue_timestamp, timestamp_cmp)
GIN_SUPPORT(timestamptz, false, leftmostvalue_timestamp, timestamp_cmp)
......@@ -273,6 +281,7 @@ leftmostvalue_time(void)
{
return TimeADTGetDatum(0);
}
GIN_SUPPORT(time, false, leftmostvalue_time, time_cmp)
static Datum
......@@ -285,6 +294,7 @@ leftmostvalue_timetz(void)
return TimeTzADTPGetDatum(v);
}
GIN_SUPPORT(timetz, false, leftmostvalue_timetz, timetz_cmp)
static Datum
......@@ -292,6 +302,7 @@ leftmostvalue_date(void)
{
return DateADTGetDatum(DATEVAL_NOBEGIN);
}
GIN_SUPPORT(date, false, leftmostvalue_date, date_cmp)
static Datum
......@@ -304,6 +315,7 @@ leftmostvalue_interval(void)
v->month = 0;
return IntervalPGetDatum(v);
}
GIN_SUPPORT(interval, false, leftmostvalue_interval, interval_cmp)
static Datum
......@@ -313,6 +325,7 @@ leftmostvalue_macaddr(void)
return MacaddrPGetDatum(v);
}
GIN_SUPPORT(macaddr, false, leftmostvalue_macaddr, macaddr_cmp)
static Datum
......@@ -320,6 +333,7 @@ leftmostvalue_inet(void)
{
return DirectFunctionCall1(inet_in, CStringGetDatum("0.0.0.0/0"));
}
GIN_SUPPORT(inet, true, leftmostvalue_inet, network_cmp)
GIN_SUPPORT(cidr, true, leftmostvalue_inet, network_cmp)
......@@ -329,6 +343,7 @@ leftmostvalue_text(void)
{
return PointerGetDatum(cstring_to_text_with_len("", 0));
}
GIN_SUPPORT(text, true, leftmostvalue_text, bttextcmp)
static Datum
......@@ -336,6 +351,7 @@ leftmostvalue_char(void)
{
return CharGetDatum(SCHAR_MIN);
}
GIN_SUPPORT(char, false, leftmostvalue_char, btcharcmp)
GIN_SUPPORT(bytea, true, leftmostvalue_text, byteacmp)
......@@ -348,6 +364,7 @@ leftmostvalue_bit(void)
ObjectIdGetDatum(0),
Int32GetDatum(-1));
}
GIN_SUPPORT(bit, true, leftmostvalue_bit, bitcmp)
static Datum
......@@ -358,6 +375,7 @@ leftmostvalue_varbit(void)
ObjectIdGetDatum(0),
Int32GetDatum(-1));
}
GIN_SUPPORT(varbit, true, leftmostvalue_varbit, bitcmp)
/*
......@@ -402,4 +420,5 @@ leftmostvalue_numeric(void)
{
return PointerGetDatum(NULL);
}
GIN_SUPPORT(numeric, true, leftmostvalue_numeric, gin_numeric_cmp)
......@@ -13,7 +13,7 @@
GISTENTRY *
gbt_num_compress(GISTENTRY *entry, const gbtree_ninfo *tinfo)
{
GISTENTRY *retval;
GISTENTRY *retval;
if (entry->leafkey)
{
......
......@@ -71,7 +71,7 @@ gbt_var_key_readable(const GBT_VARKEY *k)
* Create a leaf-entry to store in the index, from a single Datum.
*/
static GBT_VARKEY *
gbt_var_key_from_datum(const struct varlena *u)
gbt_var_key_from_datum(const struct varlena * u)
{
int32 lowersize = VARSIZE(u);
GBT_VARKEY *r;
......
......@@ -195,7 +195,7 @@ dmetaphone_alt(PG_FUNCTION_ARGS)
* in a case like this.
*/
#define META_FREE(x) ((void)true) /* pfree((x)) */
#define META_FREE(x) ((void)true) /* pfree((x)) */
#else /* not defined DMETAPHONE_MAIN */
/* use the standard malloc library when not running in PostgreSQL */
......
......@@ -72,7 +72,7 @@ typedef struct
static pg_crc32
crc32_sz(char *buf, int size)
{
pg_crc32 crc;
pg_crc32 crc;
INIT_TRADITIONAL_CRC32(crc);
COMP_TRADITIONAL_CRC32(crc, buf, size);
......
......@@ -9,7 +9,7 @@ PG_MODULE_MAGIC;
PG_FUNCTION_INFO_V1(hstore_to_plperl);
Datum hstore_to_plperl(PG_FUNCTION_ARGS);
Datum hstore_to_plperl(PG_FUNCTION_ARGS);
Datum
hstore_to_plperl(PG_FUNCTION_ARGS)
......@@ -26,10 +26,10 @@ hstore_to_plperl(PG_FUNCTION_ARGS)
for (i = 0; i < count; i++)
{
const char *key;
SV *value;
SV *value;
key = pnstrdup(HS_KEY(entries, base, i), HS_KEYLEN(entries, i));
value = HS_VALISNULL(entries, i) ? newSV(0) : cstr2sv(pnstrdup(HS_VAL(entries, base,i), HS_VALLEN(entries, i)));
value = HS_VALISNULL(entries, i) ? newSV(0) : cstr2sv(pnstrdup(HS_VAL(entries, base, i), HS_VALLEN(entries, i)));
(void) hv_store(hv, key, strlen(key), value, 0);
}
......@@ -39,7 +39,7 @@ hstore_to_plperl(PG_FUNCTION_ARGS)
PG_FUNCTION_INFO_V1(plperl_to_hstore);
Datum plperl_to_hstore(PG_FUNCTION_ARGS);
Datum plperl_to_hstore(PG_FUNCTION_ARGS);
Datum
plperl_to_hstore(PG_FUNCTION_ARGS)
......@@ -61,8 +61,8 @@ plperl_to_hstore(PG_FUNCTION_ARGS)
i = 0;
while ((he = hv_iternext(hv)))
{
char *key = sv2cstr(HeSVKEY_force(he));
SV *value = HeVAL(he);
char *key = sv2cstr(HeSVKEY_force(he));
SV *value = HeVAL(he);
pairs[i].key = pstrdup(key);
pairs[i].keylen = hstoreCheckKeyLen(strlen(pairs[i].key));
......
......@@ -8,7 +8,7 @@ PG_MODULE_MAGIC;
PG_FUNCTION_INFO_V1(hstore_to_plpython);
Datum hstore_to_plpython(PG_FUNCTION_ARGS);
Datum hstore_to_plpython(PG_FUNCTION_ARGS);
Datum
hstore_to_plpython(PG_FUNCTION_ARGS)
......@@ -31,9 +31,9 @@ hstore_to_plpython(PG_FUNCTION_ARGS)
PyDict_SetItem(dict, key, Py_None);
else
{
PyObject *value;
PyObject *value;
value = PyString_FromStringAndSize(HS_VAL(entries, base,i), HS_VALLEN(entries, i));
value = PyString_FromStringAndSize(HS_VAL(entries, base, i), HS_VALLEN(entries, i));
PyDict_SetItem(dict, key, value);
Py_XDECREF(value);
}
......@@ -45,7 +45,7 @@ hstore_to_plpython(PG_FUNCTION_ARGS)
PG_FUNCTION_INFO_V1(plpython_to_hstore);
Datum plpython_to_hstore(PG_FUNCTION_ARGS);
Datum plpython_to_hstore(PG_FUNCTION_ARGS);
Datum
plpython_to_hstore(PG_FUNCTION_ARGS)
......@@ -75,9 +75,9 @@ plpython_to_hstore(PG_FUNCTION_ARGS)
for (i = 0; i < pcount; i++)
{
PyObject *tuple;
PyObject *key;
PyObject *value;
PyObject *tuple;
PyObject *key;
PyObject *value;
tuple = PyList_GetItem(items, i);
key = PyTuple_GetItem(tuple, 0);
......
......@@ -26,13 +26,14 @@
unsigned int
ltree_crc32_sz(char *buf, int size)
{
pg_crc32 crc;
pg_crc32 crc;
char *p = buf;
INIT_TRADITIONAL_CRC32(crc);
while (size > 0)
{
char c = (char) TOLOWER(*p);
char c = (char) TOLOWER(*p);
COMP_TRADITIONAL_CRC32(crc, &c, 1);
size--;
p++;
......
......@@ -7,7 +7,7 @@ PG_MODULE_MAGIC;
PG_FUNCTION_INFO_V1(ltree_to_plpython);
Datum ltree_to_plpython(PG_FUNCTION_ARGS);
Datum ltree_to_plpython(PG_FUNCTION_ARGS);
Datum
ltree_to_plpython(PG_FUNCTION_ARGS)
......
......@@ -58,7 +58,7 @@ brin_page_type(PG_FUNCTION_ARGS)
{
bytea *raw_page = PG_GETARG_BYTEA_P(0);
Page page = VARDATA(raw_page);
char *type;
char *type;
switch (BrinPageType(page))
{
......@@ -86,8 +86,8 @@ brin_page_type(PG_FUNCTION_ARGS)
static Page
verify_brin_page(bytea *raw_page, uint16 type, const char *strtype)
{
Page page;
int raw_page_size;
Page page;
int raw_page_size;
raw_page_size = VARSIZE(raw_page) - VARHDRSZ;
......@@ -95,7 +95,7 @@ verify_brin_page(bytea *raw_page, uint16 type, const char *strtype)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("input page too small"),
errdetail("Expected size %d, got %d", raw_page_size, BLCKSZ)));
errdetail("Expected size %d, got %d", raw_page_size, BLCKSZ)));
page = VARDATA(raw_page);
......@@ -153,7 +153,7 @@ brin_page_items(PG_FUNCTION_ARGS)
indexRel = index_open(indexRelid, AccessShareLock);
state = palloc(offsetof(brin_page_state, columns) +
sizeof(brin_column_state) * RelationGetDescr(indexRel)->natts);
sizeof(brin_column_state) * RelationGetDescr(indexRel)->natts);
state->bdesc = brin_build_desc(indexRel);
state->page = page;
......@@ -168,10 +168,10 @@ brin_page_items(PG_FUNCTION_ARGS)
*/
for (attno = 1; attno <= state->bdesc->bd_tupdesc->natts; attno++)
{
Oid output;
bool isVarlena;
Oid output;
bool isVarlena;
BrinOpcInfo *opcinfo;
int i;
int i;
brin_column_state *column;
opcinfo = state->bdesc->bd_info[attno - 1];
......@@ -213,7 +213,7 @@ brin_page_items(PG_FUNCTION_ARGS)
*/
if (state->dtup == NULL)
{
BrinTuple *tup;
BrinTuple *tup;
MemoryContext mctx;
ItemId itemId;
......@@ -225,8 +225,8 @@ brin_page_items(PG_FUNCTION_ARGS)
if (ItemIdIsUsed(itemId))
{
tup = (BrinTuple *) PageGetItem(state->page,
PageGetItemId(state->page,
state->offset));
PageGetItemId(state->page,
state->offset));
state->dtup = brin_deform_tuple(state->bdesc, tup);
state->attno = 1;
state->unusedItem = false;
......@@ -253,7 +253,7 @@ brin_page_items(PG_FUNCTION_ARGS)
}
else
{
int att = state->attno - 1;
int att = state->attno - 1;
values[0] = UInt16GetDatum(state->offset);
values[1] = UInt32GetDatum(state->dtup->bt_blkno);
......@@ -263,8 +263,8 @@ brin_page_items(PG_FUNCTION_ARGS)
values[5] = BoolGetDatum(state->dtup->bt_placeholder);
if (!state->dtup->bt_columns[att].bv_allnulls)
{
BrinValues *bvalues = &state->dtup->bt_columns[att];
StringInfoData s;
BrinValues *bvalues = &state->dtup->bt_columns[att];
StringInfoData s;
bool first;
int i;
......@@ -274,7 +274,7 @@ brin_page_items(PG_FUNCTION_ARGS)
first = true;
for (i = 0; i < state->columns[att]->nstored; i++)
{
char *val;
char *val;
if (!first)
appendStringInfoString(&s, " .. ");
......@@ -312,8 +312,8 @@ brin_page_items(PG_FUNCTION_ARGS)
}
/*
* If we're beyond the end of the page, set flag to end the function in
* the following iteration.
* If we're beyond the end of the page, set flag to end the function
* in the following iteration.
*/
if (state->offset > PageGetMaxOffsetNumber(state->page))
state->done = true;
......@@ -366,8 +366,8 @@ brin_revmap_data(PG_FUNCTION_ARGS)
struct
{
ItemPointerData *tids;
int idx;
} *state;
int idx;
} *state;
FuncCallContext *fctx;
if (!superuser())
......
......@@ -167,7 +167,7 @@ typedef struct gin_leafpage_items_state
TupleDesc tupd;
GinPostingList *seg;
GinPostingList *lastseg;
} gin_leafpage_items_state;
} gin_leafpage_items_state;
Datum
gin_leafpage_items(PG_FUNCTION_ARGS)
......
......@@ -34,6 +34,7 @@ typedef struct
bool isvalid;
bool isdirty;
uint16 usagecount;
/*
* An int32 is sufficiently large, as MAX_BACKENDS prevents a buffer from
* being pinned by too many backends and each backend will only pin once
......
......@@ -138,10 +138,10 @@ typedef struct Counters
{
int64 calls; /* # of times executed */
double total_time; /* total execution time, in msec */
double min_time; /* minimum execution time in msec */
double max_time; /* maximum execution time in msec */
double mean_time; /* mean execution time in msec */
double sum_var_time; /* sum of variances in execution time in msec */
double min_time; /* minimum execution time in msec */
double max_time; /* maximum execution time in msec */
double mean_time; /* mean execution time in msec */
double sum_var_time; /* sum of variances in execution time in msec */
int64 rows; /* total # of retrieved or affected rows */
int64 shared_blks_hit; /* # of shared buffer hits */
int64 shared_blks_read; /* # of shared disk blocks read */
......@@ -1231,10 +1231,10 @@ pgss_store(const char *query, uint32 queryId,
else
{
/*
* Welford's method for accurately computing variance.
* See <http://www.johndcook.com/blog/standard_deviation/>
* Welford's method for accurately computing variance. See
* <http://www.johndcook.com/blog/standard_deviation/>
*/
double old_mean = e->counters.mean_time;
double old_mean = e->counters.mean_time;
e->counters.mean_time +=
(total_time - old_mean) / e->counters.calls;
......@@ -1572,10 +1572,11 @@ pg_stat_statements_internal(FunctionCallInfo fcinfo,
values[i++] = Float8GetDatumFast(tmp.min_time);
values[i++] = Float8GetDatumFast(tmp.max_time);
values[i++] = Float8GetDatumFast(tmp.mean_time);
/*
* Note we are calculating the population variance here, not the
* sample variance, as we have data for the whole population,
* so Bessel's correction is not used, and we don't divide by
* sample variance, as we have data for the whole population, so
* Bessel's correction is not used, and we don't divide by
* tmp.calls - 1.
*/
if (tmp.calls > 1)
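The two pg_stat_statements hunks above only reflow the comments about Welford's method and the population variance (no Bessel's correction, so the sum of squared deviations is divided by calls rather than calls - 1). As a hedged, self-contained illustration of that update rule, using made-up sample values rather than real query timings:

#include <stdio.h>

/*
 * Minimal sketch of Welford's online mean/variance update referenced in the
 * comment above; variable names are illustrative, not pg_stat_statements'.
 */
int main(void)
{
    double samples[] = {3.0, 5.0, 4.0, 7.0, 6.0};
    double mean = 0.0;
    double sum_var = 0.0;       /* running sum of squared deviations */
    int n = 0;
    int i;

    for (i = 0; i < 5; i++)
    {
        double old_mean = mean;

        n++;
        mean += (samples[i] - old_mean) / n;
        sum_var += (samples[i] - old_mean) * (samples[i] - mean);
    }

    /* Population variance: divide by n, not n - 1 (no Bessel's correction). */
    printf("mean = %g, variance = %g\n", mean, sum_var / n);
    return 0;
}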
......@@ -2687,16 +2688,16 @@ JumbleExpr(pgssJumbleState *jstate, Node *node)
break;
case T_OnConflictExpr:
{
OnConflictExpr *conf = (OnConflictExpr *) node;
OnConflictExpr *conf = (OnConflictExpr *) node;
APP_JUMB(conf->action);
JumbleExpr(jstate, (Node *) conf->arbiterElems);
JumbleExpr(jstate, conf->arbiterWhere);
JumbleExpr(jstate, (Node *) conf->onConflictSet);
JumbleExpr(jstate, (Node *) conf->onConflictSet);
JumbleExpr(jstate, conf->onConflictWhere);
APP_JUMB(conf->constraint);
APP_JUMB(conf->exclRelIndex);
JumbleExpr(jstate, (Node *) conf->exclRelTlist);
JumbleExpr(jstate, (Node *) conf->exclRelTlist);
}
break;
case T_List:
......
......@@ -399,7 +399,7 @@ pgp_extract_armor_headers(const uint8 *src, unsigned len,
char *line;
char *nextline;
char *eol,
*colon;
*colon;
int hlen;
char *buf;
int hdrlines;
......
......@@ -259,6 +259,7 @@ set_arg(PGP_Context *ctx, char *key, char *val,
res = pgp_set_convert_crlf(ctx, atoi(val));
else if (strcmp(key, "unicode-mode") == 0)
res = pgp_set_unicode_mode(ctx, atoi(val));
/*
* The remaining options are for debugging/testing and are therefore not
* documented in the user-facing docs.
......@@ -834,22 +835,22 @@ static int
parse_key_value_arrays(ArrayType *key_array, ArrayType *val_array,
char ***p_keys, char ***p_values)
{
int nkdims = ARR_NDIM(key_array);
int nvdims = ARR_NDIM(val_array);
char **keys,
**values;
Datum *key_datums,
*val_datums;
bool *key_nulls,
*val_nulls;
int key_count,
val_count;
int i;
int nkdims = ARR_NDIM(key_array);
int nvdims = ARR_NDIM(val_array);
char **keys,
**values;
Datum *key_datums,
*val_datums;
bool *key_nulls,
*val_nulls;
int key_count,
val_count;
int i;
if (nkdims > 1 || nkdims != nvdims)
ereport(ERROR,
(errcode(ERRCODE_ARRAY_SUBSCRIPT_ERROR),
errmsg("wrong number of array subscripts")));
errmsg("wrong number of array subscripts")));
if (nkdims == 0)
return 0;
......@@ -871,7 +872,7 @@ parse_key_value_arrays(ArrayType *key_array, ArrayType *val_array,
for (i = 0; i < key_count; i++)
{
char *v;
char *v;
/* Check that the key doesn't contain anything funny */
if (key_nulls[i])
......@@ -884,7 +885,7 @@ parse_key_value_arrays(ArrayType *key_array, ArrayType *val_array,
if (!string_is_ascii(v))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("header key must not contain non-ASCII characters")));
errmsg("header key must not contain non-ASCII characters")));
if (strstr(v, ": "))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
......@@ -906,7 +907,7 @@ parse_key_value_arrays(ArrayType *key_array, ArrayType *val_array,
if (!string_is_ascii(v))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("header value must not contain non-ASCII characters")));
errmsg("header value must not contain non-ASCII characters")));
if (strchr(v, '\n'))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
......@@ -1045,7 +1046,7 @@ pgp_armor_headers(PG_FUNCTION_ARGS)
SRF_RETURN_DONE(funcctx);
else
{
char *values[2];
char *values[2];
/* we assume that the keys (and values) are in UTF-8. */
utf8key = state->keys[funcctx->call_cntr];
......
......@@ -278,11 +278,11 @@ void pgp_cfb_free(PGP_CFB *ctx);
int pgp_cfb_encrypt(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst);
int pgp_cfb_decrypt(PGP_CFB *ctx, const uint8 *data, int len, uint8 *dst);
void pgp_armor_encode(const uint8 *src, unsigned len, StringInfo dst,
int num_headers, char **keys, char **values);
void pgp_armor_encode(const uint8 *src, unsigned len, StringInfo dst,
int num_headers, char **keys, char **values);
int pgp_armor_decode(const uint8 *src, int len, StringInfo dst);
int pgp_extract_armor_headers(const uint8 *src, unsigned len,
int *nheaders, char ***keys, char ***values);
int pgp_extract_armor_headers(const uint8 *src, unsigned len,
int *nheaders, char ***keys, char ***values);
int pgp_compress_filter(PushFilter **res, PGP_Context *ctx, PushFilter *dst);
int pgp_decompress_filter(PullFilter **res, PGP_Context *ctx, PullFilter *src);
......
......@@ -84,8 +84,8 @@ statapprox_heap(Relation rel, output_type *stat)
CHECK_FOR_INTERRUPTS();
/*
* If the page has only visible tuples, then we can find out the
* free space from the FSM and move on.
* If the page has only visible tuples, then we can find out the free
* space from the FSM and move on.
*/
if (visibilitymap_test(rel, blkno, &vmbuffer))
{
......@@ -103,8 +103,8 @@ statapprox_heap(Relation rel, output_type *stat)
page = BufferGetPage(buf);
/*
* It's not safe to call PageGetHeapFreeSpace() on new pages, so
* we treat them as being free space for our purposes.
* It's not safe to call PageGetHeapFreeSpace() on new pages, so we
* treat them as being free space for our purposes.
*/
if (!PageIsNew(page))
stat->free_space += PageGetHeapFreeSpace(page);
......@@ -120,9 +120,9 @@ statapprox_heap(Relation rel, output_type *stat)
scanned++;
/*
* Look at each tuple on the page and decide whether it's live
* or dead, then count it and its size. Unlike lazy_scan_heap,
* we can afford to ignore problems and special cases.
* Look at each tuple on the page and decide whether it's live or
* dead, then count it and its size. Unlike lazy_scan_heap, we can
* afford to ignore problems and special cases.
*/
maxoff = PageGetMaxOffsetNumber(page);
......@@ -179,9 +179,10 @@ statapprox_heap(Relation rel, output_type *stat)
UnlockReleaseBuffer(buf);
}
stat->table_len = (uint64) nblocks * BLCKSZ;
stat->table_len = (uint64) nblocks *BLCKSZ;
stat->tuple_count = vac_estimate_reltuples(rel, false, nblocks, scanned,
stat->tuple_count+misc_count);
stat->tuple_count + misc_count);
/*
* Calculate percentages if the relation has one or more pages.
......@@ -240,9 +241,9 @@ pgstattuple_approx(PG_FUNCTION_ARGS)
errmsg("cannot access temporary tables of other sessions")));
/*
* We support only ordinary relations and materialised views,
* because we depend on the visibility map and free space map
* for our estimates about unscanned pages.
* We support only ordinary relations and materialised views, because we
* depend on the visibility map and free space map for our estimates about
* unscanned pages.
*/
if (!(rel->rd_rel->relkind == RELKIND_RELATION ||
rel->rd_rel->relkind == RELKIND_MATVIEW))
......@@ -268,6 +269,6 @@ pgstattuple_approx(PG_FUNCTION_ARGS)
values[i++] = Int64GetDatum(stat.free_space);
values[i++] = Float8GetDatum(stat.free_percent);
ret = heap_form_tuple(tupdesc, values, nulls);
ret = heap_form_tuple(tupdesc, values, nulls);
return HeapTupleGetDatum(ret);
}
......@@ -203,7 +203,7 @@ typedef struct PgFdwAnalyzeState
/* for random sampling */
double samplerows; /* # of rows fetched */
double rowstoskip; /* # of rows to skip before next sample */
ReservoirStateData rstate; /* state for reservoir sampling*/
ReservoirStateData rstate; /* state for reservoir sampling */
/* working memory contexts */
MemoryContext anl_cxt; /* context for per-analyze lifespan data */
......
......@@ -53,16 +53,16 @@ static void pg_decode_shutdown(LogicalDecodingContext *ctx);
static void pg_decode_begin_txn(LogicalDecodingContext *ctx,
ReorderBufferTXN *txn);
static void pg_output_begin(LogicalDecodingContext *ctx,
TestDecodingData *data,
ReorderBufferTXN *txn,
bool last_write);
TestDecodingData *data,
ReorderBufferTXN *txn,
bool last_write);
static void pg_decode_commit_txn(LogicalDecodingContext *ctx,
ReorderBufferTXN *txn, XLogRecPtr commit_lsn);
static void pg_decode_change(LogicalDecodingContext *ctx,
ReorderBufferTXN *txn, Relation rel,
ReorderBufferChange *change);
static bool pg_decode_filter(LogicalDecodingContext *ctx,
RepOriginId origin_id);
RepOriginId origin_id);
void
_PG_init(void)
......
......@@ -33,14 +33,14 @@ PG_MODULE_MAGIC;
typedef struct
{
SamplerRandomState randstate;
uint32 seed; /* random seed */
BlockNumber nblocks; /* number of blocks in relation */
int32 ntuples; /* number of tuples to return */
int32 donetuples; /* tuples already returned */
OffsetNumber lt; /* last tuple returned from current block */
BlockNumber step; /* step size */
BlockNumber lb; /* last block visited */
BlockNumber doneblocks; /* number of already returned blocks */
uint32 seed; /* random seed */
BlockNumber nblocks; /* number of blocks in relation */
int32 ntuples; /* number of tuples to return */
int32 donetuples; /* tuples already returned */
OffsetNumber lt; /* last tuple returned from current block */
BlockNumber step; /* step size */
BlockNumber lb; /* last block visited */
BlockNumber doneblocks; /* number of already returned blocks */
} SystemSamplerData;
......@@ -60,11 +60,11 @@ static uint32 random_relative_prime(uint32 n, SamplerRandomState randstate);
Datum
tsm_system_rows_init(PG_FUNCTION_ARGS)
{
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
uint32 seed = PG_GETARG_UINT32(1);
int32 ntuples = PG_ARGISNULL(2) ? -1 : PG_GETARG_INT32(2);
HeapScanDesc scan = tsdesc->heapScan;
SystemSamplerData *sampler;
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
uint32 seed = PG_GETARG_UINT32(1);
int32 ntuples = PG_ARGISNULL(2) ? -1 : PG_GETARG_INT32(2);
HeapScanDesc scan = tsdesc->heapScan;
SystemSamplerData *sampler;
if (ntuples < 1)
ereport(ERROR,
......@@ -86,6 +86,7 @@ tsm_system_rows_init(PG_FUNCTION_ARGS)
/* Find relative prime as step size for linear probing. */
sampler->step = random_relative_prime(sampler->nblocks, sampler->randstate);
/*
* Randomize start position so that blocks close to step size don't have
* higher probability of being chosen on very short scan.
......@@ -106,8 +107,8 @@ tsm_system_rows_init(PG_FUNCTION_ARGS)
Datum
tsm_system_rows_nextblock(PG_FUNCTION_ARGS)
{
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
sampler->lb = (sampler->lb + sampler->step) % sampler->nblocks;
sampler->doneblocks++;
......@@ -127,10 +128,10 @@ tsm_system_rows_nextblock(PG_FUNCTION_ARGS)
Datum
tsm_system_rows_nexttuple(PG_FUNCTION_ARGS)
{
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
OffsetNumber maxoffset = PG_GETARG_UINT16(2);
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
OffsetNumber tupoffset = sampler->lt;
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
OffsetNumber maxoffset = PG_GETARG_UINT16(2);
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
OffsetNumber tupoffset = sampler->lt;
if (tupoffset == InvalidOffsetNumber)
tupoffset = FirstOffsetNumber;
......@@ -152,9 +153,9 @@ tsm_system_rows_nexttuple(PG_FUNCTION_ARGS)
Datum
tsm_system_rows_examinetuple(PG_FUNCTION_ARGS)
{
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
bool visible = PG_GETARG_BOOL(3);
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
bool visible = PG_GETARG_BOOL(3);
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
if (!visible)
PG_RETURN_BOOL(false);
......@@ -183,8 +184,8 @@ tsm_system_rows_end(PG_FUNCTION_ARGS)
Datum
tsm_system_rows_reset(PG_FUNCTION_ARGS)
{
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
sampler->lt = InvalidOffsetNumber;
sampler->donetuples = 0;
......@@ -203,14 +204,14 @@ tsm_system_rows_reset(PG_FUNCTION_ARGS)
Datum
tsm_system_rows_cost(PG_FUNCTION_ARGS)
{
PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
Path *path = (Path *) PG_GETARG_POINTER(1);
RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
List *args = (List *) PG_GETARG_POINTER(3);
BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
double *tuples = (double *) PG_GETARG_POINTER(5);
Node *limitnode;
int32 ntuples;
PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
Path *path = (Path *) PG_GETARG_POINTER(1);
RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
List *args = (List *) PG_GETARG_POINTER(3);
BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
double *tuples = (double *) PG_GETARG_POINTER(5);
Node *limitnode;
int32 ntuples;
limitnode = linitial(args);
limitnode = estimate_expression_value(root, limitnode);
......@@ -235,9 +236,9 @@ tsm_system_rows_cost(PG_FUNCTION_ARGS)
static uint32
gcd (uint32 a, uint32 b)
gcd(uint32 a, uint32 b)
{
uint32 c;
uint32 c;
while (a != 0)
{
......@@ -253,8 +254,8 @@ static uint32
random_relative_prime(uint32 n, SamplerRandomState randstate)
{
/* Pick random starting number, with some limits on what it can be. */
uint32 r = (uint32) sampler_random_fract(randstate) * n/2 + n/4,
t;
uint32 r = (uint32) sampler_random_fract(randstate) * n / 2 + n / 4,
t;
/*
* This should only take 2 or 3 iterations as the probability of 2 numbers
......
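The gcd() and random_relative_prime() hunks above are whitespace-only, but the underlying idea is worth spelling out: pick a step size that is coprime to the number of blocks so that linear probing visits every block exactly once. A hedged standalone sketch (plain rand() stands in for SamplerRandomState, and random_step is a made-up helper name):

#include <stdio.h>
#include <stdlib.h>

static unsigned int
gcd(unsigned int a, unsigned int b)
{
    while (a != 0)
    {
        unsigned int c = a;

        a = b % a;
        b = c;
    }
    return b;
}

static unsigned int
random_step(unsigned int nblocks)
{
    /* Start somewhere around [n/4, 3n/4) and bump until coprime with nblocks. */
    unsigned int r = (unsigned int) rand() % (nblocks / 2 + 1) + nblocks / 4;

    while (gcd(r, nblocks) > 1)
        r++;
    return r;
}

int main(void)
{
    unsigned int nblocks = 12;
    unsigned int step = random_step(nblocks);
    unsigned int blk = 0;
    unsigned int i;

    /* A step coprime to nblocks visits each of the nblocks blocks exactly once. */
    for (i = 0; i < nblocks; i++)
    {
        printf("%u ", blk);
        blk = (blk + step) % nblocks;
    }
    printf("\n");
    return 0;
}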
......@@ -35,16 +35,17 @@ PG_MODULE_MAGIC;
typedef struct
{
SamplerRandomState randstate;
uint32 seed; /* random seed */
BlockNumber nblocks; /* number of blocks in relation */
int32 time; /* time limit for sampling */
TimestampTz start_time; /* start time of sampling */
TimestampTz end_time; /* end time of sampling */
OffsetNumber lt; /* last tuple returned from current block */
BlockNumber step; /* step size */
BlockNumber lb; /* last block visited */
BlockNumber estblocks; /* estimated number of returned blocks (moving) */
BlockNumber doneblocks; /* number of already returned blocks */
uint32 seed; /* random seed */
BlockNumber nblocks; /* number of blocks in relation */
int32 time; /* time limit for sampling */
TimestampTz start_time; /* start time of sampling */
TimestampTz end_time; /* end time of sampling */
OffsetNumber lt; /* last tuple returned from current block */
BlockNumber step; /* step size */
BlockNumber lb; /* last block visited */
BlockNumber estblocks; /* estimated number of returned blocks
* (moving) */
BlockNumber doneblocks; /* number of already returned blocks */
} SystemSamplerData;
......@@ -63,11 +64,11 @@ static uint32 random_relative_prime(uint32 n, SamplerRandomState randstate);
Datum
tsm_system_time_init(PG_FUNCTION_ARGS)
{
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
uint32 seed = PG_GETARG_UINT32(1);
int32 time = PG_ARGISNULL(2) ? -1 : PG_GETARG_INT32(2);
HeapScanDesc scan = tsdesc->heapScan;
SystemSamplerData *sampler;
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
uint32 seed = PG_GETARG_UINT32(1);
int32 time = PG_ARGISNULL(2) ? -1 : PG_GETARG_INT32(2);
HeapScanDesc scan = tsdesc->heapScan;
SystemSamplerData *sampler;
if (time < 1)
ereport(ERROR,
......@@ -92,6 +93,7 @@ tsm_system_time_init(PG_FUNCTION_ARGS)
/* Find relative prime as step size for linear probing. */
sampler->step = random_relative_prime(sampler->nblocks, sampler->randstate);
/*
* Randomize start position so that blocks close to step size don't have
* higher probability of being chosen on very short scan.
......@@ -111,8 +113,8 @@ tsm_system_time_init(PG_FUNCTION_ARGS)
Datum
tsm_system_time_nextblock(PG_FUNCTION_ARGS)
{
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
sampler->lb = (sampler->lb + sampler->step) % sampler->nblocks;
sampler->doneblocks++;
......@@ -125,16 +127,16 @@ tsm_system_time_nextblock(PG_FUNCTION_ARGS)
* Update the estimations for time limit at least 10 times per estimated
* number of returned blocks to handle variations in block read speed.
*/
if (sampler->doneblocks % Max(sampler->estblocks/10, 1) == 0)
if (sampler->doneblocks % Max(sampler->estblocks / 10, 1) == 0)
{
TimestampTz now = GetCurrentTimestamp();
long secs;
int usecs;
TimestampTz now = GetCurrentTimestamp();
long secs;
int usecs;
int usecs_remaining;
int time_per_block;
TimestampDifference(sampler->start_time, now, &secs, &usecs);
usecs += (int) secs * 1000000;
usecs += (int) secs *1000000;
time_per_block = usecs / sampler->doneblocks;
......@@ -144,7 +146,7 @@ tsm_system_time_nextblock(PG_FUNCTION_ARGS)
PG_RETURN_UINT32(InvalidBlockNumber);
/* Remaining microseconds */
usecs_remaining = usecs + (int) secs * 1000000;
usecs_remaining = usecs + (int) secs *1000000;
/* Recalculate estimated returned number of blocks */
if (time_per_block < usecs_remaining && time_per_block > 0)
......@@ -161,10 +163,10 @@ tsm_system_time_nextblock(PG_FUNCTION_ARGS)
Datum
tsm_system_time_nexttuple(PG_FUNCTION_ARGS)
{
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
OffsetNumber maxoffset = PG_GETARG_UINT16(2);
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
OffsetNumber tupoffset = sampler->lt;
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
OffsetNumber maxoffset = PG_GETARG_UINT16(2);
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
OffsetNumber tupoffset = sampler->lt;
if (tupoffset == InvalidOffsetNumber)
tupoffset = FirstOffsetNumber;
......@@ -198,8 +200,8 @@ tsm_system_time_end(PG_FUNCTION_ARGS)
Datum
tsm_system_time_reset(PG_FUNCTION_ARGS)
{
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
sampler->lt = InvalidOffsetNumber;
sampler->start_time = GetCurrentTimestamp();
......@@ -221,18 +223,18 @@ tsm_system_time_reset(PG_FUNCTION_ARGS)
Datum
tsm_system_time_cost(PG_FUNCTION_ARGS)
{
PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
Path *path = (Path *) PG_GETARG_POINTER(1);
RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
List *args = (List *) PG_GETARG_POINTER(3);
BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
double *tuples = (double *) PG_GETARG_POINTER(5);
Node *limitnode;
int32 time;
BlockNumber relpages;
double reltuples;
double density;
double spc_random_page_cost;
PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
Path *path = (Path *) PG_GETARG_POINTER(1);
RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
List *args = (List *) PG_GETARG_POINTER(3);
BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
double *tuples = (double *) PG_GETARG_POINTER(5);
Node *limitnode;
int32 time;
BlockNumber relpages;
double reltuples;
double density;
double spc_random_page_cost;
limitnode = linitial(args);
limitnode = estimate_expression_value(root, limitnode);
......@@ -269,10 +271,10 @@ tsm_system_time_cost(PG_FUNCTION_ARGS)
/*
* Assumption here is that we'll never read less than 1% of table pages,
* this is here mainly because it is much less bad to overestimate than
* underestimate and using just spc_random_page_cost will probably lead
* to underestimations in general.
* underestimate and using just spc_random_page_cost will probably lead to
* underestimations in general.
*/
*pages = Min(baserel->pages, Max(time/spc_random_page_cost, baserel->pages/100));
*pages = Min(baserel->pages, Max(time / spc_random_page_cost, baserel->pages / 100));
*tuples = rint(density * (double) *pages * path->rows / baserel->tuples);
path->rows = *tuples;
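For illustration only (the numbers below are assumed, not taken from the source): with a 200 ms time limit, spc_random_page_cost = 4.0 and a 10000-page relation, Max(200 / 4.0, 10000 / 100) = Max(50, 100) = 100, so Min(10000, 100) = 100 pages are charged; here the 1% floor wins over the time-based estimate.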
......@@ -280,9 +282,9 @@ tsm_system_time_cost(PG_FUNCTION_ARGS)
}
static uint32
gcd (uint32 a, uint32 b)
gcd(uint32 a, uint32 b)
{
uint32 c;
uint32 c;
while (a != 0)
{
......@@ -298,8 +300,8 @@ static uint32
random_relative_prime(uint32 n, SamplerRandomState randstate)
{
/* Pick random starting number, with some limits on what it can be. */
uint32 r = (uint32) sampler_random_fract(randstate) * n/2 + n/4,
t;
uint32 r = (uint32) sampler_random_fract(randstate) * n / 2 + n / 4,
t;
/*
* This should only take 2 or 3 iterations as the probability of 2 numbers
......
......@@ -387,7 +387,7 @@ bringetbitmap(PG_FUNCTION_ARGS)
*/
Assert((key->sk_flags & SK_ISNULL) ||
(key->sk_collation ==
bdesc->bd_tupdesc->attrs[keyattno - 1]->attcollation));
bdesc->bd_tupdesc->attrs[keyattno - 1]->attcollation));
/* First time this column? look up consistent function */
if (consistentFn[keyattno - 1].fn_oid == InvalidOid)
......@@ -523,10 +523,10 @@ brinbuildCallback(Relation index,
thisblock = ItemPointerGetBlockNumber(&htup->t_self);
/*
* If we're in a block that belongs to a future range, summarize what we've
* got and start afresh. Note the scan might have skipped many pages,
* if they were devoid of live tuples; make sure to insert index tuples
* for those too.
* If we're in a block that belongs to a future range, summarize what
* we've got and start afresh. Note the scan might have skipped many
* pages, if they were devoid of live tuples; make sure to insert index
* tuples for those too.
*/
while (thisblock > state->bs_currRangeStart + state->bs_pagesPerRange - 1)
{
......@@ -660,7 +660,6 @@ brinbuild(PG_FUNCTION_ARGS)
Datum
brinbuildempty(PG_FUNCTION_ARGS)
{
Relation index = (Relation) PG_GETARG_POINTER(0);
Buffer metabuf;
......@@ -696,7 +695,7 @@ brinbulkdelete(PG_FUNCTION_ARGS)
{
/* other arguments are not currently used */
IndexBulkDeleteResult *stats =
(IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
(IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
/* allocate stats if first time through, else re-use existing struct */
if (stats == NULL)
......@@ -714,7 +713,7 @@ brinvacuumcleanup(PG_FUNCTION_ARGS)
{
IndexVacuumInfo *info = (IndexVacuumInfo *) PG_GETARG_POINTER(0);
IndexBulkDeleteResult *stats =
(IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
(IndexBulkDeleteResult *) PG_GETARG_POINTER(1);
Relation heapRel;
/* No-op in ANALYZE ONLY mode */
......@@ -900,7 +899,7 @@ terminate_brin_buildstate(BrinBuildState *state)
page = BufferGetPage(state->bs_currentInsertBuf);
RecordPageWithFreeSpace(state->bs_irel,
BufferGetBlockNumber(state->bs_currentInsertBuf),
BufferGetBlockNumber(state->bs_currentInsertBuf),
PageGetFreeSpace(page));
ReleaseBuffer(state->bs_currentInsertBuf);
}
......
......@@ -61,11 +61,11 @@
* 0 - the union of the values in the block range
* 1 - whether an empty value is present in any tuple in the block range
* 2 - whether the values in the block range cannot be merged (e.g. an IPv6
* address amidst IPv4 addresses).
* address amidst IPv4 addresses).
*/
#define INCLUSION_UNION 0
#define INCLUSION_UNMERGEABLE 1
#define INCLUSION_CONTAINS_EMPTY 2
#define INCLUSION_UNION 0
#define INCLUSION_UNMERGEABLE 1
#define INCLUSION_CONTAINS_EMPTY 2
typedef struct InclusionOpaque
......@@ -294,22 +294,22 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
unionval = column->bv_values[INCLUSION_UNION];
switch (key->sk_strategy)
{
/*
* Placement strategies
*
* These are implemented by logically negating the result of the
* converse placement operator; for this to work, the converse operator
* must be part of the opclass. An error will be thrown by
* inclusion_get_strategy_procinfo() if the required strategy is not
* part of the opclass.
*
* These all return false if either argument is empty, so there is
* no need to check for empty elements.
*/
/*
* Placement strategies
*
* These are implemented by logically negating the result of the
* converse placement operator; for this to work, the converse
* operator must be part of the opclass. An error will be thrown
* by inclusion_get_strategy_procinfo() if the required strategy
* is not part of the opclass.
*
* These all return false if either argument is empty, so there is
* no need to check for empty elements.
*/
case RTLeftStrategyNumber:
finfo = inclusion_get_strategy_procinfo(bdesc, attno, subtype,
RTOverRightStrategyNumber);
RTOverRightStrategyNumber);
result = FunctionCall2Coll(finfo, colloid, unionval, query);
PG_RETURN_BOOL(!DatumGetBool(result));
......@@ -333,7 +333,7 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
case RTBelowStrategyNumber:
finfo = inclusion_get_strategy_procinfo(bdesc, attno, subtype,
RTOverAboveStrategyNumber);
RTOverAboveStrategyNumber);
result = FunctionCall2Coll(finfo, colloid, unionval, query);
PG_RETURN_BOOL(!DatumGetBool(result));
......@@ -351,7 +351,7 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
case RTAboveStrategyNumber:
finfo = inclusion_get_strategy_procinfo(bdesc, attno, subtype,
RTOverBelowStrategyNumber);
RTOverBelowStrategyNumber);
result = FunctionCall2Coll(finfo, colloid, unionval, query);
PG_RETURN_BOOL(!DatumGetBool(result));
......@@ -381,8 +381,8 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
* strategies because some elements can be contained even though
* the union is not; instead we use the overlap operator.
*
* We check for empty elements separately as they are not merged to
* the union but contained by everything.
* We check for empty elements separately as they are not merged
* to the union but contained by everything.
*/
case RTContainedByStrategyNumber:
......@@ -400,8 +400,8 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
/*
* Adjacent strategy
*
* We test for overlap first but to be safe we need to call
* the actual adjacent operator also.
* We test for overlap first but to be safe we need to call the
* actual adjacent operator also.
*
* An empty element cannot be adjacent to any other, so there is
* no need to check for it.
......@@ -426,8 +426,8 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS)
* the contains operator. Generally, inequality strategies do not
* make much sense for the types which will be used with the
inclusion BRIN family of opclasses, but it is possible to
* implement them with logical negation of the left-of and right-of
* operators.
* implement them with logical negation of the left-of and
* right-of operators.
*
* NB: These strategies cannot be used with geometric datatypes
* that use comparison of areas! The only exception is the "same"
......
......@@ -33,7 +33,7 @@ Datum brin_minmax_add_value(PG_FUNCTION_ARGS);
Datum brin_minmax_consistent(PG_FUNCTION_ARGS);
Datum brin_minmax_union(PG_FUNCTION_ARGS);
static FmgrInfo *minmax_get_strategy_procinfo(BrinDesc *bdesc, uint16 attno,
Oid subtype, uint16 strategynum);
Oid subtype, uint16 strategynum);
Datum
......@@ -209,7 +209,7 @@ brin_minmax_consistent(PG_FUNCTION_ARGS)
break;
/* max() >= scankey */
finfo = minmax_get_strategy_procinfo(bdesc, attno, subtype,
BTGreaterEqualStrategyNumber);
BTGreaterEqualStrategyNumber);
matches = FunctionCall2Coll(finfo, colloid, column->bv_values[1],
value);
break;
......@@ -260,10 +260,10 @@ brin_minmax_union(PG_FUNCTION_ARGS)
attr = bdesc->bd_tupdesc->attrs[attno - 1];
/*
* Adjust "allnulls". If A doesn't have values, just copy the values
* from B into A, and we're done. We cannot run the operators in this
* case, because values in A might contain garbage. Note we already
* established that B contains values.
* Adjust "allnulls". If A doesn't have values, just copy the values from
* B into A, and we're done. We cannot run the operators in this case,
* because values in A might contain garbage. Note we already established
* that B contains values.
*/
if (col_a->bv_allnulls)
{
......@@ -355,7 +355,7 @@ minmax_get_strategy_procinfo(BrinDesc *bdesc, uint16 attno, Oid subtype,
strategynum, attr->atttypid, subtype, opfamily);
oprid = DatumGetObjectId(SysCacheGetAttr(AMOPSTRATEGY, tuple,
Anum_pg_amop_amopopr, &isNull));
Anum_pg_amop_amopopr, &isNull));
ReleaseSysCache(tuple);
Assert(!isNull && RegProcedureIsValid(oprid));
......
......@@ -48,7 +48,7 @@ struct BrinRevmap
{
Relation rm_irel;
BlockNumber rm_pagesPerRange;
BlockNumber rm_lastRevmapPage; /* cached from the metapage */
BlockNumber rm_lastRevmapPage; /* cached from the metapage */
Buffer rm_metaBuf;
Buffer rm_currBuf;
};
......@@ -57,7 +57,7 @@ struct BrinRevmap
static BlockNumber revmap_get_blkno(BrinRevmap *revmap,
BlockNumber heapBlk);
BlockNumber heapBlk);
static Buffer revmap_get_buffer(BrinRevmap *revmap, BlockNumber heapBlk);
static BlockNumber revmap_extend_and_get_blkno(BrinRevmap *revmap,
BlockNumber heapBlk);
......@@ -110,7 +110,7 @@ brinRevmapTerminate(BrinRevmap *revmap)
void
brinRevmapExtend(BrinRevmap *revmap, BlockNumber heapBlk)
{
BlockNumber mapBlk PG_USED_FOR_ASSERTS_ONLY;
BlockNumber mapBlk PG_USED_FOR_ASSERTS_ONLY;
mapBlk = revmap_extend_and_get_blkno(revmap, heapBlk);
......@@ -245,7 +245,7 @@ brinGetTupleForHeapBlock(BrinRevmap *revmap, BlockNumber heapBlk,
if (ItemPointerIsValid(&previptr) && ItemPointerEquals(&previptr, iptr))
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
errmsg_internal("corrupted BRIN index: inconsistent range map")));
errmsg_internal("corrupted BRIN index: inconsistent range map")));
previptr = *iptr;
blk = ItemPointerGetBlockNumber(iptr);
......@@ -356,7 +356,7 @@ revmap_get_buffer(BrinRevmap *revmap, BlockNumber heapBlk)
static BlockNumber
revmap_extend_and_get_blkno(BrinRevmap *revmap, BlockNumber heapBlk)
{
BlockNumber targetblk;
BlockNumber targetblk;
/* obtain revmap block number, skip 1 for metapage block */
targetblk = HEAPBLK_TO_REVMAP_BLK(revmap->rm_pagesPerRange, heapBlk) + 1;
......@@ -445,10 +445,10 @@ revmap_physical_extend(BrinRevmap *revmap)
if (!PageIsNew(page) && !BRIN_IS_REGULAR_PAGE(page))
ereport(ERROR,
(errcode(ERRCODE_INDEX_CORRUPTED),
errmsg("unexpected page type 0x%04X in BRIN index \"%s\" block %u",
BrinPageType(page),
RelationGetRelationName(irel),
BufferGetBlockNumber(buf))));
errmsg("unexpected page type 0x%04X in BRIN index \"%s\" block %u",
BrinPageType(page),
RelationGetRelationName(irel),
BufferGetBlockNumber(buf))));
/* If the page is in use, evacuate it and restart */
if (brin_start_evacuating_page(irel, buf))
......
......@@ -68,7 +68,7 @@ brtuple_disk_tupdesc(BrinDesc *brdesc)
{
for (j = 0; j < brdesc->bd_info[i]->oi_nstored; j++)
TupleDescInitEntry(tupdesc, attno++, NULL,
brdesc->bd_info[i]->oi_typcache[j]->type_id,
brdesc->bd_info[i]->oi_typcache[j]->type_id,
-1, 0);
}
......
......@@ -1785,7 +1785,8 @@ gingetbitmap(PG_FUNCTION_ARGS)
/*
* Set up the scan keys, and check for unsatisfiable query.
*/
ginFreeScanKeys(so); /* there should be no keys yet, but just to be sure */
ginFreeScanKeys(so); /* there should be no keys yet, but just to be
* sure */
ginNewScanKey(scan);
if (GinIsVoidRes(scan))
......
......@@ -527,7 +527,7 @@ ginoptions(PG_FUNCTION_ARGS)
static const relopt_parse_elt tab[] = {
{"fastupdate", RELOPT_TYPE_BOOL, offsetof(GinOptions, useFastUpdate)},
{"gin_pending_list_limit", RELOPT_TYPE_INT, offsetof(GinOptions,
pendingListCleanupSize)}
pendingListCleanupSize)}
};
options = parseRelOptions(reloptions, validate, RELOPT_KIND_GIN,
......
......@@ -1407,7 +1407,7 @@ initGISTstate(Relation index)
/* opclasses are not required to provide a Fetch method */
if (OidIsValid(index_getprocid(index, i + 1, GIST_FETCH_PROC)))
fmgr_info_copy(&(giststate->fetchFn[i]),
index_getprocinfo(index, i + 1, GIST_FETCH_PROC),
index_getprocinfo(index, i + 1, GIST_FETCH_PROC),
scanCxt);
else
giststate->fetchFn[i].fn_oid = InvalidOid;
......
......@@ -154,8 +154,8 @@ gistrescan(PG_FUNCTION_ARGS)
}
/*
* If we're doing an index-only scan, on the first call, also initialize
* a tuple descriptor to represent the returned index tuples and create a
* If we're doing an index-only scan, on the first call, also initialize a
* tuple descriptor to represent the returned index tuples and create a
* memory context to hold them during the scan.
*/
if (scan->xs_want_itup && !scan->xs_itupdesc)
......@@ -169,7 +169,7 @@ gistrescan(PG_FUNCTION_ARGS)
* descriptor. Instead, construct a descriptor with the original data
* types.
*/
natts = RelationGetNumberOfAttributes(scan->indexRelation);
natts = RelationGetNumberOfAttributes(scan->indexRelation);
so->giststate->fetchTupdesc = CreateTemplateTupleDesc(natts, false);
for (attno = 1; attno <= natts; attno++)
{
......@@ -288,9 +288,9 @@ gistrescan(PG_FUNCTION_ARGS)
fmgr_info_copy(&(skey->sk_func), finfo, so->giststate->scanCxt);
/*
* Look up the datatype returned by the original ordering operator.
* GiST always uses a float8 for the distance function, but the
* ordering operator could be anything else.
* Look up the datatype returned by the original ordering
* operator. GiST always uses a float8 for the distance function,
* but the ordering operator could be anything else.
*
* XXX: The distance function is only allowed to be lossy if the
* ordering operator's result type is float4 or float8. Otherwise
......
......@@ -583,7 +583,7 @@ gistFormTuple(GISTSTATE *giststate, Relation r,
isleaf);
cep = (GISTENTRY *)
DatumGetPointer(FunctionCall1Coll(&giststate->compressFn[i],
giststate->supportCollation[i],
giststate->supportCollation[i],
PointerGetDatum(&centry)));
compatt[i] = cep->key;
}
......
......@@ -60,9 +60,9 @@ RelationPutHeapTuple(Relation relation,
ItemPointerSet(&(tuple->t_self), BufferGetBlockNumber(buffer), offnum);
/*
* Insert the correct position into CTID of the stored tuple, too
* (unless this is a speculative insertion, in which case the token is
* held in CTID field instead)
* Insert the correct position into CTID of the stored tuple, too (unless
* this is a speculative insertion, in which case the token is held in
* CTID field instead)
*/
if (!token)
{
......
......@@ -185,11 +185,11 @@ BuildIndexValueDescription(Relation indexRelation,
* Check permissions- if the user does not have access to view all of the
* key columns then return NULL to avoid leaking data.
*
* First check if RLS is enabled for the relation. If so, return NULL
* to avoid leaking data.
* First check if RLS is enabled for the relation. If so, return NULL to
* avoid leaking data.
*
* Next we need to check table-level SELECT access and then, if
* there is no access there, check column-level permissions.
* Next we need to check table-level SELECT access and then, if there is
* no access there, check column-level permissions.
*/
/*
......@@ -215,18 +215,18 @@ BuildIndexValueDescription(Relation indexRelation,
if (aclresult != ACLCHECK_OK)
{
/*
* No table-level access, so step through the columns in the
* index and make sure the user has SELECT rights on all of them.
* No table-level access, so step through the columns in the index and
* make sure the user has SELECT rights on all of them.
*/
for (keyno = 0; keyno < idxrec->indnatts; keyno++)
{
AttrNumber attnum = idxrec->indkey.values[keyno];
/*
* Note that if attnum == InvalidAttrNumber, then this is an
* index based on an expression and we return no detail rather
* than try to figure out what column(s) the expression includes
* and if the user has SELECT rights on them.
* Note that if attnum == InvalidAttrNumber, then this is an index
* based on an expression and we return no detail rather than try
* to figure out what column(s) the expression includes and if the
* user has SELECT rights on them.
*/
if (attnum == InvalidAttrNumber ||
pg_attribute_aclcheck(indrelid, attnum, GetUserId(),
......
......@@ -160,8 +160,8 @@ top:
*/
if (checkUnique != UNIQUE_CHECK_NO)
{
TransactionId xwait;
uint32 speculativeToken;
TransactionId xwait;
uint32 speculativeToken;
offset = _bt_binsrch(rel, buf, natts, itup_scankey, false);
xwait = _bt_check_unique(rel, itup, heapRel, buf, offset, itup_scankey,
......@@ -171,9 +171,10 @@ top:
{
/* Have to wait for the other guy ... */
_bt_relbuf(rel, buf);
/*
* If it's a speculative insertion, wait for it to finish (ie.
* to go ahead with the insertion, or kill the tuple). Otherwise
* If it's a speculative insertion, wait for it to finish (ie. to
* go ahead with the insertion, or kill the tuple). Otherwise
* wait for the transaction to finish as usual.
*/
if (speculativeToken)
......@@ -417,8 +418,8 @@ _bt_check_unique(Relation rel, IndexTuple itup, Relation heapRel,
(errcode(ERRCODE_UNIQUE_VIOLATION),
errmsg("duplicate key value violates unique constraint \"%s\"",
RelationGetRelationName(rel)),
key_desc ? errdetail("Key %s already exists.",
key_desc) : 0,
key_desc ? errdetail("Key %s already exists.",
key_desc) : 0,
errtableconstraint(heapRel,
RelationGetRelationName(rel))));
}
......
......@@ -1233,6 +1233,7 @@ _bt_pagedel(Relation rel, Buffer buf)
lbuf = _bt_getbuf(rel, leftsib, BT_READ);
lpage = BufferGetPage(lbuf);
lopaque = (BTPageOpaque) PageGetSpecialPointer(lpage);
/*
* If the left sibling is split again by another backend,
* after we released the lock, we know that the first
......@@ -1345,11 +1346,11 @@ _bt_mark_page_halfdead(Relation rel, Buffer leafbuf, BTStack stack)
leafrightsib = opaque->btpo_next;
/*
* Before attempting to lock the parent page, check that the right
* sibling is not in half-dead state. A half-dead right sibling would
* have no downlink in the parent, which would be highly confusing later
* when we delete the downlink that follows the current page's downlink.
* (I believe the deletion would work correctly, but it would fail the
* Before attempting to lock the parent page, check that the right sibling
* is not in half-dead state. A half-dead right sibling would have no
* downlink in the parent, which would be highly confusing later when we
* delete the downlink that follows the current page's downlink. (I
* believe the deletion would work correctly, but it would fail the
* cross-check we make that the following downlink points to the right
* sibling of the delete page.)
*/
......
......@@ -40,9 +40,8 @@ typedef struct
BTSpool *spool;
/*
* spool2 is needed only when the index is a unique index. Dead tuples
* are put into spool2 instead of spool in order to avoid uniqueness
* check.
* spool2 is needed only when the index is a unique index. Dead tuples are
* put into spool2 instead of spool in order to avoid uniqueness check.
*/
BTSpool *spool2;
double indtuples;
......
......@@ -1027,10 +1027,10 @@ _bt_first(IndexScanDesc scan, ScanDirection dir)
offnum = OffsetNumberPrev(offnum);
/*
* By here the scan position is now set for the first key. If all
* further tuples are expected to match we set the SK_BT_MATCHED flag
* to avoid re-checking the scan key later. This is a big win for
* slow key matches though is still significant even for fast datatypes.
* By here the scan position is now set for the first key. If all further
* tuples are expected to match we set the SK_BT_MATCHED flag to avoid
* re-checking the scan key later. This is a big win for slow key matches
* though is still significant even for fast datatypes.
*/
switch (startKeys[0]->sk_strategy)
{
......
......@@ -742,7 +742,7 @@ _bt_load(BTWriteState *wstate, BTSpool *btspool, BTSpool *btspool2)
{
for (i = 1; i <= keysz; i++)
{
SortSupport entry;
SortSupport entry;
Datum attrDatum1,
attrDatum2;
bool isNull1,
......
......@@ -1430,8 +1430,8 @@ _bt_checkkeys(IndexScanDesc scan,
Datum test;
/*
* If the scan key has already matched we can skip this key, as
* long as the index tuple does not contain NULL values.
* If the scan key has already matched we can skip this key, as long
* as the index tuple does not contain NULL values.
*/
if (key->sk_flags & SK_BT_MATCHED && !IndexTupleHasNulls(tuple))
continue;
......@@ -1740,7 +1740,7 @@ _bt_check_rowcompare(ScanKey skey, IndexTuple tuple, TupleDesc tupdesc,
* any items from the page, and so there is no need to search left from the
* recorded offset. (This observation also guarantees that the item is still
* the right one to delete, which might otherwise be questionable since heap
* TIDs can get recycled.) This holds true even if the page has been modified
* TIDs can get recycled.) This holds true even if the page has been modified
* by inserts and page splits, so there is no need to consult the LSN.
*
* If the pin was released after reading the page, then we re-read it. If it
......
/*-------------------------------------------------------------------------
*
* committsdesc.c
* rmgr descriptor routines for access/transam/commit_ts.c
* rmgr descriptor routines for access/transam/commit_ts.c
*
* Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* src/backend/access/rmgrdesc/committsdesc.c
* src/backend/access/rmgrdesc/committsdesc.c
*
*-------------------------------------------------------------------------
*/
......@@ -41,7 +41,7 @@ commit_ts_desc(StringInfo buf, XLogReaderState *record)
else if (info == COMMIT_TS_SETTS)
{
xl_commit_ts_set *xlrec = (xl_commit_ts_set *) rec;
int nsubxids;
int nsubxids;
appendStringInfo(buf, "set %s/%d for: %u",
timestamptz_to_str(xlrec->timestamp),
......@@ -51,7 +51,7 @@ commit_ts_desc(StringInfo buf, XLogReaderState *record)
sizeof(TransactionId));
if (nsubxids > 0)
{
int i;
int i;
TransactionId *subxids;
subxids = palloc(sizeof(TransactionId) * nsubxids);
......
/*-------------------------------------------------------------------------
*
* replorigindesc.c
* rmgr descriptor routines for replication/logical/replication_origin.c
* rmgr descriptor routines for replication/logical/replication_origin.c
*
* Portions Copyright (c) 2015, PostgreSQL Global Development Group
*
*
* IDENTIFICATION
* src/backend/access/rmgrdesc/replorigindesc.c
* src/backend/access/rmgrdesc/replorigindesc.c
*
*-------------------------------------------------------------------------
*/
......@@ -26,6 +26,7 @@ replorigin_desc(StringInfo buf, XLogReaderState *record)
case XLOG_REPLORIGIN_SET:
{
xl_replorigin_set *xlrec;
xlrec = (xl_replorigin_set *) rec;
appendStringInfo(buf, "set %u; lsn %X/%X; force: %d",
......@@ -38,6 +39,7 @@ replorigin_desc(StringInfo buf, XLogReaderState *record)
case XLOG_REPLORIGIN_DROP:
{
xl_replorigin_drop *xlrec;
xlrec = (xl_replorigin_drop *) rec;
appendStringInfo(buf, "drop %u", xlrec->node_id);
......
......@@ -37,7 +37,8 @@ ParseCommitRecord(uint8 info, xl_xact_commit *xlrec, xl_xact_parsed_commit *pars
memset(parsed, 0, sizeof(*parsed));
parsed->xinfo = 0; /* default, if no XLOG_XACT_HAS_INFO is present */
parsed->xinfo = 0; /* default, if no XLOG_XACT_HAS_INFO is
* present */
parsed->xact_time = xlrec->xact_time;
......@@ -62,7 +63,7 @@ ParseCommitRecord(uint8 info, xl_xact_commit *xlrec, xl_xact_parsed_commit *pars
if (parsed->xinfo & XACT_XINFO_HAS_SUBXACTS)
{
xl_xact_subxacts *xl_subxacts = (xl_xact_subxacts *) data;
xl_xact_subxacts *xl_subxacts = (xl_xact_subxacts *) data;
parsed->nsubxacts = xl_subxacts->nsubxacts;
parsed->subxacts = xl_subxacts->subxacts;
......@@ -123,7 +124,8 @@ ParseAbortRecord(uint8 info, xl_xact_abort *xlrec, xl_xact_parsed_abort *parsed)
memset(parsed, 0, sizeof(*parsed));
parsed->xinfo = 0; /* default, if no XLOG_XACT_HAS_INFO is present */
parsed->xinfo = 0; /* default, if no XLOG_XACT_HAS_INFO is
* present */
parsed->xact_time = xlrec->xact_time;
......@@ -138,7 +140,7 @@ ParseAbortRecord(uint8 info, xl_xact_abort *xlrec, xl_xact_parsed_abort *parsed)
if (parsed->xinfo & XACT_XINFO_HAS_SUBXACTS)
{
xl_xact_subxacts *xl_subxacts = (xl_xact_subxacts *) data;
xl_xact_subxacts *xl_subxacts = (xl_xact_subxacts *) data;
parsed->nsubxacts = xl_subxacts->nsubxacts;
parsed->subxacts = xl_subxacts->subxacts;
......@@ -236,8 +238,8 @@ xact_desc_commit(StringInfo buf, uint8 info, xl_xact_commit *xlrec, RepOriginId
{
appendStringInfo(buf, "; origin: node %u, lsn %X/%X, at %s",
origin_id,
(uint32)(parsed.origin_lsn >> 32),
(uint32)parsed.origin_lsn,
(uint32) (parsed.origin_lsn >> 32),
(uint32) parsed.origin_lsn,
timestamptz_to_str(parsed.origin_timestamp));
}
}
......
......@@ -658,6 +658,7 @@ Datum
spgcanreturn(PG_FUNCTION_ARGS)
{
Relation index = (Relation) PG_GETARG_POINTER(0);
/* int i = PG_GETARG_INT32(1); */
SpGistCache *cache;
......
......@@ -27,13 +27,15 @@
/* tsdesc */
typedef struct
{
uint32 seed; /* random seed */
BlockNumber startblock; /* starting block, we use this for syncscan support */
uint32 seed; /* random seed */
BlockNumber startblock; /* starting block, we use this for syncscan
* support */
BlockNumber nblocks; /* number of blocks */
BlockNumber blockno; /* current block */
float4 probability; /* probability that tuple will be returned (0.0-1.0) */
float4 probability; /* probability that tuple will be returned
* (0.0-1.0) */
OffsetNumber lt; /* last tuple returned from current block */
SamplerRandomState randstate; /* random generator tsdesc */
SamplerRandomState randstate; /* random generator tsdesc */
} BernoulliSamplerData;
/*
......@@ -42,10 +44,10 @@ typedef struct
Datum
tsm_bernoulli_init(PG_FUNCTION_ARGS)
{
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
uint32 seed = PG_GETARG_UINT32(1);
float4 percent = PG_ARGISNULL(2) ? -1 : PG_GETARG_FLOAT4(2);
HeapScanDesc scan = tsdesc->heapScan;
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
uint32 seed = PG_GETARG_UINT32(1);
float4 percent = PG_ARGISNULL(2) ? -1 : PG_GETARG_FLOAT4(2);
HeapScanDesc scan = tsdesc->heapScan;
BernoulliSamplerData *sampler;
if (percent < 0 || percent > 100)
......@@ -77,14 +79,13 @@ tsm_bernoulli_init(PG_FUNCTION_ARGS)
Datum
tsm_bernoulli_nextblock(PG_FUNCTION_ARGS)
{
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
BernoulliSamplerData *sampler =
(BernoulliSamplerData *) tsdesc->tsmdata;
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
BernoulliSamplerData *sampler =
(BernoulliSamplerData *) tsdesc->tsmdata;
/*
* Bernoulli sampling scans all blocks on the table and supports
* syncscan so loop from startblock to startblock instead of
* from 0 to nblocks.
* Bernoulli sampling scans all blocks on the table and supports syncscan
* so loop from startblock to startblock instead of from 0 to nblocks.
*/
if (sampler->blockno == InvalidBlockNumber)
sampler->blockno = sampler->startblock;
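The comment rewrapped above describes the Bernoulli method's block iteration: every block is visited, but the walk starts at the syncscan hint (startblock), wraps past the end of the relation, and stops once it comes back around. A minimal standalone sketch of that wrap-around loop, using hypothetical names rather than the backend's BlockNumber machinery:

#include <stdio.h>
#include <stdint.h>

typedef uint32_t BlockNo;                      /* stand-in for BlockNumber */
#define INVALID_BLOCK ((BlockNo) 0xFFFFFFFF)

/* Return the next block to visit, wrapping from the end of the relation back
 * to block 0; INVALID_BLOCK once we have come full circle to startblock. */
static BlockNo
next_block(BlockNo current, BlockNo startblock, BlockNo nblocks)
{
    if (current == INVALID_BLOCK)              /* first call: start at the hint */
        return startblock;

    current++;
    if (current >= nblocks)                    /* wrap past the last block */
        current = 0;

    if (current == startblock)                 /* back where we started: done */
        return INVALID_BLOCK;
    return current;
}

int
main(void)
{
    BlockNo blk = INVALID_BLOCK;

    /* a 5-block relation whose syncscan hint points at block 3 */
    while ((blk = next_block(blk, 3, 5)) != INVALID_BLOCK)
        printf("visit block %u\n", (unsigned) blk);   /* 3 4 0 1 2 */
    return 0;
}

Starting at the shared hint rather than at block 0 is what lets a new scan piggyback on I/O already being done by other scans of the same table.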
......@@ -116,7 +117,7 @@ tsm_bernoulli_nextblock(PG_FUNCTION_ARGS)
* tuples have same probability of being returned the visible and invisible
* tuples will be returned in same ratio as they have in the actual table.
* This means that there is no skew towards either visible or invisible tuples
* and the number of visible tuples returned from the executor node is the
* and the number of visible tuples returned from the executor node is the
* fraction of visible tuples which was specified in input.
*
* This is faster than doing the coinflip in the examinetuple because we don't
......@@ -128,12 +129,12 @@ tsm_bernoulli_nextblock(PG_FUNCTION_ARGS)
Datum
tsm_bernoulli_nexttuple(PG_FUNCTION_ARGS)
{
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
OffsetNumber maxoffset = PG_GETARG_UINT16(2);
BernoulliSamplerData *sampler =
(BernoulliSamplerData *) tsdesc->tsmdata;
OffsetNumber tupoffset = sampler->lt;
float4 probability = sampler->probability;
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
OffsetNumber maxoffset = PG_GETARG_UINT16(2);
BernoulliSamplerData *sampler =
(BernoulliSamplerData *) tsdesc->tsmdata;
OffsetNumber tupoffset = sampler->lt;
float4 probability = sampler->probability;
if (tupoffset == InvalidOffsetNumber)
tupoffset = FirstOffsetNumber;
......@@ -142,8 +143,8 @@ tsm_bernoulli_nexttuple(PG_FUNCTION_ARGS)
/*
* Loop over tuple offsets until the random generator returns value that
* is within the probability of returning the tuple or until we reach
* end of the block.
* is within the probability of returning the tuple or until we reach end
* of the block.
*
* (This is our implementation of bernoulli trial)
*/
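The loop described in the comment just above is a per-tuple Bernoulli trial: for each line pointer on the page, draw a uniform random number and keep the tuple only if the draw falls under the sampling probability. A self-contained sketch of that loop, with hypothetical names and plain rand() standing in for the sampler's private random state:

#include <stdio.h>
#include <stdlib.h>

#define FIRST_OFFSET   1        /* stand-ins for FirstOffsetNumber ... */
#define INVALID_OFFSET 0        /* ... and InvalidOffsetNumber */

/* Return the next offset on the block that wins its Bernoulli trial,
 * or INVALID_OFFSET once the block is exhausted. */
static unsigned
bernoulli_next_offset(unsigned lastoffset, unsigned maxoffset, double probability)
{
    unsigned tupoffset = (lastoffset == INVALID_OFFSET) ? FIRST_OFFSET
                                                        : lastoffset + 1;

    while (tupoffset <= maxoffset)
    {
        /* one trial per tuple: uniform draw in [0,1) against the probability */
        double draw = (double) rand() / ((double) RAND_MAX + 1.0);

        if (draw < probability)
            return tupoffset;   /* this tuple is part of the sample */
        tupoffset++;            /* otherwise skip it and test the next one */
    }
    return INVALID_OFFSET;      /* reached the end of the block */
}

int
main(void)
{
    unsigned off = INVALID_OFFSET;

    srand(42);
    /* sample roughly 30% of the 20 line pointers on one block */
    while ((off = bernoulli_next_offset(off, 20, 0.30)) != INVALID_OFFSET)
        printf("return tuple at offset %u\n", off);
    return 0;
}

Because every tuple gets an independent trial with the same probability, the expected fraction of returned tuples matches the requested percentage regardless of how tuples are laid out on the page.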
......@@ -183,9 +184,9 @@ tsm_bernoulli_end(PG_FUNCTION_ARGS)
Datum
tsm_bernoulli_reset(PG_FUNCTION_ARGS)
{
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
BernoulliSamplerData *sampler =
(BernoulliSamplerData *) tsdesc->tsmdata;
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
BernoulliSamplerData *sampler =
(BernoulliSamplerData *) tsdesc->tsmdata;
sampler->blockno = InvalidBlockNumber;
sampler->lt = InvalidOffsetNumber;
......@@ -200,14 +201,14 @@ tsm_bernoulli_reset(PG_FUNCTION_ARGS)
Datum
tsm_bernoulli_cost(PG_FUNCTION_ARGS)
{
PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
Path *path = (Path *) PG_GETARG_POINTER(1);
RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
List *args = (List *) PG_GETARG_POINTER(3);
BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
double *tuples = (double *) PG_GETARG_POINTER(5);
Node *pctnode;
float4 samplesize;
PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
Path *path = (Path *) PG_GETARG_POINTER(1);
RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
List *args = (List *) PG_GETARG_POINTER(3);
BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
double *tuples = (double *) PG_GETARG_POINTER(5);
Node *pctnode;
float4 samplesize;
*pages = baserel->pages;
......
......@@ -31,9 +31,9 @@
typedef struct
{
BlockSamplerData bs;
uint32 seed; /* random seed */
uint32 seed; /* random seed */
BlockNumber nblocks; /* number of block in relation */
int samplesize; /* number of blocks to return */
int samplesize; /* number of blocks to return */
OffsetNumber lt; /* last tuple returned from current block */
} SystemSamplerData;
......@@ -44,11 +44,11 @@ typedef struct
Datum
tsm_system_init(PG_FUNCTION_ARGS)
{
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
uint32 seed = PG_GETARG_UINT32(1);
float4 percent = PG_ARGISNULL(2) ? -1 : PG_GETARG_FLOAT4(2);
HeapScanDesc scan = tsdesc->heapScan;
SystemSamplerData *sampler;
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
uint32 seed = PG_GETARG_UINT32(1);
float4 percent = PG_ARGISNULL(2) ? -1 : PG_GETARG_FLOAT4(2);
HeapScanDesc scan = tsdesc->heapScan;
SystemSamplerData *sampler;
if (percent < 0 || percent > 100)
ereport(ERROR,
......@@ -80,9 +80,9 @@ tsm_system_init(PG_FUNCTION_ARGS)
Datum
tsm_system_nextblock(PG_FUNCTION_ARGS)
{
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
BlockNumber blockno;
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
BlockNumber blockno;
if (!BlockSampler_HasMore(&sampler->bs))
PG_RETURN_UINT32(InvalidBlockNumber);
......@@ -99,10 +99,10 @@ tsm_system_nextblock(PG_FUNCTION_ARGS)
Datum
tsm_system_nexttuple(PG_FUNCTION_ARGS)
{
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
OffsetNumber maxoffset = PG_GETARG_UINT16(2);
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
OffsetNumber tupoffset = sampler->lt;
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
OffsetNumber maxoffset = PG_GETARG_UINT16(2);
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
OffsetNumber tupoffset = sampler->lt;
if (tupoffset == InvalidOffsetNumber)
tupoffset = FirstOffsetNumber;
......@@ -136,8 +136,8 @@ tsm_system_end(PG_FUNCTION_ARGS)
Datum
tsm_system_reset(PG_FUNCTION_ARGS)
{
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0);
SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata;
sampler->lt = InvalidOffsetNumber;
BlockSampler_Init(&sampler->bs, sampler->nblocks, sampler->samplesize,
......@@ -152,14 +152,14 @@ tsm_system_reset(PG_FUNCTION_ARGS)
Datum
tsm_system_cost(PG_FUNCTION_ARGS)
{
PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
Path *path = (Path *) PG_GETARG_POINTER(1);
RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
List *args = (List *) PG_GETARG_POINTER(3);
BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
double *tuples = (double *) PG_GETARG_POINTER(5);
Node *pctnode;
float4 samplesize;
PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0);
Path *path = (Path *) PG_GETARG_POINTER(1);
RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2);
List *args = (List *) PG_GETARG_POINTER(3);
BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4);
double *tuples = (double *) PG_GETARG_POINTER(5);
Node *pctnode;
float4 samplesize;
pctnode = linitial(args);
pctnode = estimate_expression_value(root, pctnode);
......
/*-------------------------------------------------------------------------
*
* tablesample.c
* TABLESAMPLE internal API
* TABLESAMPLE internal API
*
* Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* src/backend/access/tablesample/tablesample.c
* src/backend/access/tablesample/tablesample.c
*
* TABLESAMPLE is the SQL standard clause for sampling the relations.
*
......@@ -53,7 +53,7 @@ tablesample_init(SampleScanState *scanstate, TableSampleClause *tablesample)
List *args = tablesample->args;
ListCell *arg;
ExprContext *econtext = scanstate->ss.ps.ps_ExprContext;
TableSampleDesc *tsdesc = (TableSampleDesc *) palloc0(sizeof(TableSampleDesc));
TableSampleDesc *tsdesc = (TableSampleDesc *) palloc0(sizeof(TableSampleDesc));
/* Load functions */
fmgr_info(tablesample->tsminit, &(tsdesc->tsminit));
......@@ -78,21 +78,21 @@ tablesample_init(SampleScanState *scanstate, TableSampleClause *tablesample)
fcinfo.argnull[0] = false;
/*
* Second arg for init function is always REPEATABLE
* When tablesample->repeatable is NULL then REPEATABLE clause was not
* specified.
* When specified, the expression cannot evaluate to NULL.
* Second arg for init function is always REPEATABLE When
* tablesample->repeatable is NULL then REPEATABLE clause was not
* specified. When specified, the expression cannot evaluate to NULL.
*/
if (tablesample->repeatable)
{
ExprState *argstate = ExecInitExpr((Expr *) tablesample->repeatable,
(PlanState *) scanstate);
fcinfo.arg[1] = ExecEvalExpr(argstate, econtext,
&fcinfo.argnull[1], NULL);
if (fcinfo.argnull[1])
ereport(ERROR,
(errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED),
errmsg("REPEATABLE clause must be NOT NULL numeric value")));
errmsg("REPEATABLE clause must be NOT NULL numeric value")));
}
else
{
......@@ -130,15 +130,15 @@ tablesample_init(SampleScanState *scanstate, TableSampleClause *tablesample)
HeapTuple
tablesample_getnext(TableSampleDesc *desc)
{
HeapScanDesc scan = desc->heapScan;
HeapTuple tuple = &(scan->rs_ctup);
bool pagemode = scan->rs_pageatatime;
BlockNumber blockno;
Page page;
bool page_all_visible;
ItemId itemid;
OffsetNumber tupoffset,
maxoffset;
HeapScanDesc scan = desc->heapScan;
HeapTuple tuple = &(scan->rs_ctup);
bool pagemode = scan->rs_pageatatime;
BlockNumber blockno;
Page page;
bool page_all_visible;
ItemId itemid;
OffsetNumber tupoffset,
maxoffset;
if (!scan->rs_inited)
{
......@@ -152,7 +152,7 @@ tablesample_getnext(TableSampleDesc *desc)
return NULL;
}
blockno = DatumGetInt32(FunctionCall1(&desc->tsmnextblock,
PointerGetDatum(desc)));
PointerGetDatum(desc)));
if (!BlockNumberIsValid(blockno))
{
tuple->t_data = NULL;
......@@ -184,14 +184,14 @@ tablesample_getnext(TableSampleDesc *desc)
CHECK_FOR_INTERRUPTS();
tupoffset = DatumGetUInt16(FunctionCall3(&desc->tsmnexttuple,
PointerGetDatum(desc),
UInt32GetDatum(blockno),
UInt16GetDatum(maxoffset)));
PointerGetDatum(desc),
UInt32GetDatum(blockno),
UInt16GetDatum(maxoffset)));
if (OffsetNumberIsValid(tupoffset))
{
bool visible;
bool found;
bool visible;
bool found;
/* Skip invalid tuple pointers. */
itemid = PageGetItemId(page, tupoffset);
......@@ -208,8 +208,8 @@ tablesample_getnext(TableSampleDesc *desc)
visible = SampleTupleVisible(tuple, tupoffset, scan);
/*
* Let the sampling method examine the actual tuple and decide if we
* should return it.
* Let the sampling method examine the actual tuple and decide if
* we should return it.
*
* Note that we let it examine even invisible tuples for
* statistical purposes, but not return them since user should
......@@ -218,10 +218,10 @@ tablesample_getnext(TableSampleDesc *desc)
if (OidIsValid(desc->tsmexaminetuple.fn_oid))
{
found = DatumGetBool(FunctionCall4(&desc->tsmexaminetuple,
PointerGetDatum(desc),
UInt32GetDatum(blockno),
PointerGetDatum(tuple),
BoolGetDatum(visible)));
PointerGetDatum(desc),
UInt32GetDatum(blockno),
PointerGetDatum(tuple),
BoolGetDatum(visible)));
/* Should not happen if sampling method is well written. */
if (found && !visible)
elog(ERROR, "Sampling method wanted to return invisible tuple");
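The hunk above calls the sampling method's optional examinetuple hook (only when its fn_oid is valid) and then enforces the contract that the hook may inspect invisible tuples for statistics but must never ask for one to be returned. A small sketch of that optional-hook-plus-sanity-check pattern, with hypothetical names in place of the fmgr machinery:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical optional hook: a sampling method may inspect every tuple,
 * visible or not, for statistical purposes, but may only claim visible ones. */
typedef bool (*examine_tuple_hook) (unsigned offset, bool visible);

static bool
keep_even_offsets(unsigned offset, bool visible)
{
    return visible && (offset % 2 == 0);    /* toy method: even offsets only */
}

static bool
decide_return(examine_tuple_hook hook, unsigned offset, bool visible)
{
    bool found;

    if (hook != NULL)
    {
        found = hook(offset, visible);
        /* contract check: a well-written method never claims an invisible tuple */
        if (found && !visible)
        {
            fprintf(stderr, "sampling method wanted to return invisible tuple\n");
            abort();
        }
    }
    else
        found = visible;        /* no hook installed: return all visible tuples */

    return found;
}

int
main(void)
{
    printf("%d %d %d\n",
           decide_return(keep_even_offsets, 4, true),    /* 1 */
           decide_return(keep_even_offsets, 5, true),    /* 0 */
           decide_return(NULL, 5, false));               /* 0 */
    return 0;
}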
......@@ -248,19 +248,19 @@ tablesample_getnext(TableSampleDesc *desc)
LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK);
blockno = DatumGetInt32(FunctionCall1(&desc->tsmnextblock,
PointerGetDatum(desc)));
PointerGetDatum(desc)));
/*
* Report our new scan position for synchronization purposes. We
* don't do that when moving backwards, however. That would just
* mess up any other forward-moving scanners.
* Report our new scan position for synchronization purposes. We don't
* do that when moving backwards, however. That would just mess up any
* other forward-moving scanners.
*
* Note: we do this before checking for end of scan so that the
* final state of the position hint is back at the start of the
* rel. That's not strictly necessary, but otherwise when you run
* the same query multiple times the starting position would shift
* a little bit backwards on every invocation, which is confusing.
* We don't guarantee any specific ordering in general, though.
* Note: we do this before checking for end of scan so that the final
* state of the position hint is back at the start of the rel. That's
* not strictly necessary, but otherwise when you run the same query
* multiple times the starting position would shift a little bit
* backwards on every invocation, which is confusing. We don't
* guarantee any specific ordering in general, though.
*/
if (scan->rs_syncscan)
ss_report_location(scan->rs_rd, BlockNumberIsValid(blockno) ?
......@@ -321,25 +321,25 @@ SampleTupleVisible(HeapTuple tuple, OffsetNumber tupoffset, HeapScanDesc scan)
{
/*
* If this scan is reading whole pages at a time, there is already
* visibility info present in rs_vistuples so we can just search it
* for the tupoffset.
* visibility info present in rs_vistuples so we can just search it for
* the tupoffset.
*/
if (scan->rs_pageatatime)
{
int start = 0,
end = scan->rs_ntuples - 1;
int start = 0,
end = scan->rs_ntuples - 1;
/*
* Do the binary search over rs_vistuples, it's already sorted by
* OffsetNumber so we don't need to do any sorting ourselves here.
*
* We could use bsearch() here, but it's slower for integers because
* of the function call overhead, and given the boilerplate code it needs,
* We could use bsearch() here, but it's slower for integers because of
* the function call overhead, and given the boilerplate code it needs,
* it would not save us anything code-wise anyway.
*/
while (start <= end)
{
int mid = start + (end - start) / 2;
int mid = start + (end - start) / 2;
OffsetNumber curoffset = scan->rs_vistuples[mid];
if (curoffset == tupoffset)
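The rewrapped comment above explains why the visibility check open-codes a binary search over rs_vistuples instead of calling bsearch(): the array is already sorted by offset, and an inline search avoids the comparator-call overhead. A standalone sketch of the same search over a sorted offset array, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>
#include <stdint.h>

typedef uint16_t OffsetNo;          /* stand-in for OffsetNumber */

/* Binary search in an array already sorted ascending, as rs_vistuples is;
 * open-coded rather than bsearch() to avoid the comparator call overhead. */
static bool
offset_is_visible(const OffsetNo *vistuples, int ntuples, OffsetNo target)
{
    int start = 0,
        end = ntuples - 1;

    while (start <= end)
    {
        int      mid = start + (end - start) / 2;
        OffsetNo cur = vistuples[mid];

        if (cur == target)
            return true;
        else if (cur < target)
            start = mid + 1;
        else
            end = mid - 1;
    }
    return false;
}

int
main(void)
{
    OffsetNo vis[] = {2, 5, 9, 17, 33};

    printf("%d %d\n",
           offset_is_visible(vis, 5, 9),    /* 1: present */
           offset_is_visible(vis, 5, 10));  /* 0: absent  */
    return 0;
}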
......@@ -358,7 +358,7 @@ SampleTupleVisible(HeapTuple tuple, OffsetNumber tupoffset, HeapScanDesc scan)
Snapshot snapshot = scan->rs_snapshot;
Buffer buffer = scan->rs_cbuf;
bool visible = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
bool visible = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer);
CheckForSerializableConflictOut(visible, scan->rs_rd, tuple, buffer,
snapshot);
......
......@@ -55,8 +55,8 @@
*/
typedef struct CommitTimestampEntry
{
TimestampTz time;
RepOriginId nodeid;
TimestampTz time;
RepOriginId nodeid;
} CommitTimestampEntry;
#define SizeOfCommitTimestampEntry (offsetof(CommitTimestampEntry, nodeid) + \
......@@ -65,7 +65,7 @@ typedef struct CommitTimestampEntry
#define COMMIT_TS_XACTS_PER_PAGE \
(BLCKSZ / SizeOfCommitTimestampEntry)
#define TransactionIdToCTsPage(xid) \
#define TransactionIdToCTsPage(xid) \
((xid) / (TransactionId) COMMIT_TS_XACTS_PER_PAGE)
#define TransactionIdToCTsEntry(xid) \
((xid) % (TransactionId) COMMIT_TS_XACTS_PER_PAGE)
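These macros turn a transaction ID into an SLRU page number and a slot within that page by simple integer division and modulus. As a worked example, assuming 8 kB pages and a 10-byte entry (an 8-byte timestamp plus a 2-byte origin ID; the real figures depend on BLCKSZ and struct layout), one page holds 819 entries:

#include <stdio.h>
#include <stdint.h>

/* Assumed sizes only; the real values come from BLCKSZ and the entry struct. */
#define BLCKSZ_ASSUMED     8192
#define ENTRY_SIZE_ASSUMED (8 + 2)
#define XACTS_PER_PAGE     (BLCKSZ_ASSUMED / ENTRY_SIZE_ASSUMED)    /* 819 */

int
main(void)
{
    uint32_t xid = 1000000;

    printf("entries per page: %d\n", XACTS_PER_PAGE);
    printf("xid %u -> page %u, slot %u\n",
           (unsigned) xid,
           (unsigned) (xid / XACTS_PER_PAGE),   /* which SLRU page holds it */
           (unsigned) (xid % XACTS_PER_PAGE));  /* which slot on that page  */
    return 0;
}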
......@@ -83,21 +83,21 @@ static SlruCtlData CommitTsCtlData;
*/
typedef struct CommitTimestampShared
{
TransactionId xidLastCommit;
TransactionId xidLastCommit;
CommitTimestampEntry dataLastCommit;
} CommitTimestampShared;
CommitTimestampShared *commitTsShared;
CommitTimestampShared *commitTsShared;
/* GUC variable */
bool track_commit_timestamp;
bool track_commit_timestamp;
static void SetXidCommitTsInPage(TransactionId xid, int nsubxids,
TransactionId *subxids, TimestampTz ts,
RepOriginId nodeid, int pageno);
static void TransactionIdSetCommitTs(TransactionId xid, TimestampTz ts,
RepOriginId nodeid, int slotno);
RepOriginId nodeid, int slotno);
static int ZeroCommitTsPage(int pageno, bool writeXlog);
static bool CommitTsPagePrecedes(int page1, int page2);
static void WriteZeroPageXlogRec(int pageno);
......@@ -141,8 +141,8 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids,
return;
/*
* Comply with the WAL-before-data rule: if caller specified it wants
* this value to be recorded in WAL, do so before touching the data.
* Comply with the WAL-before-data rule: if caller specified it wants this
* value to be recorded in WAL, do so before touching the data.
*/
if (do_xlog)
WriteSetTimestampXlogRec(xid, nsubxids, subxids, timestamp, nodeid);
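The reflowed comment above states the WAL-before-data rule: when the caller wants the change logged, the WAL record must be written before any data page is touched, so crash recovery can always redo the change. A minimal ordering sketch, with hypothetical helpers standing in for the real WAL and SLRU routines:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helpers standing in for the real WAL and SLRU routines. */
static void
write_wal_record(void)
{
    puts("1. WAL record written");
}

static void
update_data_pages(void)
{
    puts("2. data pages updated");
}

/* WAL-before-data: if the change is to be logged at all, the log record must
 * reach WAL before the corresponding data is touched, so that replay after a
 * crash can always redo the change. */
static void
set_commit_ts(bool do_xlog)
{
    if (do_xlog)
        write_wal_record();         /* never after update_data_pages() */
    update_data_pages();
}

int
main(void)
{
    set_commit_ts(true);
    return 0;
}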
......@@ -159,9 +159,9 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids,
/*
* We split the xids to set the timestamp to in groups belonging to the
* same SLRU page; the first element in each such set is its head. The
* first group has the main XID as the head; subsequent sets use the
* first subxid not on the previous page as head. This way, we only have
* to lock/modify each SLRU page once.
* first group has the main XID as the head; subsequent sets use the first
* subxid not on the previous page as head. This way, we only have to
* lock/modify each SLRU page once.
*/
for (i = 0, headxid = xid;;)
{
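The comment above the loop describes how the subtransaction IDs are split into groups that live on the same SLRU page, each headed either by the main XID or by the first subxid that falls on a new page, so every page is locked and written exactly once. A self-contained sketch of that grouping, assuming 819 entries per page, with hypothetical names and printf in place of the per-page update:

#include <stdio.h>
#include <stdint.h>

#define XACTS_PER_PAGE 819          /* assumed number of entries per SLRU page */

static uint32_t
xid_to_page(uint32_t xid)
{
    return xid / XACTS_PER_PAGE;
}

/* Walk the main xid plus its (sorted) subxids and emit one update per SLRU
 * page, so each page only has to be locked and modified once. */
static void
set_tree_grouped(uint32_t xid, const uint32_t *subxids, int nsubxids)
{
    uint32_t headxid = xid;
    int      i = 0;

    for (;;)
    {
        uint32_t pageno = xid_to_page(headxid);
        int      j;

        /* count the remaining subxids that share headxid's page */
        for (j = i; j < nsubxids; j++)
            if (xid_to_page(subxids[j]) != pageno)
                break;

        printf("page %u: head %u plus %d subxid(s)\n",
               (unsigned) pageno, (unsigned) headxid, j - i);

        if (j >= nsubxids)
            break;                  /* every xid has been assigned to a page */

        /* next group is headed by the first subxid on the next page */
        headxid = subxids[j];
        i = j + 1;
    }
}

int
main(void)
{
    /* subxids are assumed sorted ascending, as in a real transaction tree */
    uint32_t subxids[] = {820, 821, 1700, 2500};

    set_tree_grouped(810, subxids, 4);
    return 0;
}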
......@@ -183,8 +183,8 @@ TransactionTreeSetCommitTsData(TransactionId xid, int nsubxids,
break;
/*
* Set the new head and skip over it, as well as over the subxids
* we just wrote.
* Set the new head and skip over it, as well as over the subxids we
* just wrote.
*/
headxid = subxids[j];
i += j - i + 1;
......@@ -271,14 +271,14 @@ TransactionIdGetCommitTsData(TransactionId xid, TimestampTz *ts,
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not get commit timestamp data"),
errhint("Make sure the configuration parameter \"%s\" is set.",
"track_commit_timestamp")));
errhint("Make sure the configuration parameter \"%s\" is set.",
"track_commit_timestamp")));
/* error if the given Xid doesn't normally commit */
if (!TransactionIdIsNormal(xid))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("cannot retrieve commit timestamp for transaction %u", xid)));
errmsg("cannot retrieve commit timestamp for transaction %u", xid)));
/*
* Return empty if the requested value is outside our valid range.
......@@ -350,15 +350,15 @@ TransactionIdGetCommitTsData(TransactionId xid, TimestampTz *ts,
TransactionId
GetLatestCommitTsData(TimestampTz *ts, RepOriginId *nodeid)
{
TransactionId xid;
TransactionId xid;
/* Error if module not enabled */
if (!track_commit_timestamp)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("could not get commit timestamp data"),
errhint("Make sure the configuration parameter \"%s\" is set.",
"track_commit_timestamp")));
errhint("Make sure the configuration parameter \"%s\" is set.",
"track_commit_timestamp")));
LWLockAcquire(CommitTsLock, LW_SHARED);
xid = commitTsShared->xidLastCommit;
......@@ -377,9 +377,9 @@ GetLatestCommitTsData(TimestampTz *ts, RepOriginId *nodeid)
Datum
pg_xact_commit_timestamp(PG_FUNCTION_ARGS)
{
TransactionId xid = PG_GETARG_UINT32(0);
TimestampTz ts;
bool found;
TransactionId xid = PG_GETARG_UINT32(0);
TimestampTz ts;
bool found;
found = TransactionIdGetCommitTsData(xid, &ts, NULL);
......@@ -393,11 +393,11 @@ pg_xact_commit_timestamp(PG_FUNCTION_ARGS)
Datum
pg_last_committed_xact(PG_FUNCTION_ARGS)
{
TransactionId xid;
TimestampTz ts;
Datum values[2];
bool nulls[2];
TupleDesc tupdesc;
TransactionId xid;
TimestampTz ts;
Datum values[2];
bool nulls[2];
TupleDesc tupdesc;
HeapTuple htup;
/* and construct a tuple with our data */
......@@ -462,7 +462,7 @@ CommitTsShmemSize(void)
void
CommitTsShmemInit(void)
{
bool found;
bool found;
CommitTsCtl->PagePrecedes = CommitTsPagePrecedes;
SimpleLruInit(CommitTsCtl, "CommitTs Ctl", CommitTsShmemBuffers(), 0,
......@@ -495,8 +495,8 @@ BootStrapCommitTs(void)
{
/*
* Nothing to do here at present, unlike most other SLRU modules; segments
* are created when the server is started with this module enabled.
* See StartupCommitTs.
* are created when the server is started with this module enabled. See
* StartupCommitTs.
*/
}
......@@ -561,9 +561,9 @@ CompleteCommitTsInitialization(void)
/*
* Activate this module whenever necessary.
* This must happen during postmaster or standalone-backend startup,
* or during WAL replay anytime the track_commit_timestamp setting is
* changed in the master.
* This must happen during postmaster or standalone-backend startup,
* or during WAL replay anytime the track_commit_timestamp setting is
* changed in the master.
*
* The reason why this SLRU needs separate activation/deactivation functions is
* that it can be enabled/disabled during start and the activation/deactivation
......@@ -612,7 +612,7 @@ ActivateCommitTs(void)
/* Finally, create the current segment file, if necessary */
if (!SimpleLruDoesPhysicalPageExist(CommitTsCtl, pageno))
{
int slotno;
int slotno;
LWLockAcquire(CommitTsControlLock, LW_EXCLUSIVE);
slotno = ZeroCommitTsPage(pageno, false);
......@@ -834,7 +834,7 @@ WriteSetTimestampXlogRec(TransactionId mainxid, int nsubxids,
TransactionId *subxids, TimestampTz timestamp,
RepOriginId nodeid)
{
xl_commit_ts_set record;
xl_commit_ts_set record;
record.timestamp = timestamp;
record.nodeid = nodeid;
......@@ -907,7 +907,7 @@ commit_ts_redo(XLogReaderState *record)
subxids = NULL;
TransactionTreeSetCommitTsData(setts->mainxid, nsubxids, subxids,
setts->timestamp, setts->nodeid, false);
setts->timestamp, setts->nodeid, false);
if (subxids)
pfree(subxids);
}
......
......@@ -965,7 +965,7 @@ GetNewMultiXactId(int nmembers, MultiXactOffset *offset)
*/
if (!MultiXactIdPrecedes(result, MultiXactState->multiVacLimit) ||
(MultiXactState->nextOffset - MultiXactState->oldestOffset
> MULTIXACT_MEMBER_SAFE_THRESHOLD))
> MULTIXACT_MEMBER_SAFE_THRESHOLD))
{
/*
* For safety's sake, we release MultiXactGenLock while sending
......@@ -1190,9 +1190,9 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
MultiXactIdSetOldestVisible();
/*
* If we know the multi is used only for locking and not for updates,
* then we can skip checking if the value is older than our oldest
* visible multi. It cannot possibly still be running.
* If we know the multi is used only for locking and not for updates, then
* we can skip checking if the value is older than our oldest visible
* multi. It cannot possibly still be running.
*/
if (onlyLock &&
MultiXactIdPrecedes(multi, OldestVisibleMXactId[MyBackendId]))
......@@ -1207,14 +1207,14 @@ GetMultiXactIdMembers(MultiXactId multi, MultiXactMember **members,
*
* An ID older than MultiXactState->oldestMultiXactId cannot possibly be
* useful; it has already been removed, or will be removed shortly, by
* truncation. Returning the wrong values could lead
* to an incorrect visibility result. However, to support pg_upgrade we
* need to allow an empty set to be returned regardless, if the caller is
* willing to accept it; the caller is expected to check that it's an
* allowed condition (such as ensuring that the infomask bits set on the
* tuple are consistent with the pg_upgrade scenario). If the caller is
* expecting this to be called only on recently created multis, then we
* raise an error.
* truncation. Returning the wrong values could lead to an incorrect
* visibility result. However, to support pg_upgrade we need to allow an
* empty set to be returned regardless, if the caller is willing to accept
* it; the caller is expected to check that it's an allowed condition
* (such as ensuring that the infomask bits set on the tuple are
* consistent with the pg_upgrade scenario). If the caller is expecting
* this to be called only on recently created multis, then we raise an
* error.
*
* Conversely, an ID >= nextMXact shouldn't ever be seen here; if it is
* seen, it implies undetected ID wraparound has occurred. This raises a
......@@ -2123,11 +2123,11 @@ MultiXactSetNextMXact(MultiXactId nextMulti,
* enough to contain the next value that would be created.
*
* We need to do this pretty early during the first startup in binary
* upgrade mode: before StartupMultiXact() in fact, because this routine is
* called even before that by StartupXLOG(). And we can't do it earlier
* than at this point, because during that first call of this routine we
* determine the MultiXactState->nextMXact value that MaybeExtendOffsetSlru
* needs.
* upgrade mode: before StartupMultiXact() in fact, because this routine
* is called even before that by StartupXLOG(). And we can't do it
* earlier than at this point, because during that first call of this
* routine we determine the MultiXactState->nextMXact value that
* MaybeExtendOffsetSlru needs.
*/
if (IsBinaryUpgrade)
MaybeExtendOffsetSlru();
......@@ -2202,11 +2202,11 @@ SetMultiXactIdLimit(MultiXactId oldest_datminmxid, Oid oldest_datoid)
/*
* Determine the offset of the oldest multixact that might still be
* referenced. Normally, we can read the offset from the multixact itself,
* but there's an important special case: if there are no multixacts in
* existence at all, oldest_datminmxid obviously can't point to one. It
* will instead point to the multixact ID that will be assigned the next
* time one is needed.
* referenced. Normally, we can read the offset from the multixact
* itself, but there's an important special case: if there are no
* multixacts in existence at all, oldest_datminmxid obviously can't point
* to one. It will instead point to the multixact ID that will be
* assigned the next time one is needed.
*
* NB: oldest_dataminmxid is the oldest multixact that might still be
* referenced from a table, unlike in DetermineSafeOldestOffset, where we
......@@ -2520,10 +2520,9 @@ DetermineSafeOldestOffset(MultiXactId oldestMXact)
* obviously can't point to one. It will instead point to the multixact
* ID that will be assigned the next time one is needed.
*
* NB: oldestMXact should be the oldest multixact that still exists in
* the SLRU, unlike in SetMultiXactIdLimit, where we do this same
* computation based on the oldest value that might be referenced in a
* table.
* NB: oldestMXact should be the oldest multixact that still exists in the
* SLRU, unlike in SetMultiXactIdLimit, where we do this same computation
* based on the oldest value that might be referenced in a table.
*/
LWLockAcquire(MultiXactGenLock, LW_SHARED);
if (MultiXactState->nextMXact == oldestMXact)
......@@ -2679,9 +2678,9 @@ int
MultiXactMemberFreezeThreshold(void)
{
MultiXactOffset members;
uint32 multixacts;
uint32 victim_multixacts;
double fraction;
uint32 multixacts;
uint32 victim_multixacts;
double fraction;
ReadMultiXactCounts(&multixacts, &members);
......@@ -2800,7 +2799,7 @@ SlruScanDirCbFindEarliest(SlruCtl ctl, char *filename, int segpage, void *data)
void
TruncateMultiXact(void)
{
MultiXactId oldestMXact;
MultiXactId oldestMXact;
MultiXactOffset oldestOffset;
MultiXactOffset nextOffset;
mxtruncinfo trunc;
......
......@@ -117,7 +117,7 @@ typedef struct GlobalTransactionData
TimestampTz prepared_at; /* time of preparation */
XLogRecPtr prepare_lsn; /* XLOG offset of prepare record */
Oid owner; /* ID of user that executed the xact */
BackendId locking_backend; /* backend currently working on the xact */
BackendId locking_backend; /* backend currently working on the xact */
bool valid; /* TRUE if PGPROC entry is in proc array */
char gid[GIDSIZE]; /* The GID assigned to the prepared xact */
} GlobalTransactionData;
......@@ -256,24 +256,24 @@ AtAbort_Twophase(void)
return;
/*
* What to do with the locked global transaction entry? If we were in
* the process of preparing the transaction, but haven't written the WAL
* What to do with the locked global transaction entry? If we were in the
* process of preparing the transaction, but haven't written the WAL
* record and state file yet, the transaction must not be considered as
* prepared. Likewise, if we are in the process of finishing an
* already-prepared transaction, and fail after having already written
* the 2nd phase commit or rollback record to the WAL, the transaction
* should not be considered as prepared anymore. In those cases, just
* remove the entry from shared memory.
* already-prepared transaction, and fail after having already written the
* 2nd phase commit or rollback record to the WAL, the transaction should
* not be considered as prepared anymore. In those cases, just remove the
* entry from shared memory.
*
* Otherwise, the entry must be left in place so that the transaction
* can be finished later, so just unlock it.
* Otherwise, the entry must be left in place so that the transaction can
* be finished later, so just unlock it.
*
* If we abort during prepare, after having written the WAL record, we
* might not have transferred all locks and other state to the prepared
* transaction yet. Likewise, if we abort during commit or rollback,
* after having written the WAL record, we might not have released
* all the resources held by the transaction yet. In those cases, the
* in-memory state can be wrong, but it's too late to back out.
* after having written the WAL record, we might not have released all the
* resources held by the transaction yet. In those cases, the in-memory
* state can be wrong, but it's too late to back out.
*/
if (!MyLockedGxact->valid)
{
......@@ -408,8 +408,8 @@ MarkAsPreparing(TransactionId xid, const char *gid,
TwoPhaseState->prepXacts[TwoPhaseState->numPrepXacts++] = gxact;
/*
* Remember that we have this GlobalTransaction entry locked for us.
* If we abort after this, we must release it.
* Remember that we have this GlobalTransaction entry locked for us. If we
* abort after this, we must release it.
*/
MyLockedGxact = gxact;
......@@ -499,8 +499,8 @@ LockGXact(const char *gid, Oid user)
if (gxact->locking_backend != InvalidBackendId)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
errmsg("prepared transaction with identifier \"%s\" is busy",
gid)));
errmsg("prepared transaction with identifier \"%s\" is busy",
gid)));
if (user != gxact->owner && !superuser_arg(user))
ereport(ERROR,
......@@ -1423,8 +1423,8 @@ FinishPreparedTransaction(const char *gid, bool isCommit)
/*
* In case we fail while running the callbacks, mark the gxact invalid so
* no one else will try to commit/rollback, and so it will be recycled
* if we fail after this point. It is still locked by our backend so it
* no one else will try to commit/rollback, and so it will be recycled if
* we fail after this point. It is still locked by our backend so it
* won't go away yet.
*
* (We assume it's safe to do this without taking TwoPhaseStateLock.)
......@@ -2055,8 +2055,9 @@ RecoverPreparedTransactions(void)
StandbyReleaseLockTree(xid, hdr->nsubxacts, subxids);
/*
* We're done with recovering this transaction. Clear MyLockedGxact,
* like we do in PrepareTransaction() during normal operation.
* We're done with recovering this transaction. Clear
* MyLockedGxact, like we do in PrepareTransaction() during normal
* operation.
*/
PostPrepare_Twophase();
......
......@@ -33,7 +33,7 @@
#include "pg_trace.h"
/* Buffer size required to store a compressed version of backup block image */
#define PGLZ_MAX_BLCKSZ PGLZ_MAX_OUTPUT(BLCKSZ)
#define PGLZ_MAX_BLCKSZ PGLZ_MAX_OUTPUT(BLCKSZ)
/*
* For each block reference registered with XLogRegisterBuffer, we fill in
......@@ -58,7 +58,7 @@ typedef struct
/* buffer to store a compressed version of backup block image */
char compressed_page[PGLZ_MAX_BLCKSZ];
} registered_buffer;
} registered_buffer;
static registered_buffer *registered_buffers;
static int max_registered_buffers; /* allocated size */
......@@ -110,7 +110,7 @@ static XLogRecData *XLogRecordAssemble(RmgrId rmid, uint8 info,
XLogRecPtr RedoRecPtr, bool doPageWrites,
XLogRecPtr *fpw_lsn);
static bool XLogCompressBackupBlock(char *page, uint16 hole_offset,
uint16 hole_length, char *dest, uint16 *dlen);
uint16 hole_length, char *dest, uint16 *dlen);
/*
* Begin constructing a WAL record. This must be called before the
......@@ -602,7 +602,10 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
&compressed_len);
}
/* Fill in the remaining fields in the XLogRecordBlockHeader struct */
/*
* Fill in the remaining fields in the XLogRecordBlockHeader
* struct
*/
bkpb.fork_flags |= BKPBLOCK_HAS_IMAGE;
/*
......@@ -762,7 +765,7 @@ XLogRecordAssemble(RmgrId rmid, uint8 info,
* the length of compressed block image.
*/
static bool
XLogCompressBackupBlock(char * page, uint16 hole_offset, uint16 hole_length,
XLogCompressBackupBlock(char *page, uint16 hole_offset, uint16 hole_length,
char *dest, uint16 *dlen)
{
int32 orig_len = BLCKSZ - hole_length;
......@@ -790,16 +793,15 @@ XLogCompressBackupBlock(char * page, uint16 hole_offset, uint16 hole_length,
source = page;
/*
* We recheck the actual size even if pglz_compress() reports success
* and see if the number of bytes saved by compression is larger than
* the length of extra data needed for the compressed version of block
* image.
* We recheck the actual size even if pglz_compress() reports success and
* see if the number of bytes saved by compression is larger than the
* length of extra data needed for the compressed version of block image.
*/
len = pglz_compress(source, orig_len, dest, PGLZ_strategy_default);
if (len >= 0 &&
len + extra_bytes < orig_len)
{
*dlen = (uint16) len; /* successful compression */
*dlen = (uint16) len; /* successful compression */
return true;
}
return false;
......
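The rewrapped comment in XLogCompressBackupBlock above makes the point that a successful compression call is not enough: the compressed image is only used when the bytes it saves exceed the extra header bytes the compressed format requires. A sketch of that payoff check, with a toy compressor standing in for the real pglz routine:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Toy compressor standing in for the real pglz routine: it just reports how
 * long the compressed image would be, or -1 if compression failed. */
static int32_t
toy_compress(const char *src, int32_t srclen, char *dst)
{
    (void) src;
    (void) dst;
    return srclen / 2;              /* pretend the data halved in size */
}

/* Accept the compressed image only when the bytes it saves outweigh the
 * extra header bytes the compressed on-disk format needs. */
static bool
compress_pays_off(const char *page, int32_t orig_len,
                  int32_t extra_bytes, char *dest, uint16_t *dlen)
{
    int32_t len = toy_compress(page, orig_len, dest);

    if (len >= 0 && len + extra_bytes < orig_len)
    {
        *dlen = (uint16_t) len;     /* worthwhile: store it compressed */
        return true;
    }
    return false;                   /* otherwise store the block as-is */
}

int
main(void)
{
    char     page[8192] = {0};
    char     dest[8192];
    uint16_t dlen = 0;

    printf("use compression: %d\n",
           (int) compress_pays_off(page, (int32_t) sizeof(page), 2, dest, &dlen));
    return 0;
}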
......@@ -401,6 +401,7 @@ AuxiliaryProcessMain(int argc, char *argv[])
proc_exit(1); /* should never return */
case BootstrapProcess:
/*
* There was a brief instant during which mode was Normal; this is
* okay. We need to be in bootstrap mode during BootStrapXLOG for
......
......@@ -189,7 +189,8 @@ sub Catalogs
}
else
{
die "unknown column option $attopt on column $attname"
die
"unknown column option $attopt on column $attname";
}
}
push @{ $catalog{columns} }, \%row;
......
......@@ -213,8 +213,8 @@ deleteObjectsInList(ObjectAddresses *targetObjects, Relation *depRel,
{
const ObjectAddress *thisobj = &targetObjects->refs[i];
const ObjectAddressExtra *extra = &targetObjects->extras[i];
bool original = false;
bool normal = false;
bool original = false;
bool normal = false;
if (extra->flags & DEPFLAG_ORIGINAL)
original = true;
......@@ -1611,10 +1611,10 @@ find_expr_references_walker(Node *node,
context->addrs);
break;
/*
* Dependencies for regrole should be shared among all
* databases, so explicitly inhibit having dependencies.
*/
/*
* Dependencies for regrole should be shared among all
* databases, so explicitly inhibit having dependencies.
*/
case REGROLEOID:
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
......