Commit f92d6a54 authored by Heikki Linnakangas

Use appendStringInfoString/Char et al where appropriate.

Patch by David Rowley. Backpatch to 9.5, as some of the calls were new in
9.5, and keeping the code in sync with master makes future backpatching
easier.
parent 7931622d
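The pattern applied throughout the diff is mechanical: where the format string passed to appendStringInfo() or appendPQExpBuffer() contains no conversion specifiers, the plain-string variant skips the printf-style format processing, and a one-character literal can use the Char variant instead. A minimal sketch of the rewrite rules, assuming PostgreSQL's backend StringInfo API from lib/stringinfo.h (the helper demo_append is hypothetical, for illustration only):

#include "postgres.h"
#include "lib/stringinfo.h"

/* Hypothetical helper showing the substitutions this commit applies. */
static void
demo_append(StringInfo buf)
{
	/* Constant text with no % specifiers: use the String variant ... */
	appendStringInfoString(buf, " ORDER BY c.relname, a.attnum");
	/* ... rather than appendStringInfo(buf, " ORDER BY c.relname, a.attnum"); */

	/* A single character: use the Char variant ... */
	appendStringInfoChar(buf, '\n');
	/* ... rather than appendStringInfoString(buf, "\n"); */
}

The frontend hunks (pg_dump, pg_basebackup, psql and the scripts) apply the same rules to the PQExpBuffer family, where appendPQExpBufferStr() and appendPQExpBufferChar() replace appendPQExpBuffer() calls whose format is a plain string, a lone character, or a bare "%s" wrapping a single string argument.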
......@@ -2738,7 +2738,7 @@ postgresImportForeignSchema(ImportForeignSchemaStmt *stmt, Oid serverOid)
}
/* Append ORDER BY at the end of query to ensure output ordering */
-appendStringInfo(&buf, " ORDER BY c.relname, a.attnum");
+appendStringInfoString(&buf, " ORDER BY c.relname, a.attnum");
/* Fetch the data */
res = PQexec(conn, buf.data);
......
......@@ -113,7 +113,7 @@ gin_desc(StringInfo buf, XLogReaderState *record)
(ginxlogRecompressDataLeaf *) payload;
if (XLogRecHasBlockImage(record, 0))
appendStringInfo(buf, " (full page image)");
appendStringInfoString(buf, " (full page image)");
else
desc_recompress_leaf(buf, insertData);
}
......@@ -147,7 +147,7 @@ gin_desc(StringInfo buf, XLogReaderState *record)
ginxlogVacuumDataLeafPage *xlrec = (ginxlogVacuumDataLeafPage *) rec;
if (XLogRecHasBlockImage(record, 0))
appendStringInfo(buf, " (full page image)");
appendStringInfoString(buf, " (full page image)");
else
desc_recompress_leaf(buf, &xlrec->data);
}
......
......@@ -30,14 +30,14 @@ spg_desc(StringInfo buf, XLogReaderState *record)
{
spgxlogAddLeaf *xlrec = (spgxlogAddLeaf *) rec;
appendStringInfo(buf, "add leaf to page");
appendStringInfoString(buf, "add leaf to page");
appendStringInfo(buf, "; off %u; headoff %u; parentoff %u",
xlrec->offnumLeaf, xlrec->offnumHeadLeaf,
xlrec->offnumParent);
if (xlrec->newPage)
appendStringInfo(buf, " (newpage)");
appendStringInfoString(buf, " (newpage)");
if (xlrec->storesNulls)
appendStringInfo(buf, " (nulls)");
appendStringInfoString(buf, " (nulls)");
}
break;
case XLOG_SPGIST_MOVE_LEAFS:
......@@ -63,9 +63,9 @@ spg_desc(StringInfo buf, XLogReaderState *record)
appendStringInfo(buf, "ndel %u; nins %u",
xlrec->nDelete, xlrec->nInsert);
if (xlrec->innerIsParent)
appendStringInfo(buf, " (innerIsParent)");
appendStringInfoString(buf, " (innerIsParent)");
if (xlrec->isRootSplit)
appendStringInfo(buf, " (isRootSplit)");
appendStringInfoString(buf, " (isRootSplit)");
}
break;
case XLOG_SPGIST_VACUUM_LEAF:
......
......@@ -232,7 +232,7 @@ xact_desc_commit(StringInfo buf, uint8 info, xl_xact_commit *xlrec, RepOriginId
}
if (XactCompletionForceSyncCommit(parsed.xinfo))
appendStringInfo(buf, "; sync");
appendStringInfoString(buf, "; sync");
if (parsed.xinfo & XACT_XINFO_HAS_ORIGIN)
{
......
......@@ -1097,7 +1097,7 @@ XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn)
if (!debug_reader)
{
appendStringInfo(&buf, "error decoding record: out of memory");
appendStringInfoString(&buf, "error decoding record: out of memory");
}
else if (!DecodeXLogRecord(debug_reader, (XLogRecord *) recordBuf.data,
&errormsg))
......@@ -9528,7 +9528,7 @@ xlog_outrec(StringInfo buf, XLogReaderState *record)
rnode.spcNode, rnode.dbNode, rnode.relNode,
blk);
if (XLogRecHasBlockImage(record, block_id))
appendStringInfo(buf, " FPW");
appendStringInfoString(buf, " FPW");
}
}
#endif /* WAL_DEBUG */
......
......@@ -306,7 +306,7 @@ pairingheap_dump_recurse(StringInfo buf,
appendStringInfoSpaces(buf, depth * 4);
dumpfunc(node, buf, opaque);
appendStringInfoString(buf, "\n");
appendStringInfoChar(buf, '\n');
if (node->first_child)
pairingheap_dump_recurse(buf, node->first_child, dumpfunc, opaque, depth + 1, node);
prev_or_parent = node;
......
......@@ -5487,7 +5487,7 @@ get_insert_query_def(Query *query, deparse_context *context)
{
OnConflictExpr *confl = query->onConflict;
appendStringInfo(buf, " ON CONFLICT");
appendStringInfoString(buf, " ON CONFLICT");
if (confl->arbiterElems)
{
......
......@@ -2473,7 +2473,7 @@ query_to_xml_internal(const char *query, char *tablename,
{
xmldata_root_element_start(result, xmltn, xmlschema,
targetns, top_level);
appendStringInfoString(result, "\n");
appendStringInfoChar(result, '\n');
}
if (xmlschema)
......@@ -2637,7 +2637,7 @@ schema_to_xml_internal(Oid nspid, const char *xmlschema, bool nulls,
result = makeStringInfo();
xmldata_root_element_start(result, xmlsn, xmlschema, targetns, top_level);
appendStringInfoString(result, "\n");
appendStringInfoChar(result, '\n');
if (xmlschema)
appendStringInfo(result, "%s\n\n", xmlschema);
......@@ -2815,7 +2815,7 @@ database_to_xml_internal(const char *xmlschema, bool nulls,
result = makeStringInfo();
xmldata_root_element_start(result, xmlcn, xmlschema, targetns, true);
appendStringInfoString(result, "\n");
appendStringInfoChar(result, '\n');
if (xmlschema)
appendStringInfo(result, "%s\n\n", xmlschema);
......
......@@ -1516,7 +1516,7 @@ GenerateRecoveryConf(PGconn *conn)
/* Separate key-value pairs with spaces */
if (conninfo_buf.len != 0)
-appendPQExpBufferStr(&conninfo_buf, " ");
+appendPQExpBufferChar(&conninfo_buf, ' ');
/*
* Write "keyword=value" pieces, the value string is escaped and/or
......
......@@ -533,7 +533,7 @@ RestoreArchive(Archive *AHX)
* search for hardcoded "DROP CONSTRAINT" instead.
*/
if (strcmp(te->desc, "DEFAULT") == 0)
-appendPQExpBuffer(ftStmt, "%s", dropStmt);
+appendPQExpBufferStr(ftStmt, dropStmt);
else
{
if (strcmp(te->desc, "CONSTRAINT") == 0 ||
......
......@@ -1659,7 +1659,7 @@ dumpTableData_insert(Archive *fout, DumpOptions *dopt, void *dcontext)
/* append the list of column names if required */
if (dopt->column_inserts)
{
-appendPQExpBufferStr(insertStmt, "(");
+appendPQExpBufferChar(insertStmt, '(');
for (field = 0; field < nfields; field++)
{
if (field > 0)
......@@ -11332,7 +11332,7 @@ dumpOpclass(Archive *fout, DumpOptions *dopt, OpclassInfo *opcinfo)
appendPQExpBufferStr(q, " FAMILY ");
if (strcmp(opcfamilynsp, opcinfo->dobj.namespace->dobj.name) != 0)
appendPQExpBuffer(q, "%s.", fmtId(opcfamilynsp));
appendPQExpBuffer(q, "%s", fmtId(opcfamilyname));
appendPQExpBufferStr(q, fmtId(opcfamilyname));
}
appendPQExpBufferStr(q, " AS\n ");
......@@ -13844,7 +13844,7 @@ dumpTableSchema(Archive *fout, DumpOptions *dopt, TableInfo *tbinfo)
if (actual_atts == 0)
appendPQExpBufferStr(q, " (");
else
appendPQExpBufferStr(q, ",");
appendPQExpBufferChar(q, ',');
appendPQExpBufferStr(q, "\n ");
actual_atts++;
......
......@@ -1611,7 +1611,7 @@ describeOneTableDetails(const char *schemaname,
if (!PQgetisnull(res, i, 5))
{
if (tmpbuf.len > 0)
appendPQExpBufferStr(&tmpbuf, " ");
appendPQExpBufferChar(&tmpbuf, ' ');
appendPQExpBuffer(&tmpbuf, _("collate %s"),
PQgetvalue(res, i, 5));
}
......@@ -1619,7 +1619,7 @@ describeOneTableDetails(const char *schemaname,
if (strcmp(PQgetvalue(res, i, 3), "t") == 0)
{
if (tmpbuf.len > 0)
appendPQExpBufferStr(&tmpbuf, " ");
appendPQExpBufferChar(&tmpbuf, ' ');
appendPQExpBufferStr(&tmpbuf, _("not null"));
}
......@@ -1628,7 +1628,7 @@ describeOneTableDetails(const char *schemaname,
if (strlen(PQgetvalue(res, i, 2)) != 0)
{
if (tmpbuf.len > 0)
appendPQExpBufferStr(&tmpbuf, " ");
appendPQExpBufferChar(&tmpbuf, ' ');
/* translator: default values of column definitions */
appendPQExpBuffer(&tmpbuf, _("default %s"),
PQgetvalue(res, i, 2));
......@@ -2440,7 +2440,7 @@ describeOneTableDetails(const char *schemaname,
printfPQExpBuffer(&buf, "%*s %s",
sw, "", PQgetvalue(result, i, 0));
if (i < tuples - 1)
appendPQExpBufferStr(&buf, ",");
appendPQExpBufferChar(&buf, ',');
printTableAddFooter(&cont, buf.data);
}
......
......@@ -201,7 +201,7 @@ cluster_one_database(const char *dbname, bool verbose, const char *table,
appendPQExpBufferStr(&sql, " VERBOSE");
if (table)
appendPQExpBuffer(&sql, " %s", table);
appendPQExpBufferStr(&sql, ";");
appendPQExpBufferChar(&sql, ';');
conn = connectDatabase(dbname, host, port, username, prompt_password,
progname, false);
......
......@@ -195,7 +195,7 @@ main(int argc, char *argv[])
if (lc_ctype)
appendPQExpBuffer(&sql, " LC_CTYPE '%s'", lc_ctype);
appendPQExpBufferStr(&sql, ";");
appendPQExpBufferChar(&sql, ';');
/* No point in trying to use postgres db when creating postgres db. */
if (maintenance_db == NULL && strcmp(dbname, "postgres") == 0)
......@@ -222,7 +222,7 @@ main(int argc, char *argv[])
{
printfPQExpBuffer(&sql, "COMMENT ON DATABASE %s IS ", fmtId(dbname));
appendStringLiteralConn(&sql, comment, conn);
appendPQExpBufferStr(&sql, ";");
appendPQExpBufferChar(&sql, ';');
if (echo)
printf("%s\n", sql.data);
......
......@@ -321,7 +321,7 @@ main(int argc, char *argv[])
appendPQExpBuffer(&sql, "%s", fmtId(cell->val));
}
}
appendPQExpBufferStr(&sql, ";");
appendPQExpBufferChar(&sql, ';');
if (echo)
printf("%s\n", sql.data);
......
......@@ -295,7 +295,7 @@ reindex_one_database(const char *name, const char *dbname, const char *type,
appendPQExpBuffer(&sql, " SCHEMA %s", name);
else if (strcmp(type, "DATABASE") == 0)
appendPQExpBuffer(&sql, " DATABASE %s", fmtId(name));
appendPQExpBufferStr(&sql, ";");
appendPQExpBufferChar(&sql, ';');
conn = connectDatabase(dbname, host, port, username, prompt_password,
progname, false);
......
......@@ -392,7 +392,7 @@ vacuum_one_database(const char *dbname, vacuumingOptions *vacopts,
ntups = PQntuples(res);
for (i = 0; i < ntups; i++)
{
appendPQExpBuffer(&buf, "%s",
appendPQExpBufferStr(&buf,
fmtQualifiedId(PQserverVersion(conn),
PQgetvalue(res, i, 1),
PQgetvalue(res, i, 0)));
......@@ -643,7 +643,7 @@ prepare_vacuum_command(PQExpBuffer sql, PGconn *conn, vacuumingOptions *vacopts,
sep = comma;
}
if (sep != paren)
appendPQExpBufferStr(sql, ")");
appendPQExpBufferChar(sql, ')');
}
else
{
......