Commit 0b11a674 authored by Tom Lane

Fix a boatload of typos in C comments.

Justin Pryzby

Discussion: https://postgr.es/m/20180331105640.GK28454@telsasoft.com
parent 686d399f
@@ -163,7 +163,7 @@ gen_tabs(void)
q;
/* log and power tables for GF(2**8) finite field with */
-/* 0x11b as modular polynomial - the simplest prmitive */
+/* 0x11b as modular polynomial - the simplest primitive */
/* root is 0x11, used here to generate the tables */
for (i = 0, p = 1; i < 256; ++i)
...
@@ -60,7 +60,7 @@ InitializeSession(void)
* Initialize the per-session DSM segment if it isn't already initialized, and
* return its handle so that worker processes can attach to it.
*
-* Unlike the per-context DSM segment, this segement and its contents are
+* Unlike the per-context DSM segment, this segment and its contents are
* reused for future parallel queries.
*
* Return DSM_HANDLE_INVALID if a segment can't be allocated due to lack of
...
@@ -187,9 +187,9 @@ top:
_bt_relbuf(rel, buf);
/*
-* Something did not workout. Just forget about the cached
+* Something did not work out. Just forget about the cached
* block and follow the normal path. It might be set again if
-* the conditions are favourble.
+* the conditions are favourable.
*/
RelationSetTargetBlock(rel, InvalidBlockNumber);
}
...
@@ -409,7 +409,7 @@ ExecSetExecProcNode(PlanState *node, ExecProcNodeMtd function)
* Add a wrapper around the ExecProcNode callback that checks stack depth
* during the first execution and maybe adds an instrumentation
* wrapper. When the callback is changed after execution has already begun
-* that means we'll superflously execute ExecProcNodeFirst, but that seems
+* that means we'll superfluously execute ExecProcNodeFirst, but that seems
* ok.
*/
node->ExecProcNodeReal = function;
...
@@ -1768,7 +1768,7 @@ llvm_compile_expr(ExprState *state)
b_compare_result,
b_null);
-/* build block analying the !NULL comparator result */
+/* build block analyzing the !NULL comparator result */
LLVMPositionBuilderAtEnd(b, b_compare_result);
/* if results equal, compare next, otherwise done */
...
@@ -92,7 +92,7 @@ print_gen(FILE *fp, Pool *pool, int generation)
{
int lowest;
-/* Get index to lowest ranking gene in poplulation. */
+/* Get index to lowest ranking gene in population. */
/* Use 2nd to last since last is buffer. */
lowest = pool->size > 1 ? pool->size - 2 : 0;
...
@@ -6709,7 +6709,7 @@ create_partial_grouping_paths(PlannerInfo *root,
* Gather Merge.
*
* NB: This function shouldn't be used for anything other than a grouped or
-* partially grouped relation not only because of the fact that it explcitly
+* partially grouped relation not only because of the fact that it explicitly
* references group_pathkeys but we pass "true" as the third argument to
* generate_gather_paths().
*/
@@ -6841,7 +6841,7 @@ apply_scanjoin_target_to_paths(PlannerInfo *root,
*/
rel->reltarget = llast_node(PathTarget, scanjoin_targets);
-/* Special case: handly dummy relations separately. */
+/* Special case: handle dummy relations separately. */
if (is_dummy_rel)
{
/*
...
@@ -710,7 +710,7 @@ adjustJoinTreeList(Query *parsetree, bool removert, int rt_index)
* using the parent relation as reference. It must not do anything that
* will not be correct when transposed to the child relation(s). (Step 4
* is incorrect by this light, since child relations might have different
-* colun ordering, but the planner will fix things by re-sorting the tlist
+* column ordering, but the planner will fix things by re-sorting the tlist
* for each child.)
*/
static List *
...
@@ -374,7 +374,7 @@ on_shmem_exit(pg_on_exit_callback function, Datum arg)
/* ----------------------------------------------------------------
* cancel_before_shmem_exit
*
-* this function removes a previously-registed before_shmem_exit
+* this function removes a previously-registered before_shmem_exit
* callback. For simplicity, only the latest entry can be
* removed. (We could work harder but there is no need for
* current uses.)
...
@@ -1172,7 +1172,7 @@ get_object_field_end(void *state, char *fname, bool isnull)
if (get_last && _state->result_start != NULL)
{
/*
-* make a text object from the string from the prevously noted json
+* make a text object from the string from the previously noted json
* start up to the end of the previous token (the lexer is by now
* ahead of us on whatever came after what we're interested in).
*/
...
@@ -912,7 +912,7 @@ ascii(PG_FUNCTION_ARGS)
*
* Returns the character having the binary equivalent to val.
*
-* For UTF8 we treat the argumwent as a Unicode code point.
+* For UTF8 we treat the argument as a Unicode code point.
* For other multi-byte encodings we raise an error for arguments
* outside the strict ASCII range (1..127).
*
...
@@ -649,7 +649,7 @@ dsa_pin_mapping(dsa_area *area)
* will result in an ERROR.
*
* DSA_ALLOC_NO_OOM causes this function to return InvalidDsaPointer when
-* no memory is available or a size limit establed by set_dsa_size_limit
+* no memory is available or a size limit established by set_dsa_size_limit
* would be exceeded. Otherwise, such allocations will result in an ERROR.
*
* DSA_ALLOC_ZERO causes the allocated memory to be zeroed. Otherwise, the
...
@@ -386,7 +386,7 @@ sts_puttuple(SharedTuplestoreAccessor *accessor, void *meta_data,
sts_flush_chunk(accessor);
/*
-* How many oveflow chunks to go? This will allow readers to
+* How many overflow chunks to go? This will allow readers to
* skip all of them at once instead of reading each one.
*/
accessor->write_chunk->overflow = (size + STS_CHUNK_DATA_SIZE - 1) /
...
@@ -121,7 +121,7 @@ sendFeedback(PGconn *conn, TimestampTz now, bool force, bool replyRequested)
int len = 0;
/*
-* we normally don't want to send superfluous feedbacks, but if it's
+* we normally don't want to send superfluous feedback, but if it's
* because of a timeout we need to, otherwise wal_sender_timeout will kill
* us.
*/
...
@@ -811,7 +811,7 @@ main(int argc, char **argv)
/*
* In binary-upgrade mode, we do not have to worry about the actual blob
* data or the associated metadata that resides in the pg_largeobject and
-* pg_largeobject_metadata tables, respectivly.
+* pg_largeobject_metadata tables, respectively.
*
* However, we do need to collect blob information as there may be
* comments or other information on blobs that we do need to dump out.
...
@@ -17,7 +17,7 @@
* too much time if the crosstab to generate happens to be unreasonably large
* (worst case: a NxN cartesian product with N=number of tuples).
* The value of 1600 corresponds to the maximum columns per table in storage,
-* but it could be as much as INT_MAX theorically.
+* but it could be as much as INT_MAX theoretically.
*/
#define CROSSTABVIEW_MAX_COLUMNS 1600
...
@@ -239,7 +239,7 @@ typedef HashScanOpaqueData *HashScanOpaque;
#define HASH_SPLITPOINT_PHASE_MASK (HASH_SPLITPOINT_PHASES_PER_GRP - 1)
#define HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE 10
-/* defines max number of splitpoit phases a hash index can have */
+/* defines max number of splitpoint phases a hash index can have */
#define HASH_MAX_SPLITPOINT_GROUP 32
#define HASH_MAX_SPLITPOINTS \
(((HASH_MAX_SPLITPOINT_GROUP - HASH_SPLITPOINT_GROUPS_WITH_ONE_PHASE) * \
...
@@ -190,7 +190,7 @@ typedef struct ParallelHashJoinBatch
/*
* Each backend requires a small amount of per-batch state to interact with
-* each ParalellHashJoinBatch.
+* each ParallelHashJoinBatch.
*/
typedef struct ParallelHashJoinBatchAccessor
{
@@ -201,7 +201,7 @@ typedef struct ParallelHashJoinBatchAccessor
size_t ntuples; /* number of tuples */
size_t size; /* size of partition in memory */
size_t estimated_size; /* size of partition on disk */
-size_t old_ntuples; /* how many tuples before repartioning? */
+size_t old_ntuples; /* how many tuples before repartitioning? */
bool at_least_one_chunk; /* has this backend allocated a chunk? */
bool done; /* flag to remember that a batch is done */
...
@@ -104,7 +104,7 @@ typedef struct AggStatePerTransData
/*
* Comparators for input columns --- only set/used when aggregate has
* DISTINCT flag. equalfnOne version is used for single-column
-* commparisons, equalfnMulti for the case of multiple columns.
+* comparisons, equalfnMulti for the case of multiple columns.
*/
FmgrInfo equalfnOne;
ExprState *equalfnMulti;
...
@@ -47,7 +47,7 @@ struct TableFuncScanState;
*
* DestroyBuilder shall release all resources associated with a table builder
* context. It may be called either because all rows have been consumed, or
-* because an error ocurred while processing the table expression.
+* because an error occurred while processing the table expression.
*/
typedef struct TableFuncRoutine
{
...
@@ -15,7 +15,7 @@
#include "utils/resowner.h"
-/* Flags deterimining what kind of JIT operations to perform */
+/* Flags determining what kind of JIT operations to perform */
#define PGJIT_NONE 0
#define PGJIT_PERFORM 1 << 0
#define PGJIT_OPT3 1 << 1
...
@@ -107,7 +107,7 @@ extern void llvm_inline(LLVMModuleRef mod);
/*
****************************************************************************
-* Code ceneration functions.
+* Code generation functions.
****************************************************************************
*/
extern bool llvm_compile_expr(struct ExprState *state);
...
@@ -42,7 +42,7 @@ typedef dshash_hash (*dshash_hash_function) (const void *v, size_t size,
* Compare and hash functions must be supplied even when attaching, because we
* can't safely share function pointers between backends in general. Either
* the arg variants or the non-arg variants should be supplied; the other
-* function pointers should be NULL. If the arg varants are supplied then the
+* function pointers should be NULL. If the arg variants are supplied then the
* user data pointer supplied to the create and attach functions will be
* passed to the hash and compare functions.
*/
...
@@ -410,7 +410,7 @@ extern const pg_wchar_tbl pg_wchar_table[];
* points to a lookup table for the second byte. And so on.
*
* Physically, all the trees are stored in one big array, in 'chars16' or
-* 'chars32', depending on the maximum value that needs to be reprented. For
+* 'chars32', depending on the maximum value that needs to be represented. For
* each level in each tree, we also store lower and upper bound of allowed
* values - values outside those bounds are considered invalid, and are left
* out of the tables.
...
@@ -1444,7 +1444,7 @@ typedef JoinPath NestPath;
* that the executor need find only one match per outer tuple, and that the
* mergeclauses are sufficient to identify a match. In such cases the
* executor can immediately advance the outer relation after processing a
-* match, and therefoere it need never back up the inner relation.
+* match, and therefore it need never back up the inner relation.
*
* materialize_inner is true if a Material node should be placed atop the
* inner input. This may appear with or without an inner Sort step.
...
@@ -347,7 +347,7 @@ extern int isinf(double x);
/*
* Glibc doesn't use the builtin for clang due to a *gcc* bug in a version
* newer than the gcc compatibility clang claims to have. This would cause a
-* *lot* of superflous function calls, therefore revert when using clang.
+* *lot* of superfluous function calls, therefore revert when using clang.
*/
#ifdef __clang__
/* needs to be separate to not confuse other compilers */
...
@@ -471,7 +471,7 @@ ecpg_get_data(const PGresult *results, int act_tuple, int act_field, int lineno,
memcpy(str, pval, size);
str[varcharsize-1] = '\0';
-/* compatiblity mode empty string gets -1 indicator but no warning */
+/* compatibility mode empty string gets -1 indicator but no warning */
if (size == 0) {
/* truncation */
switch (ind_type)
...
@@ -316,7 +316,7 @@ DecodeISO8601Interval(char *str,
* places where DecodeTime is called; and added
* int range = INTERVAL_FULL_RANGE;
*
-* * ECPG semes not to have a global IntervalStyle
+* * ECPG seems not to have a global IntervalStyle
* so added
* int IntervalStyle = INTSTYLE_POSTGRES;
*/
...
@@ -84,7 +84,7 @@ PLyUnicode_Bytes(PyObject *unicode)
* function. The result is palloc'ed.
*
* Note that this function is disguised as PyString_AsString() when
-* using Python 3. That function retuns a pointer into the internal
+* using Python 3. That function returns a pointer into the internal
* memory of the argument, which isn't exactly the interface of this
* function. But in either case you get a rather short-lived
* reference that you ought to better leave alone.
...