/*-------------------------------------------------------------------------
 *
 * nodeHash.c
 *	  Routines to hash relations for hashjoin
 *
 * Portions Copyright (c) 1996-2002, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  $Header: /cvsroot/pgsql/src/backend/executor/nodeHash.c,v 1.77 2003/07/21 17:05:09 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
/*
 * INTERFACE ROUTINES
 *		ExecHash		- generate an in-memory hash table of the relation
 *		ExecInitHash	- initialize node and subnodes
 *		ExecEndHash		- shutdown node and subnodes
 */
#include "postgres.h"
22

23
#include "executor/execdebug.h"
24 25
#include "executor/nodeHash.h"
#include "executor/nodeHashjoin.h"
26
#include "miscadmin.h"
27
#include "parser/parse_expr.h"
28
#include "utils/memutils.h"
29
#include "utils/lsyscache.h"
30

31 32

/* ----------------------------------------------------------------
33
 *		ExecHash
34
 *
35 36
 *		build hash table for hashjoin, all do partitioning if more
 *		than one batches are required.
37 38 39
 * ----------------------------------------------------------------
 */
TupleTableSlot *
40
ExecHash(HashState *node)
41
{
42
	EState	   *estate;
43
	PlanState  *outerNode;
44
	List	   *hashkeys;
45
	HashJoinTable hashtable;
46
	TupleTableSlot *slot;
47 48 49
	ExprContext *econtext;
	int			nbatch;
	int			i;
50

51 52
	/*
	 * get state info from node
53
	 */
54 55
	estate = node->ps.state;
	outerNode = outerPlanState(node);
56

57
	hashtable = node->hashtable;
58 59 60
	nbatch = hashtable->nbatch;

	if (nbatch > 0)
61
	{
62 63 64
		/*
		 * Open temp files for inner batches, if needed. Note that file
		 * buffers are palloc'd in regular executor context.
65 66
		 */
		for (i = 0; i < nbatch; i++)
67
			hashtable->innerBatchFile[i] = BufFileCreateTemp(false);
68 69
	}

70 71
	/*
	 * set expression context
72
	 */
73
	hashkeys = node->hashkeys;
74
	econtext = node->ps.ps_ExprContext;
75

76 77
	/*
	 * get all inner tuples and insert into the hash table (or temp files)
78 79 80
	 */
	for (;;)
	{
81
		slot = ExecProcNode(outerNode);
82 83 84
		if (TupIsNull(slot))
			break;
		econtext->ecxt_innertuple = slot;
85
		ExecHashTableInsert(hashtable, econtext, hashkeys);
86 87 88
		ExecClearTuple(slot);
	}

89 90 91
	/*
	 * Return the slot so that we have the tuple descriptor when we need
	 * to save/restore them.  -Jeff 11 July 1991
92 93
	 */
	return slot;
94 95 96
}

/* ----------------------------------------------------------------
97
 *		ExecInitHash
98
 *
99
 *		Init routine for Hash node
100 101
 * ----------------------------------------------------------------
 */
102 103
HashState *
ExecInitHash(Hash *node, EState *estate)
104
{
105
	HashState  *hashstate;
106

107
	SO_printf("ExecInitHash: initializing hash node\n");
108

109
	/*
110 111 112
	 * create state structure
	 */
	hashstate = makeNode(HashState);
113 114
	hashstate->ps.plan = (Plan *) node;
	hashstate->ps.state = estate;
115
	hashstate->hashtable = NULL;
116

117 118
	/*
	 * Miscellaneous initialization
119
	 *
120
	 * create expression context for node
121
	 */
122
	ExecAssignExprContext(estate, &hashstate->ps);
123

124 125
#define HASH_NSLOTS 1

126
	/*
127 128
	 * initialize our result slot
	 */
129
	ExecInitResultTupleSlot(estate, &hashstate->ps);
130

131
	/*
132
	 * initialize child expressions
133
	 */
134
	hashstate->ps.targetlist = (List *)
135
		ExecInitExpr((Expr *) node->plan.targetlist,
136 137
					 (PlanState *) hashstate);
	hashstate->ps.qual = (List *)
138
		ExecInitExpr((Expr *) node->plan.qual,
139 140 141 142 143 144
					 (PlanState *) hashstate);

	/*
	 * initialize child nodes
	 */
	outerPlanState(hashstate) = ExecInitNode(outerPlan(node), estate);
145

146 147 148
	/*
	 * initialize tuple type. no need to initialize projection info
	 * because this node doesn't do projections
149
	 */
150 151
	ExecAssignResultTypeFromOuterPlan(&hashstate->ps);
	hashstate->ps.ps_ProjInfo = NULL;
152

153
	return hashstate;
154 155 156
}

int
157
ExecCountSlotsHash(Hash *node)
158
{
159
	return ExecCountSlotsNode(outerPlan(node)) +
160 161
		ExecCountSlotsNode(innerPlan(node)) +
		HASH_NSLOTS;
162 163 164
}

/* ---------------------------------------------------------------
165
 *		ExecEndHash
166
 *
167
 *		clean up routine for Hash node
168 169 170
 * ----------------------------------------------------------------
 */
void
171
ExecEndHash(HashState *node)
172
{
173
	PlanState  *outerPlan;
174

175
	/*
176
	 * free exprcontext
177
	 */
178
	ExecFreeExprContext(&node->ps);
179

180 181
	/*
	 * shut down the subplan
182
	 */
183 184
	outerPlan = outerPlanState(node);
	ExecEndNode(outerPlan);
185 186
}

187

188
/* ----------------------------------------------------------------
 *		ExecHashTableCreate
 *
 *		create an empty hashtable data structure for hashjoin.
 *
 *		NOTE: despite what older comments said, the table lives in
 *		backend-private memory (palloc'd contexts), not shared memory.
 * ----------------------------------------------------------------
 */
HashJoinTable
ExecHashTableCreate(Hash *node, List *hashOperators)
{
	HashJoinTable hashtable;
	Plan	   *outerNode;
	int			totalbuckets;
	int			nbuckets;
	int			nbatch;
	int			nkeys;
	int			i;
	List	   *ho;
	MemoryContext oldcxt;

	/*
	 * Get information about the size of the relation to be hashed (it's
	 * the "outer" subtree of this node, but the inner relation of the
	 * hashjoin).  Compute the appropriate size of the hash table.
	 */
	outerNode = outerPlan(node);

	ExecChooseHashTableSize(outerNode->plan_rows, outerNode->plan_width,
							&totalbuckets, &nbuckets, &nbatch);

#ifdef HJDEBUG
	printf("nbatch = %d, totalbuckets = %d, nbuckets = %d\n",
		   nbatch, totalbuckets, nbuckets);
#endif

	/*
	 * Initialize the hash table control block.
	 *
	 * The hashtable control block is just palloc'd from the executor's
	 * per-query memory context.  Pointer fields start NULL; the arrays
	 * are allocated below in the hashtable's own contexts.
	 */
	hashtable = (HashJoinTable) palloc(sizeof(HashTableData));
	hashtable->nbuckets = nbuckets;
	hashtable->totalbuckets = totalbuckets;
	hashtable->buckets = NULL;
	hashtable->nbatch = nbatch;
	hashtable->curbatch = 0;
	hashtable->innerBatchFile = NULL;
	hashtable->outerBatchFile = NULL;
	hashtable->innerBatchSize = NULL;
	hashtable->outerBatchSize = NULL;

	/*
	 * Get info about the hash functions to be used for each hash key.
	 * Fails if any operator lacks a hash support function.
	 */
	nkeys = length(hashOperators);
	hashtable->hashfunctions = (FmgrInfo *) palloc(nkeys * sizeof(FmgrInfo));
	i = 0;
	foreach(ho, hashOperators)
	{
		Oid		hashfn;

		hashfn = get_op_hash_function(lfirsto(ho));
		if (!OidIsValid(hashfn))
			elog(ERROR, "could not find hash function for hash operator %u",
				 lfirsto(ho));
		fmgr_info(hashfn, &hashtable->hashfunctions[i]);
		i++;
	}

	/*
	 * Create temporary memory contexts in which to keep the hashtable
	 * working storage.  See notes in executor/hashjoin.h.
	 * batchCxt is a child of hashCxt, so destroying hashCxt frees both.
	 */
	hashtable->hashCxt = AllocSetContextCreate(CurrentMemoryContext,
											   "HashTableContext",
											   ALLOCSET_DEFAULT_MINSIZE,
											   ALLOCSET_DEFAULT_INITSIZE,
											   ALLOCSET_DEFAULT_MAXSIZE);

	hashtable->batchCxt = AllocSetContextCreate(hashtable->hashCxt,
												"HashBatchContext",
												ALLOCSET_DEFAULT_MINSIZE,
												ALLOCSET_DEFAULT_INITSIZE,
												ALLOCSET_DEFAULT_MAXSIZE);

	/* Allocate data that will live for the life of the hashjoin */

	oldcxt = MemoryContextSwitchTo(hashtable->hashCxt);

	if (nbatch > 0)
	{
		/*
		 * allocate and initialize the file arrays in hashCxt
		 * (palloc0 leaves all file pointers NULL and sizes zero)
		 */
		hashtable->innerBatchFile = (BufFile **)
			palloc0(nbatch * sizeof(BufFile *));
		hashtable->outerBatchFile = (BufFile **)
			palloc0(nbatch * sizeof(BufFile *));
		hashtable->innerBatchSize = (long *)
			palloc0(nbatch * sizeof(long));
		hashtable->outerBatchSize = (long *)
			palloc0(nbatch * sizeof(long));
		/* The files will not be opened until later... */
	}

	/*
	 * Prepare context for the first-scan space allocations; allocate the
	 * hashbucket array therein, and set each bucket "empty" (palloc0
	 * zeroes the chain pointers).
	 */
	MemoryContextSwitchTo(hashtable->batchCxt);

	hashtable->buckets = (HashJoinTuple *)
		palloc0(nbuckets * sizeof(HashJoinTuple));

	MemoryContextSwitchTo(oldcxt);

	return hashtable;
}

307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330

/*
 * Compute appropriate size for hashtable given the estimated size of the
 * relation to be hashed (number of rows and average row width).
 *
 * Caution: the input is only the planner's estimates, and so can't be
 * trusted too far.  Apply a healthy fudge factor.
 *
 * This is exported so that the planner's costsize.c can use it.
 */

/* Target bucket loading (tuples per bucket) */
#define NTUP_PER_BUCKET			10
/* Fudge factor to allow for inaccuracy of input estimates */
#define FUDGE_FAC				2.0

void
ExecChooseHashTableSize(double ntuples, int tupwidth,
						int *virtualbuckets,
						int *physicalbuckets,
						int *numbatches)
{
	int			tupsize;
	double		inner_rel_bytes;
331 332
	long		hash_table_bytes;
	double		dtmp;
333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349
	int			nbatch;
	int			nbuckets;
	int			totalbuckets;
	int			bucketsize;

	/* Force a plausible relation size if no info */
	if (ntuples <= 0.0)
		ntuples = 1000.0;

	/*
	 * Estimate tupsize based on footprint of tuple in hashtable... but
	 * what about palloc overhead?
	 */
	tupsize = MAXALIGN(tupwidth) + MAXALIGN(sizeof(HashJoinTupleData));
	inner_rel_bytes = ntuples * tupsize * FUDGE_FAC;

	/*
350
	 * Target in-memory hashtable size is SortMem kilobytes.
351
	 */
352
	hash_table_bytes = SortMem * 1024L;
353 354 355 356

	/*
	 * Count the number of hash buckets we want for the whole relation,
	 * for an average bucket load of NTUP_PER_BUCKET (per virtual
357
	 * bucket!).  It has to fit in an int, however.
358
	 */
359 360 361 362 363 364 365
	dtmp = ceil(ntuples * FUDGE_FAC / NTUP_PER_BUCKET);
	if (dtmp < INT_MAX)
		totalbuckets = (int) dtmp;
	else
		totalbuckets = INT_MAX;
	if (totalbuckets <= 0)
		totalbuckets = 1;
366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398

	/*
	 * Count the number of buckets we think will actually fit in the
	 * target memory size, at a loading of NTUP_PER_BUCKET (physical
	 * buckets). NOTE: FUDGE_FAC here determines the fraction of the
	 * hashtable space reserved to allow for nonuniform distribution of
	 * hash values. Perhaps this should be a different number from the
	 * other uses of FUDGE_FAC, but since we have no real good way to pick
	 * either one...
	 */
	bucketsize = NTUP_PER_BUCKET * tupsize;
	nbuckets = (int) (hash_table_bytes / (bucketsize * FUDGE_FAC));
	if (nbuckets <= 0)
		nbuckets = 1;

	if (totalbuckets <= nbuckets)
	{
		/*
		 * We have enough space, so no batching.  In theory we could even
		 * reduce nbuckets, but since that could lead to poor behavior if
		 * estimated ntuples is much less than reality, it seems better to
		 * make more buckets instead of fewer.
		 */
		totalbuckets = nbuckets;
		nbatch = 0;
	}
	else
	{
		/*
		 * Need to batch; compute how many batches we want to use. Note
		 * that nbatch doesn't have to have anything to do with the ratio
		 * totalbuckets/nbuckets; in fact, it is the number of groups we
		 * will use for the part of the data that doesn't fall into the
399
		 * first nbuckets hash buckets.  We try to set it to make all the
400
		 * batches the same size.
401
		 */
402 403
		dtmp = ceil((inner_rel_bytes - hash_table_bytes) /
					hash_table_bytes);
404
		if (dtmp < INT_MAX)
405 406
			nbatch = (int) dtmp;
		else
407
			nbatch = INT_MAX;
408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425
		if (nbatch <= 0)
			nbatch = 1;
	}

	/*
	 * Now, totalbuckets is the number of (virtual) hashbuckets for the
	 * whole relation, and nbuckets is the number of physical hashbuckets
	 * we will use in the first pass.  Data falling into the first
	 * nbuckets virtual hashbuckets gets handled in the first pass;
	 * everything else gets divided into nbatch batches to be processed in
	 * additional passes.
	 */
	*virtualbuckets = totalbuckets;
	*physicalbuckets = nbuckets;
	*numbatches = nbatch;
}


426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445
/* ----------------------------------------------------------------
 *		ExecHashTableDestroy
 *
 *		destroy a hash table
 * ----------------------------------------------------------------
 */
void
ExecHashTableDestroy(HashJoinTable hashtable)
{
	int			batchno;

	/* Make sure all the temp files are closed */
	for (batchno = 0; batchno < hashtable->nbatch; batchno++)
	{
		BufFile    *innerFile = hashtable->innerBatchFile[batchno];
		BufFile    *outerFile = hashtable->outerBatchFile[batchno];

		if (innerFile != NULL)
			BufFileClose(innerFile);
		if (outerFile != NULL)
			BufFileClose(outerFile);
	}

	/* Release working memory (batchCxt is a child, so it goes away too) */
	MemoryContextDelete(hashtable->hashCxt);

	/* And drop the control block */
	pfree(hashtable);
}

453
/* ----------------------------------------------------------------
 *		ExecHashTableInsert
 *
 *		insert a tuple into the hash table depending on the hash value
 *		it may just go to a tmp file for other batches
 *
 *		The tuple to insert is econtext->ecxt_innertuple; hashkeys are
 *		the expressions used to compute its hash value.
 * ----------------------------------------------------------------
 */
void
ExecHashTableInsert(HashJoinTable hashtable,
					ExprContext *econtext,
					List *hashkeys)
{
	int			bucketno = ExecHashGetBucket(hashtable, econtext, hashkeys);
	int			batchno = ExecHashGetBatch(bucketno, hashtable);
	TupleTableSlot *slot = econtext->ecxt_innertuple;
	HeapTuple	heapTuple = slot->val;

	/*
	 * decide whether to put the tuple in the hash table or a tmp file
	 * (batchno < 0 means it belongs to the current, in-memory batch)
	 */
	if (batchno < 0)
	{
		/*
		 * put the tuple in hash table
		 */
		HashJoinTuple hashTuple;
		int			hashTupleSize;

		/*
		 * Copy the tuple into a single palloc'd chunk in batchCxt:
		 * the HashJoinTuple header (MAXALIGN'd) followed by the tuple
		 * body.  First copy the HeapTuple header struct, then repoint
		 * t_data at the body area within this chunk and copy the body.
		 */
		hashTupleSize = MAXALIGN(sizeof(*hashTuple)) + heapTuple->t_len;
		hashTuple = (HashJoinTuple) MemoryContextAlloc(hashtable->batchCxt,
													   hashTupleSize);
		memcpy((char *) &hashTuple->htup,
			   (char *) heapTuple,
			   sizeof(hashTuple->htup));
		hashTuple->htup.t_datamcxt = hashtable->batchCxt;
		hashTuple->htup.t_data = (HeapTupleHeader)
			(((char *) hashTuple) + MAXALIGN(sizeof(*hashTuple)));
		memcpy((char *) hashTuple->htup.t_data,
			   (char *) heapTuple->t_data,
			   heapTuple->t_len);
		/* push onto the head of this bucket's chain */
		hashTuple->next = hashtable->buckets[bucketno];
		hashtable->buckets[bucketno] = hashTuple;
	}
	else
	{
		/*
		 * put the tuple into a tmp file for later batches
		 */
		hashtable->innerBatchSize[batchno]++;
		ExecHashJoinSaveTuple(heapTuple,
							  hashtable->innerBatchFile[batchno]);
	}
}

/* ----------------------------------------------------------------
508
 *		ExecHashGetBucket
509
 *
510
 *		Get the hash value for a tuple
511 512 513 514
 * ----------------------------------------------------------------
 */
int
ExecHashGetBucket(HashJoinTable hashtable,
515
				  ExprContext *econtext,
516
				  List *hashkeys)
517
{
518
	uint32		hashkey = 0;
519
	int			bucketno;
520 521
	List	   *hk;
	int			i = 0;
522
	MemoryContext oldContext;
523

524
	/*
525
	 * We reset the eval context each time to reclaim any memory leaked in
526
	 * the hashkey expressions.
527
	 */
528
	ResetExprContext(econtext);
529

530
	oldContext = MemoryContextSwitchTo(econtext->ecxt_per_tuple_memory);
531

532
	foreach(hk, hashkeys)
533
	{
534 535 536 537 538 539 540 541 542
		Datum		keyval;
		bool		isNull;

		/* rotate hashkey left 1 bit at each step */
		hashkey = (hashkey << 1) | ((hashkey & 0x80000000) ? 1 : 0);

		/*
		 * Get the join attribute value of the tuple
		 */
543 544
		keyval = ExecEvalExpr((ExprState *) lfirst(hk),
							  econtext, &isNull, NULL);
545 546 547 548 549 550

		/*
		 * Compute the hash function
		 */
		if (!isNull)			/* treat nulls as having hash key 0 */
		{
551 552 553 554 555
			uint32		hkey;

			hkey = DatumGetUInt32(FunctionCall1(&hashtable->hashfunctions[i],
												keyval));
			hashkey ^= hkey;
556 557 558
		}

		i++;
559
	}
560

561 562
	bucketno = hashkey % (uint32) hashtable->totalbuckets;

563
#ifdef HJDEBUG
564
	if (bucketno >= hashtable->nbuckets)
565
		printf("hash(%u) = %d SAVED\n", hashkey, bucketno);
566
	else
567
		printf("hash(%u) = %d\n", hashkey, bucketno);
568
#endif
569

570 571
	MemoryContextSwitchTo(oldContext);

572
	return bucketno;
573 574
}

575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592
/* ----------------------------------------------------------------
 *		ExecHashGetBatch
 *
 *		determine the batch number for a bucketno
 *
 * Returns -1 if bucket belongs to initial (or current) batch,
 * else 0..nbatch-1 corresponding to external batch file number for bucket.
 * ----------------------------------------------------------------
 */
int
ExecHashGetBatch(int bucketno, HashJoinTable hashtable)
{
	int			overflow = bucketno - hashtable->nbuckets;

	/* buckets below nbuckets are handled in memory during this pass */
	if (overflow < 0)
		return -1;

	/* remaining virtual buckets are spread round-robin over the batches */
	return overflow % hashtable->nbatch;
}

593
/* ----------------------------------------------------------------
 *		ExecScanHashBucket
 *
 *		scan a hash bucket of matches
 *
 *		Returns the next tuple in the current bucket that passes
 *		hjclauses, recording it in hjstate->hj_CurTuple so the scan can
 *		resume from there; returns NULL when the bucket is exhausted.
 * ----------------------------------------------------------------
 */
HeapTuple
ExecScanHashBucket(HashJoinState *hjstate,
				   List *hjclauses,
				   ExprContext *econtext)
{
	HashJoinTable hashtable = hjstate->hj_HashTable;
	HashJoinTuple hashTuple = hjstate->hj_CurTuple;

	/*
	 * hj_CurTuple is NULL to start scanning a new bucket, or the address
	 * of the last tuple returned from the current bucket.
	 */
	if (hashTuple == NULL)
		hashTuple = hashtable->buckets[hjstate->hj_CurBucketNo];
	else
		hashTuple = hashTuple->next;

	while (hashTuple != NULL)
	{
		HeapTuple	heapTuple = &hashTuple->htup;
		TupleTableSlot *inntuple;

		/* insert hashtable's tuple into exec slot so ExecQual sees it */
		inntuple = ExecStoreTuple(heapTuple,	/* tuple to store */
								  hjstate->hj_HashTupleSlot,	/* slot */
								  InvalidBuffer,
								  false);		/* do not pfree this tuple */
		econtext->ecxt_innertuple = inntuple;

		/* reset temp memory each time to avoid leaks from qual expression */
		ResetExprContext(econtext);

		if (ExecQual(hjclauses, econtext, false))
		{
			/* remember position so the next call continues from here */
			hjstate->hj_CurTuple = hashTuple;
			return heapTuple;
		}

		hashTuple = hashTuple->next;
	}

	/*
	 * no match
	 */
	return NULL;
}

/* ----------------------------------------------------------------
647
 *		ExecHashTableReset
648
 *
649
 *		reset hash table header for new batch
650 651
 *
 *		ntuples is the number of tuples in the inner relation's batch
652
 *		(which we currently don't actually use...)
653 654 655
 * ----------------------------------------------------------------
 */
void
656
ExecHashTableReset(HashJoinTable hashtable, long ntuples)
657
{
Bruce Momjian's avatar
Bruce Momjian committed
658
	MemoryContext oldcxt;
659
	int			nbuckets = hashtable->nbuckets;
660

661
	/*
662
	 * Release all the hash buckets and tuples acquired in the prior pass,
663
	 * and reinitialize the context for a new pass.
664
	 */
665
	MemoryContextReset(hashtable->batchCxt);
666
	oldcxt = MemoryContextSwitchTo(hashtable->batchCxt);
667

668
	/*
Bruce Momjian's avatar
Bruce Momjian committed
669 670 671 672 673 674 675 676
	 * We still use the same number of physical buckets as in the first
	 * pass. (It could be different; but we already decided how many
	 * buckets would be appropriate for the allowed memory, so stick with
	 * that number.) We MUST set totalbuckets to equal nbuckets, because
	 * from now on no tuples will go out to temp files; there are no more
	 * virtual buckets, only real buckets.	(This implies that tuples will
	 * go into different bucket numbers than they did on the first pass,
	 * but that's OK.)
677
	 */
678 679 680 681
	hashtable->totalbuckets = nbuckets;

	/* Reallocate and reinitialize the hash bucket headers. */
	hashtable->buckets = (HashJoinTuple *)
682
		palloc0(nbuckets * sizeof(HashJoinTuple));
683

684
	MemoryContextSwitchTo(oldcxt);
685 686
}

Vadim B. Mikheev's avatar
Vadim B. Mikheev committed
687
void
688
ExecReScanHash(HashState *node, ExprContext *exprCtxt)
Vadim B. Mikheev's avatar
Vadim B. Mikheev committed
689
{
690 691 692 693
	/*
	 * if chgParam of subnode is not null then plan will be re-scanned by
	 * first ExecProcNode.
	 */
694 695
	if (((PlanState *) node)->lefttree->chgParam == NULL)
		ExecReScan(((PlanState *) node)->lefttree, exprCtxt);
Vadim B. Mikheev's avatar
Vadim B. Mikheev committed
696
}