/*-------------------------------------------------------------------------
 *
 * s_lock.h
 *	   Hardware-dependent implementation of spinlocks.
 *
 *	NOTE: none of the macros in this file are intended to be called directly.
 *	Call them through the hardware-independent macros in spin.h.
 *
 *	The following hardware-dependent macros must be provided for each
 *	supported platform:
 *
 *	void S_INIT_LOCK(slock_t *lock)
 *		Initialize a spinlock (to the unlocked state).
 *
 *	void S_LOCK(slock_t *lock)
 *		Acquire a spinlock, waiting if necessary.
 *		Time out and abort() if unable to acquire the lock in a
 *		"reasonable" amount of time --- typically ~ 1 minute.
 *
 *	void S_UNLOCK(slock_t *lock)
 *		Unlock a previously acquired lock.
 *
 *	bool S_LOCK_FREE(slock_t *lock)
 *		Tests if the lock is free. Returns TRUE if free, FALSE if locked.
 *		This does *not* change the state of the lock.
 *
 *	void SPIN_DELAY(void)
 *		Delay operation to occur inside spinlock wait loop.
 *
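 *	As a purely illustrative sketch (real callers must go through the
 *	hardware-independent macros in spin.h, not these), a typical
 *	sequence would look like:
 *
 *		volatile slock_t lck;
 *
 *		S_INIT_LOCK(&lck);
 *		S_LOCK(&lck);
 *		... very short critical section ...
 *		S_UNLOCK(&lck);
 *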
 *	Note to implementors: there are default implementations for all these
 *	macros at the bottom of the file.  Check if your platform can use
 *	these or needs to override them.
 *
 *  Usually, S_LOCK() is implemented in terms of an even lower-level macro
 *	TAS():
 *
 *	int TAS(slock_t *lock)
 *		Atomic test-and-set instruction.  Attempt to acquire the lock,
 *		but do *not* wait.	Returns 0 if successful, nonzero if unable
 *		to acquire the lock.
 *
 *	TAS() is NOT part of the API, and should never be called directly.
 *
 *	CAUTION: on some platforms TAS() may sometimes report failure to acquire
 *	a lock even when the lock is not locked.  For example, on Alpha TAS()
 *	will "fail" if interrupted.  Therefore TAS() should always be invoked
 *	in a retry loop, even if you are certain the lock is free.
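 *
 *	A minimal sketch of such a retry loop (the real loop, with timeout
 *	handling and adaptive delay, is s_lock() in s_lock.c):
 *
 *		while (TAS(lock))
 *			SPIN_DELAY();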
 *
 *	ANOTHER CAUTION: be sure that TAS() and S_UNLOCK() represent sequence
 *	points, ie, loads and stores of other values must not be moved across
 *	a lock or unlock.  In most cases it suffices to make the operation be
 *	done through a "volatile" pointer.
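 *	As one concrete illustration, the default S_UNLOCK() near the bottom of
 *	this file stores through a cast to "volatile slock_t *" rather than
 *	assigning through the bare pointer.
 *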
 *	On most supported platforms, TAS() uses a tas() function written
 *	in assembly language to execute a hardware atomic-test-and-set
 *	instruction.  Equivalent OS-supplied mutex routines could be used too.
 *
 *	If no system-specific TAS() is available (ie, HAVE_SPINLOCKS is not
 *	defined), then we fall back on an emulation that uses SysV semaphores
 *	(see spin.c).  This emulation will be MUCH MUCH slower than a proper TAS()
 *	implementation, because of the cost of a kernel call per lock or unlock.
 *	An old report is that Postgres spends around 40% of its time in semop(2)
 *	when using the SysV semaphore code.
 *
 *
 * Portions Copyright (c) 1996-2007, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *	  $PostgreSQL: pgsql/src/include/storage/s_lock.h,v 1.163 2007/08/05 15:11:40 tgl Exp $
 *
 *-------------------------------------------------------------------------
 */
#ifndef S_LOCK_H
#define S_LOCK_H

#include "storage/pg_sema.h"

#ifdef HAVE_SPINLOCKS	/* skip spinlocks if requested */


#if defined(__GNUC__) || defined(__INTEL_COMPILER)
/*************************************************************************
 * All the gcc inlines
 * Gcc consistently defines the CPU as __cpu__.
 * Other compilers use __cpu or __cpu__ so we test for both in those cases.
 */

/*----------
 * Standard gcc asm format (assuming "volatile slock_t *lock"):

	__asm__ __volatile__(
		"	instruction	\n"
		"	instruction	\n"
		"	instruction	\n"
:		"=r"(_res), "+m"(*lock)		// return register, in/out lock value
:		"r"(lock)					// lock pointer, in input register
:		"memory", "cc");			// show clobbered registers here

 * The output-operands list (after first colon) should always include
 * "+m"(*lock), whether or not the asm code actually refers to this
 * operand directly.  This ensures that gcc believes the value in the
 * lock variable is used and set by the asm code.  Also, the clobbers
 * list (after third colon) should always include "memory"; this prevents
 * gcc from thinking it can cache the values of shared-memory fields
 * across the asm code.  Add "cc" if your asm code changes the condition
 * code register, and also list any temp registers the code uses.
 *----------
 */


#ifdef __i386__		/* 32-bit i386 */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res = 1;

	/*
	 * Use a non-locking test before asserting the bus lock.  Note that the
	 * extra test appears to be a small loss on some x86 platforms and a small
	 * win on others; it's by no means clear that we should keep it.
	 */
	__asm__ __volatile__(
		"	cmpb	$0,%1	\n"
		"	jne		1f		\n"
		"	lock			\n"
		"	xchgb	%0,%1	\n"
		"1: \n"
:		"+q"(_res), "+m"(*lock)
:
:		"memory", "cc");
	return (int) _res;
}

#define SPIN_DELAY() spin_delay()

static __inline__ void
spin_delay(void)
{
	/*
	 * This sequence is equivalent to the PAUSE instruction ("rep" is
	 * ignored by old IA32 processors if the following instruction is
	 * not a string operation); the IA-32 Architecture Software
	 * Developer's Manual, Vol. 3, Section 7.7.2 describes why using
	 * PAUSE in the inner loop of a spin lock is necessary for good
	 * performance:
	 *
	 *     The PAUSE instruction improves the performance of IA-32
	 *     processors supporting Hyper-Threading Technology when
	 *     executing spin-wait loops and other routines where one
	 *     thread is accessing a shared lock or semaphore in a tight
	 *     polling loop. When executing a spin-wait loop, the
	 *     processor can suffer a severe performance penalty when
	 *     exiting the loop because it detects a possible memory order
	 *     violation and flushes the core processor's pipeline. The
	 *     PAUSE instruction provides a hint to the processor that the
	 *     code sequence is a spin-wait loop. The processor uses this
	 *     hint to avoid the memory order violation and prevent the
	 *     pipeline flush. In addition, the PAUSE instruction
	 *     de-pipelines the spin-wait loop to prevent it from
	 *     consuming execution resources excessively.
	 */
	__asm__ __volatile__(
		" rep; nop			\n");
}

#endif	 /* __i386__ */


#ifdef __x86_64__		/* AMD Opteron, Intel EM64T */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res = 1;

	/*
	 * On Opteron, using a non-locking test before the locking instruction
	 * is a huge loss.  On EM64T, it appears to be a wash or small loss,
	 * so we needn't bother to try to distinguish the sub-architectures.
	 */
	__asm__ __volatile__(
		"	lock			\n"
		"	xchgb	%0,%1	\n"
:		"+q"(_res), "+m"(*lock)
:
:		"memory", "cc");
	return (int) _res;
}

#define SPIN_DELAY() spin_delay()

static __inline__ void
spin_delay(void)
{
	/*
	 * Adding a PAUSE in the spin delay loop is demonstrably a no-op on
	 * Opteron, but it may be of some use on EM64T, so we keep it.
	 */
	__asm__ __volatile__(
		" rep; nop			\n");
}

#endif	 /* __x86_64__ */


#if defined(__ia64__) || defined(__ia64)	/* Intel Itanium */
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock) tas(lock)

#ifndef __INTEL_COMPILER

static __inline__ int
tas(volatile slock_t *lock)
{
	long int	ret;

	__asm__ __volatile__(
		"	xchg4 	%0=%1,%2	\n"
:		"=r"(ret), "+m"(*lock)
:		"r"(1)
:		"memory");
	return (int) ret;
}

#else /* __INTEL_COMPILER */

static __inline__ int
tas(volatile slock_t *lock)
{
	int		ret;

	ret = _InterlockedExchange(lock,1);	/* this is a xchg asm macro */

	return ret;
}

#endif /* __INTEL_COMPILER */
#endif	 /* __ia64__ || __ia64 */


#if defined(__arm__) || defined(__arm)
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res = 1;

	__asm__ __volatile__(
		"	swpb 	%0, %0, [%2]	\n"
:		"+r"(_res), "+m"(*lock)
:		"r"(lock)
:		"memory");
	return (int) _res;
}

#endif	 /* __arm__ */


/* S/390 and S/390x Linux (32- and 64-bit zSeries) */
#if defined(__s390__) || defined(__s390x__)
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock)	   tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	int			_res = 0;

	__asm__	__volatile__(
		"	cs 	%0,%3,0(%2)		\n"
:		"+d"(_res), "+m"(*lock)
:		"a"(lock), "d"(1)
:		"memory", "cc");
	return _res;
}

#endif	 /* __s390__ || __s390x__ */


#if defined(__sparc__)		/* Sparc */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res;

	/*
	 *	See comment in /pg/backend/port/tas/solaris_sparc.s for why this
	 *	uses "ldstub", and that file uses "cas".  gcc currently generates
	 *	sparcv7-targeted binaries, so "cas" use isn't possible.
	 */
	__asm__ __volatile__(
		"	ldstub	[%2], %0	\n"
:		"=r"(_res), "+m"(*lock)
:		"r"(lock)
:		"memory");
	return (int) _res;
}

#endif	 /* __sparc__ */


/* PowerPC */
#if defined(__ppc__) || defined(__powerpc__) || defined(__ppc64__) || defined(__powerpc64__)
#define HAS_TEST_AND_SET

#if defined(__ppc64__) || defined(__powerpc64__)
typedef unsigned long slock_t;
#else
typedef unsigned int slock_t;
#endif

#define TAS(lock) tas(lock)
/*
 * NOTE: per the Enhanced PowerPC Architecture manual, v1.0 dated 7-May-2002,
 * an isync is a sufficient synchronization barrier after a lwarx/stwcx loop.
 */
static __inline__ int
tas(volatile slock_t *lock)
{
	slock_t _t;
	int _res;

	__asm__ __volatile__(
"	lwarx   %0,0,%3		\n"
"	cmpwi   %0,0		\n"
"	bne     1f			\n"
"	addi    %0,%0,1		\n"
"	stwcx.  %0,0,%3		\n"
"	beq     2f         	\n"
"1:	li      %1,1		\n"
"	b		3f			\n"
"2:						\n"
"	isync				\n"
"	li      %1,0		\n"
"3:						\n"

:	"=&r"(_t), "=r"(_res), "+m"(*lock)
:	"r"(lock)
:	"memory", "cc");
	return _res;
}

/* PowerPC S_UNLOCK is almost standard but requires a "sync" instruction */
#define S_UNLOCK(lock)	\
do \
{ \
	__asm__ __volatile__ ("	sync \n"); \
	*((volatile slock_t *) (lock)) = 0; \
} while (0)

#endif /* powerpc */


/* Linux Motorola 68k */
#if (defined(__mc68000__) || defined(__m68k__)) && defined(__linux__)
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register int rv;

	__asm__	__volatile__(
		"	clrl	%0		\n"
		"	tas		%1		\n"
		"	sne		%0		\n"
:		"=d"(rv), "+m"(*lock)
:
:		"memory", "cc");
	return rv;
}

#endif	 /* (__mc68000__ || __m68k__) && __linux__ */


/*
 * VAXen -- even multiprocessor ones
 * (thanks to Tom Ivar Helbekkmo)
 */
#if defined(__vax__)
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register int	_res;

	__asm__ __volatile__(
		"	movl 	$1, %0			\n"
		"	bbssi	$0, (%2), 1f	\n"
		"	clrl	%0				\n"
		"1: \n"
:		"=&r"(_res), "+m"(*lock)
:		"r"(lock)
:		"memory");
	return _res;
}

#endif	 /* __vax__ */


#if defined(__ns32k__)		/* National Semiconductor 32K */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register int	_res;

	__asm__ __volatile__(
		"	sbitb	0, %1	\n"
		"	sfsd	%0		\n"
:		"=r"(_res), "+m"(*lock)
:
:		"memory");
	return _res;
}

#endif	 /* __ns32k__ */


#if defined(__alpha) || defined(__alpha__)	/* Alpha */
/*
 * Correct multi-processor locking methods are explained in section 5.5.3
 * of the Alpha AXP Architecture Handbook, which at this writing can be
 * found at ftp://ftp.netbsd.org/pub/NetBSD/misc/dec-docs/index.html.
 * For gcc we implement the handbook's code directly with inline assembler.
 */
#define HAS_TEST_AND_SET

typedef unsigned long slock_t;

#define TAS(lock)  tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register slock_t _res;

	__asm__	__volatile__(
		"	ldq		$0, %1	\n"
		"	bne		$0, 2f	\n"
		"	ldq_l	%0, %1	\n"
		"	bne		%0, 2f	\n"
		"	mov		1,  $0	\n"
		"	stq_c	$0, %1	\n"
		"	beq		$0, 2f	\n"
		"	mb				\n"
		"	br		3f		\n"
		"2:	mov		1, %0	\n"
		"3:					\n"
:		"=&r"(_res), "+m"(*lock)
:
:		"memory", "0");
	return (int) _res;
}

#define S_UNLOCK(lock)	\
do \
{\
	__asm__ __volatile__ ("	mb \n"); \
	*((volatile slock_t *) (lock)) = 0; \
} while (0)

#endif /* __alpha || __alpha__ */


#if defined(__mips__) && !defined(__sgi)	/* non-SGI MIPS */
/* Note: on SGI we use the OS' mutex ABI, see below */
/* Note: R10000 processors require a separate SYNC */
#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#define TAS(lock) tas(lock)

static __inline__ int
tas(volatile slock_t *lock)
{
	register volatile slock_t *_l = lock;
	register int _res;
	register int _tmp;

	__asm__ __volatile__(
		"       .set push           \n"
		"       .set mips2          \n"
		"       .set noreorder      \n"
		"       .set nomacro        \n"
		"       ll      %0, %2      \n"
		"       or      %1, %0, 1   \n"
		"       sc      %1, %2      \n"
		"       xori    %1, 1       \n"
		"       or      %0, %0, %1  \n"
		"       sync                \n"
		"       .set pop              "
:		"=&r" (_res), "=&r" (_tmp), "+R" (*_l)
:
:		"memory");
	return _res;
}

/* MIPS S_UNLOCK is almost standard but requires a "sync" instruction */
#define S_UNLOCK(lock)	\
do \
{ \
	__asm__ __volatile__( \
		"       .set push           \n" \
		"       .set mips2          \n" \
		"       .set noreorder      \n" \
		"       .set nomacro        \n" \
		"       sync                \n" \
		"       .set pop              "); \
	*((volatile slock_t *) (lock)) = 0; \
} while (0)

#endif /* __mips__ && !__sgi */


#if defined(__m32r__) && defined(HAVE_SYS_TAS_H)	/* Renesas' M32R */
#define HAS_TEST_AND_SET

#include <sys/tas.h>

typedef int slock_t;

#define TAS(lock) tas(lock)

#endif /* __m32r__ */


/* These live in s_lock.c, but only for gcc */


#if defined(__m68k__) && !defined(__linux__)	/* non-Linux Motorola 68k */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;
#endif


#endif	/* __GNUC__ */



/*
 * ---------------------------------------------------------------------
 * Platforms that use non-gcc inline assembly:
 * ---------------------------------------------------------------------
 */

#if !defined(HAS_TEST_AND_SET)	/* We didn't trigger above, let's try here */


#if defined(USE_UNIVEL_CC)		/* Unixware compiler */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;

#define TAS(lock)	tas(lock)

asm int
tas(volatile slock_t *s_lock)
{
/* UNIVEL wants %mem in column 1, so we don't pg_indent this file */
%mem s_lock
	pushl %ebx
	movl s_lock, %ebx
	movl $255, %eax
	lock
	xchgb %al, (%ebx)
	popl %ebx
}

#endif	 /* defined(USE_UNIVEL_CC) */

#if defined(__alpha) || defined(__alpha__)	/* Tru64 Unix Alpha compiler */
/*
 * The Tru64 compiler doesn't support gcc-style inline asm, but it does
 * have some builtin functions that accomplish much the same results.
 * For simplicity, slock_t is defined as long (ie, quadword) on Alpha
 * regardless of the compiler in use.  LOCK_LONG and UNLOCK_LONG only
 * operate on an int (ie, longword), but that's OK as long as we define
 * S_INIT_LOCK to zero out the whole quadword.
 */
#define HAS_TEST_AND_SET

typedef unsigned long slock_t;

#include <alpha/builtins.h>
#define S_INIT_LOCK(lock)  (*(lock) = 0)
#define TAS(lock)		   (__LOCK_LONG_RETRY((lock), 1) == 0)
#define S_UNLOCK(lock)	   __UNLOCK_LONG(lock)

#endif	 /* __alpha || __alpha__ */


#if defined(__hppa) || defined(__hppa__)	/* HP PA-RISC, GCC and HP compilers */
/*
 * HP's PA-RISC
 *
 * See src/backend/port/hpux/tas.c.template for details about LDCWX.  Because
 * LDCWX requires a 16-byte-aligned address, we declare slock_t as a 16-byte
 * struct.  The active word in the struct is whichever has the aligned address;
 * the other three words just sit at -1.
 *
 * When using gcc, we can inline the required assembly code.
 */
#define HAS_TEST_AND_SET

typedef struct
{
	int			sema[4];
} slock_t;

#define TAS_ACTIVE_WORD(lock)	((volatile int *) (((long) (lock) + 15) & ~15))

#if defined(__GNUC__)

static __inline__ int
tas(volatile slock_t *lock)
{
	volatile int *lockword = TAS_ACTIVE_WORD(lock);
	register int lockval;

	__asm__ __volatile__(
		"	ldcwx	0(0,%2),%0	\n"
:		"=r"(lockval), "+m"(*lockword)
:		"r"(lockword)
:		"memory");
	return (lockval == 0);
}

#endif /* __GNUC__ */

#define S_UNLOCK(lock)	(*TAS_ACTIVE_WORD(lock) = -1)

#define S_INIT_LOCK(lock) \
	do { \
		volatile slock_t *lock_ = (lock); \
		lock_->sema[0] = -1; \
		lock_->sema[1] = -1; \
		lock_->sema[2] = -1; \
		lock_->sema[3] = -1; \
	} while (0)

#define S_LOCK_FREE(lock)	(*TAS_ACTIVE_WORD(lock) != 0)

#endif	 /* __hppa || __hppa__ */


#if defined(__hpux) && defined(__ia64) && !defined(__GNUC__)

#define HAS_TEST_AND_SET

typedef unsigned int slock_t;

#include <ia64/sys/inline.h>
#define TAS(lock) _Asm_xchg(_SZ_W, lock, 1, _LDHINT_NONE)

#endif	/* HPUX on IA64, non gcc */


#if defined(__sgi)	/* SGI compiler */
/*
 * SGI IRIX 5
 * slock_t is defined as an unsigned long. We use the standard SGI
 * mutex API.
 *
 * The following comment is left for historical reasons, but is probably
 * not a good idea since the mutex ABI is supported.
 *
 * This stuff may be supplemented in the future with Masato Kataoka's MIPS-II
 * assembly from his NECEWS SVR4 port, but we probably ought to retain this
 * for the R3000 chips out there.
 */
#define HAS_TEST_AND_SET

typedef unsigned long slock_t;

#include "mutex.h"
#define TAS(lock)	(test_and_set(lock,1))
#define S_UNLOCK(lock)	(test_then_and(lock,0))
#define S_INIT_LOCK(lock)	(test_then_and(lock,0))
#define S_LOCK_FREE(lock)	(test_then_add(lock,0) == 0)
#endif	 /* __sgi */


#if defined(sinix)		/* Sinix */
/*
 * SINIX / Reliant UNIX
 * slock_t is defined as a struct abilock_t, which has a single unsigned long
 * member. (Basically same as SGI)
 */
#define HAS_TEST_AND_SET

#include "abi_mutex.h"
typedef abilock_t slock_t;

#define TAS(lock)	(!acquire_lock(lock))
#define S_UNLOCK(lock)	release_lock(lock)
#define S_INIT_LOCK(lock)	init_lock(lock)
#define S_LOCK_FREE(lock)	(stat_lock(lock) == UNLOCKED)
#endif	 /* sinix */


#if defined(_AIX)	/* AIX */
/*
 * AIX (POWER)
 */
#define HAS_TEST_AND_SET

#include <sys/atomic_op.h>

typedef int slock_t;

#define TAS(lock)			_check_lock((slock_t *) (lock), 0, 1)
#define S_UNLOCK(lock)		_clear_lock((slock_t *) (lock), 0)
#endif	 /* _AIX */


#if defined (nextstep)		/* Nextstep */
#define HAS_TEST_AND_SET

typedef struct mutex slock_t;

#define S_LOCK(lock)	mutex_lock(lock)
#define S_UNLOCK(lock)	mutex_unlock(lock)
#define S_INIT_LOCK(lock)	mutex_init(lock)
/* For Mach, we have to delve inside the entrails of `struct mutex'.  Ick! */
#define S_LOCK_FREE(alock)	((alock)->lock == 0)
#endif	 /* nextstep */


/* These are in s_lock.c */


#if defined(sun3)		/* Sun3 */
#define HAS_TEST_AND_SET

typedef unsigned char slock_t;
#endif


#if defined(__sun) && (defined(__i386) || defined(__x86_64__) || defined(__sparc__) || defined(__sparc))
#define HAS_TEST_AND_SET

#if defined(__i386) || defined(__x86_64__) || defined(__sparcv9) || defined(__sparcv8plus)
typedef unsigned int slock_t;
#else
typedef unsigned char slock_t;
#endif

extern slock_t pg_atomic_cas(volatile slock_t *lock, slock_t with,
									  slock_t cmp);

#define TAS(a) (pg_atomic_cas((a), 1, 0) != 0)
#endif


#ifdef WIN32_ONLY_COMPILER
typedef LONG slock_t;

#define HAS_TEST_AND_SET
#define TAS(lock) (InterlockedCompareExchange(lock, 1, 0))

#define SPIN_DELAY() spin_delay()

static __forceinline void
spin_delay(void)
{
	/* See comment for gcc code. Same code, MASM syntax */
	__asm rep nop;
}

#endif

  
#endif	/* !defined(HAS_TEST_AND_SET) */


/* Blow up if we didn't have any way to do spinlocks */
#ifndef HAS_TEST_AND_SET
#error PostgreSQL does not have native spinlock support on this platform.  To continue the compilation, rerun configure using --disable-spinlocks.  However, performance will be poor.  Please report this to pgsql-bugs@postgresql.org.
#endif


#else	/* !HAVE_SPINLOCKS */


/*
 * Fake spinlock implementation using semaphores --- slow and prone
 * to fall foul of kernel limits on number of semaphores, so don't use this
 * unless you must!  The subroutines appear in spin.c.
 */
typedef PGSemaphoreData slock_t;

extern bool s_lock_free_sema(volatile slock_t *lock);
extern void s_unlock_sema(volatile slock_t *lock);
extern void s_init_lock_sema(volatile slock_t *lock);
extern int	tas_sema(volatile slock_t *lock);

#define S_LOCK_FREE(lock)	s_lock_free_sema(lock)
#define S_UNLOCK(lock)	 s_unlock_sema(lock)
#define S_INIT_LOCK(lock)	s_init_lock_sema(lock)
#define TAS(lock)	tas_sema(lock)


#endif	/* HAVE_SPINLOCKS */


/*
 * Default Definitions - override these above as needed.
 */
#if !defined(S_LOCK)
#define S_LOCK(lock) \
	do { \
		if (TAS(lock)) \
			s_lock((lock), __FILE__, __LINE__); \
	} while (0)
#endif	 /* S_LOCK */
#if !defined(S_LOCK_FREE)
#define S_LOCK_FREE(lock)	(*(lock) == 0)
#endif	 /* S_LOCK_FREE */
#if !defined(S_UNLOCK)
#define S_UNLOCK(lock)		(*((volatile slock_t *) (lock)) = 0)
#endif	 /* S_UNLOCK */
#if !defined(S_INIT_LOCK)
#define S_INIT_LOCK(lock)	S_UNLOCK(lock)
#endif	 /* S_INIT_LOCK */
#if !defined(SPIN_DELAY)
#define SPIN_DELAY()	((void) 0)
#endif	 /* SPIN_DELAY */

#if !defined(TAS)
extern int	tas(volatile slock_t *lock);		/* in port/.../tas.s, or
												 * s_lock.c */
#define TAS(lock)		tas(lock)
#endif	 /* TAS */


/*
 * Platform-independent out-of-line support routines
 */
extern void s_lock(volatile slock_t *lock, const char *file, int line);
/* Support for dynamic adjustment of spins_per_delay */
#define DEFAULT_SPINS_PER_DELAY  100

extern void set_spins_per_delay(int shared_spins_per_delay);
extern int	update_spins_per_delay(int shared_spins_per_delay);
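
/*
 * Illustrative sketch only (the actual call sites live elsewhere in the
 * backend): a process might seed its local spin-count estimate from a
 * value kept in shared memory, and publish the adjusted result back, as in
 *
 *		set_spins_per_delay(shared_estimate);
 *		...
 *		shared_estimate = update_spins_per_delay(shared_estimate);
 *
 * where "shared_estimate" is a hypothetical variable standing in for
 * whatever shared-memory location the caller uses.
 */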

#endif	 /* S_LOCK_H */