Commit befa3e64 authored by Bruce Momjian

Revert 9.5 pgindent changes to atomics directory files

This is because there are many __asm__ blocks there that pgindent messes
up.  Also configure pgindent to skip that directory in the future.
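In the hunks below, the '-' lines are the 9.5 pgindent output being backed out and the '+' lines restore the original formatting. The typical damage is pgindent reflowing inline assembly and its operand lists, e.g. (taken from the arch-x86.h hunks below):

-    __asm__ __volatile__("":::"memory");
+    __asm__ __volatile__("" ::: "memory");

and re-wrapping long trailing /* ... */ comments on #endif lines across two lines, which accounts for most of the one-line changes in this commit.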
parent 2aa0476d
src/include/port/atomics/arch-ia64.h

@@ -18,9 +18,9 @@
 * fence.
 */
#if defined(__INTEL_COMPILER)
-#define pg_memory_barrier_impl() __mf()
+# define pg_memory_barrier_impl() __mf()
#elif defined(__GNUC__)
-#define pg_memory_barrier_impl() __asm__ __volatile__ ("mf" : : : "memory")
+# define pg_memory_barrier_impl() __asm__ __volatile__ ("mf" : : : "memory")
#elif defined(__hpux)
-#define pg_memory_barrier_impl() _Asm_mf()
+# define pg_memory_barrier_impl() _Asm_mf()
#endif
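For background (not part of this commit): these macros back the public pg_memory_barrier(), whose job is to order earlier stores against later loads. A minimal sketch of the store-then-check signalling pattern it enables, using the atomics.h wrapper (flag names are hypothetical):

/* Sketch only; assumes atomics.h (which also supplies bool). */
static volatile int my_flag = 0;
static volatile int peer_flag = 0;

static bool
signal_and_check(void)
{
    my_flag = 1;
    pg_memory_barrier();        /* full fence: store above before load below */
    return peer_flag == 0;      /* proceed only if the peer has not signalled */
}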
src/include/port/atomics/arch-x86.h

@@ -78,10 +78,9 @@ typedef struct pg_atomic_uint64
} pg_atomic_uint64;
#endif
#endif /* defined(HAVE_ATOMICS) */
-#endif /* defined(__GNUC__) &&
- * !defined(__INTEL_COMPILER) */
+#endif /* defined(__GNUC__) && !defined(__INTEL_COMPILER) */
#if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
@@ -94,20 +93,20 @@ typedef struct pg_atomic_uint64
 * PAUSE in the inner loop of a spin lock is necessary for good
 * performance:
 *
 * The PAUSE instruction improves the performance of IA-32
 * processors supporting Hyper-Threading Technology when
 * executing spin-wait loops and other routines where one
 * thread is accessing a shared lock or semaphore in a tight
 * polling loop. When executing a spin-wait loop, the
 * processor can suffer a severe performance penalty when
 * exiting the loop because it detects a possible memory order
 * violation and flushes the core processor's pipeline. The
 * PAUSE instruction provides a hint to the processor that the
 * code sequence is a spin-wait loop. The processor uses this
 * hint to avoid the memory order violation and prevent the
 * pipeline flush. In addition, the PAUSE instruction
 * de-pipelines the spin-wait loop to prevent it from
 * consuming execution resources excessively.
 */
#if defined(__INTEL_COMPILER)
#define PG_HAVE_SPIN_DELAY
@@ -121,8 +120,8 @@ pg_spin_delay_impl(void)
static __inline__ void
pg_spin_delay_impl(void)
{
    __asm__ __volatile__(
        " rep; nop \n");
}
#elif defined(WIN32_ONLY_COMPILER) && defined(__x86_64__)
#define PG_HAVE_SPIN_DELAY
@@ -137,10 +136,10 @@ static __forceinline void
pg_spin_delay_impl(void)
{
    /* See comment for gcc code. Same code, MASM syntax */
    __asm rep nop;
}
#endif
#endif /* !defined(PG_HAVE_SPIN_DELAY) */
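For background, the spin-delay hint above is meant to sit in the inner loop of a spinlock; a minimal sketch of such a caller (hypothetical function, built on the public atomics.h wrappers):

/* Busy-wait until the flag is acquired, hinting the CPU on each miss. */
static void
acquire_with_delay(volatile pg_atomic_flag *lock)
{
    while (!pg_atomic_test_set_flag(lock))
        pg_spin_delay();        /* expands to "rep; nop" (PAUSE) on x86 */
}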
#if defined(HAVE_ATOMICS)
@@ -154,13 +153,12 @@ pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
{
    register char _res = 1;
    __asm__ __volatile__(
        " lock \n"
        " xchgb %0,%1 \n"
        : "+q"(_res), "+m"(ptr->value)
        :
        : "memory");
    return _res == 0;
}
@@ -172,8 +170,7 @@ pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
     * On a TSO architecture like x86 it's sufficient to use a compiler
     * barrier to achieve release semantics.
     */
-    __asm__ __volatile__("":::"memory");
+    __asm__ __volatile__("" ::: "memory");
    ptr->value = 0;
}
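The TSO note above is the crux: on x86 every plain store already has release ordering, so only the compiler needs restraining. In portable C11 terms the same clear operation would be (comparison sketch, assuming a C11 compiler; not part of this commit):

#include <stdatomic.h>

static void
clear_flag_c11(atomic_uint *flag)
{
    /* release store: compiles to a plain MOV plus a compiler barrier on x86 */
    atomic_store_explicit(flag, 0, memory_order_release);
}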
@@ -182,20 +179,19 @@ static inline bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
                                    uint32 *expected, uint32 newval)
{
    char ret;
    /*
     * Perform cmpxchg and use the zero flag which it implicitly sets when
     * equal to measure the success.
     */
    __asm__ __volatile__(
        " lock \n"
        " cmpxchgl %4,%5 \n"
        " setz %2 \n"
-        : "=a"(*expected), "=m"(ptr->value), "=q"(ret)
-        : "a"(*expected), "r"(newval), "m"(ptr->value)
+        : "=a" (*expected), "=m"(ptr->value), "=q" (ret)
+        : "a" (*expected), "r" (newval), "m"(ptr->value)
        : "memory", "cc");
    return (bool) ret;
}
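A useful property of this API, relied on throughout the tree: on failure, *expected is overwritten with the value actually found, so retry loops need no separate re-read. A sketch of a hypothetical helper built on it (not part of this commit):

/* Atomically raise *ptr to at least val. Sketch, using atomics.h wrappers. */
static void
atomic_max_u32(volatile pg_atomic_uint32 *ptr, uint32 val)
{
    uint32 expected = pg_atomic_read_u32(ptr);

    while (expected < val)
    {
        if (pg_atomic_compare_exchange_u32(ptr, &expected, val))
            break;              /* on failure, expected was refreshed; loop decides again */
    }
}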
@@ -203,14 +199,13 @@ pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
static inline uint32
pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
{
    uint32 res;
    __asm__ __volatile__(
        " lock \n"
        " xaddl %0,%1 \n"
        : "=q"(res), "=m"(ptr->value)
-        : "0"(add_), "m"(ptr->value)
+        : "0" (add_), "m"(ptr->value)
        : "memory", "cc");
    return res;
}
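Since xadd returns the pre-addition value in a single locked instruction, fetch-add is the natural primitive for shared counters; e.g. (hypothetical caller, sketch only):

/* Hand out unique, increasing ids from a shared counter. */
static uint32
next_id(volatile pg_atomic_uint32 *counter)
{
    return pg_atomic_fetch_add_u32(counter, 1);    /* value before the add */
}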
@@ -221,20 +216,19 @@ static inline bool
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
                                    uint64 *expected, uint64 newval)
{
    char ret;
    /*
     * Perform cmpxchg and use the zero flag which it implicitly sets when
     * equal to measure the success.
     */
    __asm__ __volatile__(
        " lock \n"
        " cmpxchgq %4,%5 \n"
        " setz %2 \n"
-        : "=a"(*expected), "=m"(ptr->value), "=q"(ret)
-        : "a"(*expected), "r"(newval), "m"(ptr->value)
+        : "=a" (*expected), "=m"(ptr->value), "=q" (ret)
+        : "a" (*expected), "r" (newval), "m"(ptr->value)
        : "memory", "cc");
    return (bool) ret;
}
@@ -242,23 +236,20 @@ pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
static inline uint64
pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
{
    uint64 res;
    __asm__ __volatile__(
        " lock \n"
        " xaddq %0,%1 \n"
        : "=q"(res), "=m"(ptr->value)
-        : "0"(add_), "m"(ptr->value)
+        : "0" (add_), "m"(ptr->value)
        : "memory", "cc");
    return res;
}
#endif /* __x86_64__ */
-#endif /* defined(__GNUC__) &&
- * !defined(__INTEL_COMPILER) */
+#endif /* defined(__GNUC__) && !defined(__INTEL_COMPILER) */
#endif /* HAVE_ATOMICS */
-#endif /* defined(PG_USE_INLINE) ||
- * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
src/include/port/atomics/fallback.h

/*-------------------------------------------------------------------------
 *
 * fallback.h
 *    Fallback for platforms without spinlock and/or atomics support. Slower
 *    than native atomics support, but not unusably slow.
 *
 * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
@@ -14,7 +14,7 @@
/* intentionally no include guards, should only be included by atomics.h */
#ifndef INSIDE_ATOMICS_H
-#error "should be included via atomics.h"
+# error "should be included via atomics.h"
#endif
#ifndef pg_memory_barrier_impl
@@ -75,15 +75,14 @@ typedef struct pg_atomic_flag
 * be content with just one byte instead of 4, but that's not too much
 * waste.
 */
-#if defined(__hppa) || defined(__hppa__) /* HP PA-RISC, GCC and HP
- * compilers */
+#if defined(__hppa) || defined(__hppa__) /* HP PA-RISC, GCC and HP compilers */
    int sema[4];
#else
    int sema;
#endif
} pg_atomic_flag;
#endif /* PG_HAVE_ATOMIC_FLAG_SUPPORT */
#if !defined(PG_HAVE_ATOMIC_U32_SUPPORT)
@@ -93,8 +92,7 @@ typedef struct pg_atomic_flag
typedef struct pg_atomic_uint32
{
    /* Check pg_atomic_flag's definition above for an explanation */
-#if defined(__hppa) || defined(__hppa__) /* HP PA-RISC, GCC and HP
- * compilers */
+#if defined(__hppa) || defined(__hppa__) /* HP PA-RISC, GCC and HP compilers */
    int sema[4];
#else
    int sema;
@@ -102,7 +100,7 @@ typedef struct pg_atomic_uint32
    volatile uint32 value;
} pg_atomic_uint32;
#endif /* PG_HAVE_ATOMIC_U32_SUPPORT */
#if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
@@ -130,7 +128,7 @@ pg_atomic_unlocked_test_flag_impl(volatile pg_atomic_flag *ptr)
    return true;
}
#endif /* PG_HAVE_ATOMIC_FLAG_SIMULATION */
#ifdef PG_HAVE_ATOMIC_U32_SIMULATION
@@ -139,13 +137,12 @@ extern void pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_)
#define PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32
extern bool pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
                                                uint32 *expected, uint32 newval);
#define PG_HAVE_ATOMIC_FETCH_ADD_U32
extern uint32 pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_);
#endif /* PG_HAVE_ATOMIC_U32_SIMULATION */
-#endif /* defined(PG_USE_INLINE) ||
- * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
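The simulation routines declared extern above are defined in src/backend/port/atomics.c; each takes the embedded sema as a spinlock around a plain read-modify-write, roughly like this (sketch, not the verbatim implementation; assumes the s_lock.h spinlock API):

/* Rough shape of the spinlock-simulated fetch-add. */
static uint32
simulated_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
{
    uint32 oldval;

    SpinLockAcquire((slock_t *) &ptr->sema);    /* sema doubles as the lock */
    oldval = ptr->value;
    ptr->value += add_;
    SpinLockRelease((slock_t *) &ptr->sema);
    return oldval;
}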
src/include/port/atomics/generic-acc.h

@@ -10,9 +10,9 @@
 *
 * Documentation:
 * * inline assembly for Itanium-based HP-UX:
 *   http://h21007.www2.hp.com/portal/download/files/unprot/Itanium/inline_assem_ERS.pdf
 * * Implementing Spinlocks on the Intel (R) Itanium (R) Architecture and PA-RISC
 *   http://h21007.www2.hp.com/portal/download/files/unprot/itanium/spinlocks.pdf
 *
 * Itanium only supports a small set of numbers (-16, -8, -4, -1, 1, 4, 8, 16)
 * for atomic add/sub, so we just implement everything but compare_exchange
@@ -49,7 +49,7 @@ typedef struct pg_atomic_uint64
    volatile uint64 value;
} pg_atomic_uint64;
#endif /* defined(HAVE_ATOMICS) */
#if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
@@ -64,25 +64,23 @@ STATIC_IF_INLINE bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
                                    uint32 *expected, uint32 newval)
{
    bool ret;
    uint32 current;
    _Asm_mov_to_ar(_AREG_CCV, *expected, MINOR_FENCE);
    /*
     * We want a barrier, not just release/acquire semantics.
     */
    _Asm_mf();
    /*
-     * Notes: DOWN_MEM_FENCE | _UP_MEM_FENCE prevents reordering by the
-     * compiler
+     * Notes:
+     * DOWN_MEM_FENCE | _UP_MEM_FENCE prevents reordering by the compiler
     */
    current = _Asm_cmpxchg(_SZ_W, /* word */
                           _SEM_REL,
                           &ptr->value,
                           newval, _LDHINT_NONE,
                           _DOWN_MEM_FENCE | _UP_MEM_FENCE);
    ret = current == *expected;
    *expected = current;
    return ret;
@@ -94,16 +92,16 @@ STATIC_IF_INLINE bool
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
                                    uint64 *expected, uint64 newval)
{
    bool ret;
    uint64 current;
    _Asm_mov_to_ar(_AREG_CCV, *expected, MINOR_FENCE);
    _Asm_mf();
    current = _Asm_cmpxchg(_SZ_D, /* doubleword */
                           _SEM_REL,
                           &ptr->value,
                           newval, _LDHINT_NONE,
                           _DOWN_MEM_FENCE | _UP_MEM_FENCE);
    ret = current == *expected;
    *expected = current;
    return ret;
@@ -111,7 +109,6 @@ pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
#undef MINOR_FENCE
#endif /* defined(HAVE_ATOMICS) */
-#endif /* defined(PG_USE_INLINE) ||
- * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
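Background on the _AREG_CCV dance above (not from this commit): Itanium's cmpxchg instruction has no operand for the comparison value; it always compares memory against the ar.ccv application register, so the expected value must be staged there first. Conceptually the intrinsics pair expands to something like:

/* Illustrative only; assembler details are an assumption:
 *
 *    mov          ar.ccv = *expected           ;  _Asm_mov_to_ar(_AREG_CCV, ...)
 *    cmpxchg4.rel current = [&ptr->value], newval, ar.ccv
 *
 * i.e. the old value travels through ar.ccv rather than through an argument.
 */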
src/include/port/atomics/generic-gcc.h

@@ -10,9 +10,9 @@
 *
 * Documentation:
 * * Legacy __sync Built-in Functions for Atomic Memory Access
 *   http://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fsync-Builtins.html
 * * Built-in functions for memory model aware atomic operations
 *   http://gcc.gnu.org/onlinedocs/gcc-4.8.2/gcc/_005f_005fatomic-Builtins.html
 *
 * src/include/port/atomics/generic-gcc.h
 *
@@ -40,21 +40,21 @@
 * definitions where possible, and use this only as a fallback.
 */
#if !defined(pg_memory_barrier_impl)
-#if defined(HAVE_GCC__ATOMIC_INT32_CAS)
-#define pg_memory_barrier_impl() __atomic_thread_fence(__ATOMIC_SEQ_CST)
-#elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
-#define pg_memory_barrier_impl() __sync_synchronize()
-#endif
+# if defined(HAVE_GCC__ATOMIC_INT32_CAS)
+# define pg_memory_barrier_impl() __atomic_thread_fence(__ATOMIC_SEQ_CST)
+# elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
+# define pg_memory_barrier_impl() __sync_synchronize()
+# endif
#endif /* !defined(pg_memory_barrier_impl) */
#if !defined(pg_read_barrier_impl) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
/* acquire semantics include read barrier semantics */
-#define pg_read_barrier_impl() __atomic_thread_fence(__ATOMIC_ACQUIRE)
+# define pg_read_barrier_impl() __atomic_thread_fence(__ATOMIC_ACQUIRE)
#endif
#if !defined(pg_write_barrier_impl) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
/* release semantics include write barrier semantics */
-#define pg_write_barrier_impl() __atomic_thread_fence(__ATOMIC_RELEASE)
+# define pg_write_barrier_impl() __atomic_thread_fence(__ATOMIC_RELEASE)
#endif
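These fences map one-to-one onto the C11 model that the __atomic builtins implement, which is an easy way to read them (comparison sketch, assuming a C11 compiler; not part of this commit):

#include <stdatomic.h>

static void
barriers_in_c11_terms(void)
{
    atomic_thread_fence(memory_order_seq_cst);    /* pg_memory_barrier_impl() */
    atomic_thread_fence(memory_order_acquire);    /* pg_read_barrier_impl() */
    atomic_thread_fence(memory_order_release);    /* pg_write_barrier_impl() */
}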
#ifdef HAVE_ATOMICS
@@ -75,7 +75,7 @@ typedef struct pg_atomic_flag
#endif
} pg_atomic_flag;
#endif /* !ATOMIC_FLAG_SUPPORT && SYNC_INT32_TAS */
/* generic gcc based atomic uint32 implementation */
#if !defined(PG_HAVE_ATOMIC_U32_SUPPORT) \
@@ -87,8 +87,7 @@ typedef struct pg_atomic_uint32
    volatile uint32 value;
} pg_atomic_uint32;
-#endif /* defined(HAVE_GCC__ATOMIC_INT32_CAS) ||
- * defined(HAVE_GCC__SYNC_INT32_CAS) */
+#endif /* defined(HAVE_GCC__ATOMIC_INT32_CAS) || defined(HAVE_GCC__SYNC_INT32_CAS) */
/* generic gcc based atomic uint64 implementation */
#if !defined(PG_HAVE_ATOMIC_U64_SUPPORT) \
@@ -102,8 +101,7 @@ typedef struct pg_atomic_uint64
    volatile uint64 value pg_attribute_aligned(8);
} pg_atomic_uint64;
-#endif /* defined(HAVE_GCC__ATOMIC_INT64_CAS) ||
- * defined(HAVE_GCC__SYNC_INT64_CAS) */
+#endif /* defined(HAVE_GCC__ATOMIC_INT64_CAS) || defined(HAVE_GCC__SYNC_INT64_CAS) */
/*
 * Implementation follows. Inlined or directly included from atomics.c
@@ -125,7 +123,7 @@ pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
}
#endif
#endif /* defined(HAVE_GCC__SYNC_*_TAS) */
#ifndef PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
#define PG_HAVE_ATOMIC_UNLOCKED_TEST_FLAG
@@ -154,7 +152,7 @@ pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
}
#endif
#endif /* defined(PG_HAVE_ATOMIC_FLAG_SUPPORT) */
/* prefer __atomic, it has a better API */
#if !defined(PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U32) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
@@ -175,9 +173,8 @@ static inline bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
                                    uint32 *expected, uint32 newval)
{
    bool ret;
    uint32 current;
    current = __sync_val_compare_and_swap(&ptr->value, *expected, newval);
    ret = current == *expected;
    *expected = current;
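The three statements above exist because __sync_val_compare_and_swap only returns the old value; the __atomic builtin preferred earlier in this file folds the compare and the write-back of *expected into one call, roughly (sketch of the __atomic-based variant):

static inline bool
cas_u32_atomic_builtin(volatile uint32 *value, uint32 *expected, uint32 newval)
{
    /* returns true on success; on failure *expected receives the value seen */
    return __atomic_compare_exchange_n(value, expected, newval, false,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}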
@@ -214,9 +211,8 @@ static inline bool
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
                                    uint64 *expected, uint64 newval)
{
    bool ret;
    uint64 current;
    current = __sync_val_compare_and_swap(&ptr->value, *expected, newval);
    ret = current == *expected;
    *expected = current;
@@ -233,9 +229,8 @@ pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
}
#endif
#endif /* !defined(PG_DISABLE_64_BIT_ATOMICS) */
-#endif /* defined(PG_USE_INLINE) ||
- * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
#endif /* defined(HAVE_ATOMICS) */
src/include/port/atomics/generic-msvc.h

@@ -10,7 +10,7 @@
 *
 * Documentation:
 * * Interlocked Variable Access
 *   http://msdn.microsoft.com/en-us/library/ms684122%28VS.85%29.aspx
 *
 * src/include/port/atomics/generic-msvc.h
 *
@@ -41,14 +41,12 @@ typedef struct pg_atomic_uint32
} pg_atomic_uint32;
#define PG_HAVE_ATOMIC_U64_SUPPORT
-typedef struct __declspec (
-align(8))
-pg_atomic_uint64
+typedef struct __declspec(align(8)) pg_atomic_uint64
{
    volatile uint64 value;
} pg_atomic_uint64;
#endif /* defined(HAVE_ATOMICS) */
#if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
@@ -60,9 +58,8 @@ static inline bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
                                    uint32 *expected, uint32 newval)
{
    bool ret;
    uint32 current;
    current = InterlockedCompareExchange(&ptr->value, newval, *expected);
    ret = current == *expected;
    *expected = current;
@@ -89,9 +86,8 @@ static inline bool
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
                                    uint64 *expected, uint64 newval)
{
    bool ret;
    uint64 current;
    current = _InterlockedCompareExchange64(&ptr->value, newval, *expected);
    ret = current == *expected;
    *expected = current;
@@ -108,9 +104,8 @@ pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
{
    return _InterlockedExchangeAdd64(&ptr->value, add_);
}
#endif /* _WIN64 */
#endif /* HAVE_ATOMICS */
-#endif /* defined(PG_USE_INLINE) ||
- * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
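One trap worth noting in the Interlocked calls above: the argument order is (destination, new value, comparand), the mirror image of GCC's __sync_val_compare_and_swap(dest, oldval, newval); both return the prior value. A sketch of the same bool-plus-update shape against the Win32 API (assumes <windows.h>; not part of this commit):

#include <windows.h>
#include <stdbool.h>

/* Convert the value-returning Interlocked CAS into the bool+update API. */
static bool
cas_long_win32(volatile LONG *dest, LONG *expected, LONG newval)
{
    LONG prior = InterlockedCompareExchange(dest, newval, *expected);
    bool ok = (prior == *expected);

    *expected = prior;
    return ok;
}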
src/include/port/atomics/generic-sunpro.h

@@ -9,8 +9,8 @@
 *
 * Documentation:
 * * manpage for atomic_cas(3C)
 *   http://www.unix.com/man-page/opensolaris/3c/atomic_cas/
 *   http://docs.oracle.com/cd/E23824_01/html/821-1465/atomic-cas-3c.html
 *
 * src/include/port/atomics/generic-sunpro.h
 *
@@ -30,16 +30,16 @@
 * membar #StoreStore | #LoadStore | #StoreLoad | #LoadLoad on x86/sparc
 * respectively.
 */
-#define pg_memory_barrier_impl() __machine_rw_barrier()
+# define pg_memory_barrier_impl() __machine_rw_barrier()
#endif
#ifndef pg_read_barrier_impl
-#define pg_read_barrier_impl() __machine_r_barrier()
+# define pg_read_barrier_impl() __machine_r_barrier()
#endif
#ifndef pg_write_barrier_impl
-#define pg_write_barrier_impl() __machine_w_barrier()
+# define pg_write_barrier_impl() __machine_w_barrier()
#endif
#endif /* HAVE_MBARRIER_H */
/* Older versions of the compiler don't have atomic.h... */
#ifdef HAVE_ATOMIC_H
@@ -64,9 +64,9 @@ typedef struct pg_atomic_uint64
    volatile uint64 value pg_attribute_aligned(8);
} pg_atomic_uint64;
#endif /* HAVE_ATOMIC_H */
#endif /* defined(HAVE_ATOMICS) */
#if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
@@ -80,8 +80,8 @@ static inline bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
                                    uint32 *expected, uint32 newval)
{
    bool ret;
    uint32 current;
    current = atomic_cas_32(&ptr->value, *expected, newval);
    ret = current == *expected;
@@ -94,8 +94,8 @@ static inline bool
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
                                    uint64 *expected, uint64 newval)
{
    bool ret;
    uint64 current;
    current = atomic_cas_64(&ptr->value, *expected, newval);
    ret = current == *expected;
@@ -103,9 +103,8 @@ pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
    return ret;
}
#endif /* HAVE_ATOMIC_H */
#endif /* defined(HAVE_ATOMICS) */
-#endif /* defined(PG_USE_INLINE) ||
- * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
src/include/port/atomics/generic-xlc.h

@@ -9,7 +9,7 @@
 *
 * Documentation:
 * * Synchronization and atomic built-in functions
 *   http://publib.boulder.ibm.com/infocenter/lnxpcomp/v8v101/topic/com.ibm.xlcpp8l.doc/compiler/ref/bif_sync.htm
 *
 * src/include/port/atomics/generic-xlc.h
 *
@@ -35,9 +35,9 @@ typedef struct pg_atomic_uint64
    volatile uint64 value pg_attribute_aligned(8);
} pg_atomic_uint64;
#endif /* __64BIT__ */
#endif /* defined(HAVE_ATOMICS) */
#if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
@@ -48,13 +48,13 @@ static inline bool
pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
                                    uint32 *expected, uint32 newval)
{
    bool ret;
    uint64 current;
    /*
-     * xlc's documentation tells us: "If __compare_and_swap is used as a
-     * locking primitive, insert a call to the __isync built-in function at
-     * the start of any critical sections."
+     * xlc's documentation tells us:
+     * "If __compare_and_swap is used as a locking primitive, insert a call to
+     * the __isync built-in function at the start of any critical sections."
     */
    __isync();
@@ -62,8 +62,8 @@ pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
     * XXX: __compare_and_swap is defined to take signed parameters, but that
     * shouldn't matter since we don't perform any arithmetic operations.
     */
-    current = (uint32) __compare_and_swap((volatile int *) ptr->value,
-                                          (int) *expected, (int) newval);
+    current = (uint32)__compare_and_swap((volatile int*)ptr->value,
+                                         (int)*expected, (int)newval);
    ret = current == *expected;
    *expected = current;
    return ret;
@@ -83,13 +83,13 @@ static inline bool
pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
                                    uint64 *expected, uint64 newval)
{
    bool ret;
    uint64 current;
    __isync();
-    current = (uint64) __compare_and_swaplp((volatile long *) ptr->value,
-                                            (long) *expected, (long) newval);
+    current = (uint64)__compare_and_swaplp((volatile long*)ptr->value,
+                                           (long)*expected, (long)newval);
    ret = current == *expected;
    *expected = current;
    return ret;
@@ -102,9 +102,8 @@ pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
    return __fetch_and_addlp(&ptr->value, add_);
}
#endif /* PG_HAVE_ATOMIC_U64_SUPPORT */
#endif /* defined(HAVE_ATOMICS) */
-#endif /* defined(PG_USE_INLINE) ||
- * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
src/include/port/atomics/generic.h

@@ -14,7 +14,7 @@
/* intentionally no include guards, should only be included by atomics.h */
#ifndef INSIDE_ATOMICS_H
-#error "should be included via atomics.h"
+# error "should be included via atomics.h"
#endif
/*
@@ -22,10 +22,10 @@
 * barriers.
 */
#if !defined(pg_read_barrier_impl)
-#define pg_read_barrier_impl pg_memory_barrier_impl
+# define pg_read_barrier_impl pg_memory_barrier_impl
#endif
#if !defined(pg_write_barrier_impl)
-#define pg_write_barrier_impl pg_memory_barrier_impl
+# define pg_write_barrier_impl pg_memory_barrier_impl
#endif
#ifndef PG_HAVE_SPIN_DELAY
@@ -113,8 +113,7 @@ pg_atomic_init_flag_impl(volatile pg_atomic_flag *ptr)
static inline bool
pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
{
    uint32 value = 0;
    return pg_atomic_compare_exchange_u32_impl(ptr, &value, 1);
}
@@ -130,23 +129,23 @@ static inline void
pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
{
    /*
-     * Use a memory barrier + plain write if we have a native memory barrier.
-     * But don't do so if memory barriers use spinlocks - that'd lead to
-     * circularity if flags are used to implement spinlocks.
+     * Use a memory barrier + plain write if we have a native memory
+     * barrier. But don't do so if memory barriers use spinlocks - that'd lead
+     * to circularity if flags are used to implement spinlocks.
     */
#ifndef PG_HAVE_MEMORY_BARRIER_EMULATION
    /* XXX: release semantics suffice? */
    pg_memory_barrier_impl();
    pg_atomic_write_u32_impl(ptr, 0);
#else
    uint32 value = 1;
    pg_atomic_compare_exchange_u32_impl(ptr, &value, 0);
#endif
}
#elif !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG)
-#error "No pg_atomic_test_and_set provided"
+# error "No pg_atomic_test_and_set provided"
#endif /* !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG) */
#ifndef PG_HAVE_ATOMIC_INIT_U32
@@ -163,8 +162,7 @@ pg_atomic_init_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 val_)
static inline uint32
pg_atomic_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 xchg_)
{
    uint32 old;
    while (true)
    {
        old = pg_atomic_read_u32_impl(ptr);
@@ -180,8 +178,7 @@ pg_atomic_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 xchg_)
static inline uint32
pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
{
    uint32 old;
    while (true)
    {
        old = pg_atomic_read_u32_impl(ptr);
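The hunks above cut the loop bodies off; every one of these generic operations completes with the same compare-exchange retry shape, approximately (reconstructed sketch of the elided lines, shown for fetch-add):

/* Rough shape of the full fetch-add fallback. */
static inline uint32
generic_fetch_add_u32(volatile pg_atomic_uint32 *ptr, int32 add_)
{
    uint32 old;

    while (true)
    {
        old = pg_atomic_read_u32_impl(ptr);
        /* retry until no concurrent update slipped in between read and CAS */
        if (pg_atomic_compare_exchange_u32_impl(ptr, &old, old + add_))
            break;
    }
    return old;                 /* value before the addition */
}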
@@ -206,8 +203,7 @@ pg_atomic_fetch_sub_u32_impl(volatile pg_atomic_uint32 *ptr, int32 sub_)
static inline uint32
pg_atomic_fetch_and_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 and_)
{
    uint32 old;
    while (true)
    {
        old = pg_atomic_read_u32_impl(ptr);
@@ -223,8 +219,7 @@ pg_atomic_fetch_and_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 and_)
static inline uint32
pg_atomic_fetch_or_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 or_)
{
    uint32 old;
    while (true)
    {
        old = pg_atomic_read_u32_impl(ptr);
@@ -260,8 +255,7 @@ pg_atomic_sub_fetch_u32_impl(volatile pg_atomic_uint32 *ptr, int32 sub_)
static inline uint64
pg_atomic_exchange_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 xchg_)
{
    uint64 old;
    while (true)
    {
        old = ptr->value;
@@ -290,7 +284,7 @@ pg_atomic_write_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val)
static inline uint64
pg_atomic_read_u64_impl(volatile pg_atomic_uint64 *ptr)
{
    uint64 old = 0;
    /*
     * 64 bit reads aren't safe on all platforms. In the generic
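The comment is cut off by the hunk, but the uint64 old = 0 gives the idea away: where 8-byte loads aren't atomic, the read is done with a compare-exchange used purely for its side effect of reporting the current value; exchanging 0 for 0 is harmless if it happens to "succeed". Sketch:

/* Sketch of the CAS-based 64-bit read the comment above is introducing. */
static inline uint64
read_u64_via_cas(volatile pg_atomic_uint64 *ptr)
{
    uint64 old = 0;

    /* on mismatch, old is overwritten with the value actually stored */
    pg_atomic_compare_exchange_u64_impl(ptr, &old, 0);
    return old;
}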
@@ -318,8 +312,7 @@ pg_atomic_init_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 val_)
static inline uint64
pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
{
    uint64 old;
    while (true)
    {
        old = pg_atomic_read_u64_impl(ptr);
@@ -344,8 +337,7 @@ pg_atomic_fetch_sub_u64_impl(volatile pg_atomic_uint64 *ptr, int64 sub_)
static inline uint64
pg_atomic_fetch_and_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 and_)
{
    uint64 old;
    while (true)
    {
        old = pg_atomic_read_u64_impl(ptr);
@@ -361,8 +353,7 @@ pg_atomic_fetch_and_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 and_)
static inline uint64
pg_atomic_fetch_or_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 or_)
{
    uint64 old;
    while (true)
    {
        old = pg_atomic_read_u64_impl(ptr);
@@ -391,7 +382,6 @@ pg_atomic_sub_fetch_u64_impl(volatile pg_atomic_uint64 *ptr, int64 sub_)
}
#endif
#endif /* PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64 */
-#endif /* defined(PG_USE_INLINE) ||
- * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
src/tools/pgindent/exclude_file_patterns

#list of file patterns to exclude from pg_indent runs
/s_lock\.h$
+/atomics/
/ecpg/test/expected/
/snowball/libstemmer/
/pl/plperl/ppport\.h$