Commit befa3e64 authored by Bruce Momjian

Revert 9.5 pgindent changes to atomics directory files

This is because there are many __asm__ blocks there that pgindent messes
up.  Also configure pgindent to skip that directory in the future.
parent 2aa0476d
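For context, the kind of hand-formatted block the commit message refers to looks roughly like the following (a condensed sketch modeled on the xchgb-based test-and-set hunk below; the function name is made up for illustration). pgindent tends to re-flow the instruction strings and the leading-colon constraint lines of such blocks, which is why the whole directory is being excluded rather than repaired after every run.

    /*
     * Sketch of a hand-aligned inline-assembly block of the sort found in the
     * atomics headers.  "tas_sketch" is an invented name; the body is modeled
     * on the test-and-set shown in the diff below.
     */
    static inline int
    tas_sketch(volatile char *lock)
    {
        register char _res = 1;

        __asm__ __volatile__(
            "	lock			\n"
            "	xchgb	%0,%1	\n"
    :       "+q"(_res), "+m"(*lock)
    :
    :       "memory");
        return _res == 0;
    }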
@@ -18,9 +18,9 @@
  * fence.
  */
 #if defined(__INTEL_COMPILER)
-#define pg_memory_barrier_impl() __mf()
+# define pg_memory_barrier_impl() __mf()
 #elif defined(__GNUC__)
-#define pg_memory_barrier_impl() __asm__ __volatile__ ("mf" : : : "memory")
+# define pg_memory_barrier_impl() __asm__ __volatile__ ("mf" : : : "memory")
 #elif defined(__hpux)
-#define pg_memory_barrier_impl() _Asm_mf()
+# define pg_memory_barrier_impl() _Asm_mf()
 #endif
@@ -80,8 +80,7 @@ typedef struct pg_atomic_uint64
 #endif /* defined(HAVE_ATOMICS) */
-#endif /* defined(__GNUC__) &&
-        * !defined(__INTEL_COMPILER) */
+#endif /* defined(__GNUC__) && !defined(__INTEL_COMPILER) */
 #if defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS)
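These per-compiler fences end up behind the generic pg_memory_barrier(), pg_read_barrier() and pg_write_barrier() macros from atomics.h. As a reminder of what they are for, a minimal flag-passing sketch using those public macro names (the shared variables are invented for illustration, not part of this commit):

    #include "postgres.h"
    #include "port/atomics.h"

    /* Illustrative shared state only. */
    static volatile int shared_payload;
    static volatile int payload_ready;

    static void
    producer(void)
    {
        shared_payload = 42;
        pg_write_barrier();     /* publish the payload before the flag */
        payload_ready = 1;
    }

    static int
    consumer(void)
    {
        if (payload_ready)
        {
            pg_read_barrier();  /* read the flag before the payload */
            return shared_payload;
        }
        return -1;
    }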
@@ -157,10 +156,9 @@ pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
     __asm__ __volatile__(
         "	lock			\n"
         "	xchgb	%0,%1	\n"
 :       "+q"(_res), "+m"(ptr->value)
 :
 :       "memory");
     return _res == 0;
 }
@@ -172,8 +170,7 @@ pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
      * On a TSO architecture like x86 it's sufficient to use a compiler
      * barrier to achieve release semantics.
      */
-    __asm__ __volatile__("":::"memory");
+    __asm__ __volatile__("" ::: "memory");
     ptr->value = 0;
 }
@@ -192,10 +189,9 @@ pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
         "	lock				\n"
         "	cmpxchgl	%4,%5	\n"
         "	setz		%2		\n"
-:       "=a"(*expected), "=m"(ptr->value), "=q"(ret)
-:       "a"(*expected), "r"(newval), "m"(ptr->value)
+:       "=a" (*expected), "=m"(ptr->value), "=q" (ret)
+:       "a" (*expected), "r" (newval), "m"(ptr->value)
 :       "memory", "cc");
     return (bool) ret;
 }
@@ -207,10 +203,9 @@ pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
     __asm__ __volatile__(
         "	lock		\n"
         "	xaddl	%0,%1	\n"
 :       "=q"(res), "=m"(ptr->value)
-:       "0"(add_), "m"(ptr->value)
+:       "0" (add_), "m"(ptr->value)
 :       "memory", "cc");
     return res;
 }
@@ -231,10 +226,9 @@ pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
         "	lock				\n"
         "	cmpxchgq	%4,%5	\n"
         "	setz		%2		\n"
-:       "=a"(*expected), "=m"(ptr->value), "=q"(ret)
-:       "a"(*expected), "r"(newval), "m"(ptr->value)
+:       "=a" (*expected), "=m"(ptr->value), "=q" (ret)
+:       "a" (*expected), "r" (newval), "m"(ptr->value)
 :       "memory", "cc");
     return (bool) ret;
 }
@@ -246,19 +240,16 @@ pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
     __asm__ __volatile__(
         "	lock		\n"
         "	xaddq	%0,%1	\n"
 :       "=q"(res), "=m"(ptr->value)
-:       "0"(add_), "m"(ptr->value)
+:       "0" (add_), "m"(ptr->value)
 :       "memory", "cc");
     return res;
 }
 #endif /* __x86_64__ */
-#endif /* defined(__GNUC__) &&
-        * !defined(__INTEL_COMPILER) */
+#endif /* defined(__GNUC__) && !defined(__INTEL_COMPILER) */
 #endif /* HAVE_ATOMICS */
-#endif /* defined(PG_USE_INLINE) ||
-        * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
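The lock; cmpxchg implementations above back the public pg_atomic_compare_exchange_u32(), which returns true on success and, on failure, writes the value actually found into *expected. That contract is what makes the usual caller-side retry loop cheap. A small sketch of such a caller (the capped-add helper is hypothetical, not part of this commit):

    #include "postgres.h"
    #include "port/atomics.h"

    /* Hypothetical example: add to a counter but never exceed a cap. */
    static uint32
    add_capped(volatile pg_atomic_uint32 *counter, uint32 add, uint32 cap)
    {
        uint32      oldval = pg_atomic_read_u32(counter);

        while (true)
        {
            uint32      newval = Min(oldval + add, cap);

            /* On failure, oldval is updated to the value currently stored. */
            if (pg_atomic_compare_exchange_u32(counter, &oldval, newval))
                return newval;
        }
    }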
@@ -14,7 +14,7 @@
 /* intentionally no include guards, should only be included by atomics.h */
 #ifndef INSIDE_ATOMICS_H
-#error "should be included via atomics.h"
+# error "should be included via atomics.h"
 #endif
 #ifndef pg_memory_barrier_impl
@@ -75,8 +75,7 @@ typedef struct pg_atomic_flag
  * be content with just one byte instead of 4, but that's not too much
  * waste.
  */
-#if defined(__hppa) || defined(__hppa__) /* HP PA-RISC, GCC and HP
-                                          * compilers */
+#if defined(__hppa) || defined(__hppa__) /* HP PA-RISC, GCC and HP compilers */
     int sema[4];
 #else
     int sema;
@@ -93,8 +92,7 @@ typedef struct pg_atomic_flag
 typedef struct pg_atomic_uint32
 {
     /* Check pg_atomic_flag's definition above for an explanation */
-#if defined(__hppa) || defined(__hppa__) /* HP PA-RISC, GCC and HP
-                                          * compilers */
+#if defined(__hppa) || defined(__hppa__) /* HP PA-RISC, GCC and HP compilers */
     int sema[4];
 #else
     int sema;
@@ -147,5 +145,4 @@ extern uint32 pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32
 #endif /* PG_HAVE_ATOMIC_U32_SIMULATION */
-#endif /* defined(PG_USE_INLINE) ||
-        * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
@@ -68,15 +68,13 @@ pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
     uint32 current;
     _Asm_mov_to_ar(_AREG_CCV, *expected, MINOR_FENCE);
     /*
      * We want a barrier, not just release/acquire semantics.
      */
     _Asm_mf();
     /*
-     * Notes: DOWN_MEM_FENCE | _UP_MEM_FENCE prevents reordering by the
-     * compiler
+     * Notes:
+     * DOWN_MEM_FENCE | _UP_MEM_FENCE prevents reordering by the compiler
      */
     current = _Asm_cmpxchg(_SZ_W, /* word */
                            _SEM_REL,
@@ -113,5 +111,4 @@ pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
 #endif /* defined(HAVE_ATOMICS) */
-#endif /* defined(PG_USE_INLINE) ||
-        * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
@@ -40,21 +40,21 @@
  * definitions where possible, and use this only as a fallback.
  */
 #if !defined(pg_memory_barrier_impl)
-#if defined(HAVE_GCC__ATOMIC_INT32_CAS)
-#define pg_memory_barrier_impl() __atomic_thread_fence(__ATOMIC_SEQ_CST)
-#elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
-#define pg_memory_barrier_impl() __sync_synchronize()
-#endif
+# if defined(HAVE_GCC__ATOMIC_INT32_CAS)
+# define pg_memory_barrier_impl() __atomic_thread_fence(__ATOMIC_SEQ_CST)
+# elif (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
+# define pg_memory_barrier_impl() __sync_synchronize()
+# endif
 #endif /* !defined(pg_memory_barrier_impl) */
 #if !defined(pg_read_barrier_impl) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
 /* acquire semantics include read barrier semantics */
-#define pg_read_barrier_impl() __atomic_thread_fence(__ATOMIC_ACQUIRE)
+# define pg_read_barrier_impl() __atomic_thread_fence(__ATOMIC_ACQUIRE)
 #endif
 #if !defined(pg_write_barrier_impl) && defined(HAVE_GCC__ATOMIC_INT32_CAS)
 /* release semantics include write barrier semantics */
-#define pg_write_barrier_impl() __atomic_thread_fence(__ATOMIC_RELEASE)
+# define pg_write_barrier_impl() __atomic_thread_fence(__ATOMIC_RELEASE)
 #endif
 #ifdef HAVE_ATOMICS
@@ -87,8 +87,7 @@ typedef struct pg_atomic_uint32
     volatile uint32 value;
 } pg_atomic_uint32;
-#endif /* defined(HAVE_GCC__ATOMIC_INT32_CAS) ||
-        * defined(HAVE_GCC__SYNC_INT32_CAS) */
+#endif /* defined(HAVE_GCC__ATOMIC_INT32_CAS) || defined(HAVE_GCC__SYNC_INT32_CAS) */
 /* generic gcc based atomic uint64 implementation */
 #if !defined(PG_HAVE_ATOMIC_U64_SUPPORT) \
@@ -102,8 +101,7 @@ typedef struct pg_atomic_uint64
     volatile uint64 value pg_attribute_aligned(8);
 } pg_atomic_uint64;
-#endif /* defined(HAVE_GCC__ATOMIC_INT64_CAS) ||
-        * defined(HAVE_GCC__SYNC_INT64_CAS) */
+#endif /* defined(HAVE_GCC__ATOMIC_INT64_CAS) || defined(HAVE_GCC__SYNC_INT64_CAS) */
 /*
  * Implementation follows. Inlined or directly included from atomics.c
@@ -177,7 +175,6 @@ pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
 {
     bool ret;
     uint32 current;
-
     current = __sync_val_compare_and_swap(&ptr->value, *expected, newval);
     ret = current == *expected;
     *expected = current;
@@ -216,7 +213,6 @@ pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
 {
     bool ret;
     uint64 current;
-
     current = __sync_val_compare_and_swap(&ptr->value, *expected, newval);
     ret = current == *expected;
     *expected = current;
@@ -235,7 +231,6 @@ pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
 #endif /* !defined(PG_DISABLE_64_BIT_ATOMICS) */
-#endif /* defined(PG_USE_INLINE) ||
-        * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
 #endif /* defined(HAVE_ATOMICS) */
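The HAVE_GCC__ATOMIC_INT32_CAS / HAVE_GCC__SYNC_INT32_CAS symbols tested above come from configure-time checks that try to compile and link small programs using the corresponding compiler builtins. The probes are roughly of this shape (a sketch; the actual programs configure uses differ in detail):

    /* Sketch of a feature test for the gcc atomic builtins. */
    int
    main(void)
    {
        int     val = 0;
        int     expected = 0;

        /* __atomic variant, as used when HAVE_GCC__ATOMIC_INT32_CAS is set */
        __atomic_compare_exchange_n(&val, &expected, 1, 0,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
        __atomic_thread_fence(__ATOMIC_SEQ_CST);

        /* older __sync variant, as used with HAVE_GCC__SYNC_INT32_CAS */
        (void) __sync_val_compare_and_swap(&val, 0, 1);
        __sync_synchronize();

        return 0;
    }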
@@ -41,9 +41,7 @@ typedef struct pg_atomic_uint32
 } pg_atomic_uint32;
 #define PG_HAVE_ATOMIC_U64_SUPPORT
-typedef struct __declspec (
-                            align(8))
-pg_atomic_uint64
+typedef struct __declspec(align(8)) pg_atomic_uint64
 {
     volatile uint64 value;
 } pg_atomic_uint64;
@@ -62,7 +60,6 @@ pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
 {
     bool ret;
     uint32 current;
-
     current = InterlockedCompareExchange(&ptr->value, newval, *expected);
     ret = current == *expected;
     *expected = current;
@@ -91,7 +88,6 @@ pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
 {
     bool ret;
     uint64 current;
-
     current = _InterlockedCompareExchange64(&ptr->value, newval, *expected);
     ret = current == *expected;
     *expected = current;
@@ -112,5 +108,4 @@ pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
 #endif /* HAVE_ATOMICS */
-#endif /* defined(PG_USE_INLINE) ||
-        * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
@@ -30,13 +30,13 @@
  * membar #StoreStore | #LoadStore | #StoreLoad | #LoadLoad on x86/sparc
  * respectively.
  */
-#define pg_memory_barrier_impl() __machine_rw_barrier()
+# define pg_memory_barrier_impl() __machine_rw_barrier()
 #endif
 #ifndef pg_read_barrier_impl
-#define pg_read_barrier_impl() __machine_r_barrier()
+# define pg_read_barrier_impl() __machine_r_barrier()
 #endif
 #ifndef pg_write_barrier_impl
-#define pg_write_barrier_impl() __machine_w_barrier()
+# define pg_write_barrier_impl() __machine_w_barrier()
 #endif
 #endif /* HAVE_MBARRIER_H */
@@ -107,5 +107,4 @@ pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
 #endif /* defined(HAVE_ATOMICS) */
-#endif /* defined(PG_USE_INLINE) ||
-        * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
@@ -52,9 +52,9 @@ pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
     uint64 current;
     /*
-     * xlc's documentation tells us: "If __compare_and_swap is used as a
-     * locking primitive, insert a call to the __isync built-in function at
-     * the start of any critical sections."
+     * xlc's documentation tells us:
+     * "If __compare_and_swap is used as a locking primitive, insert a call to
+     * the __isync built-in function at the start of any critical sections."
      */
     __isync();
@@ -62,8 +62,8 @@ pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr,
     * XXX: __compare_and_swap is defined to take signed parameters, but that
     * shouldn't matter since we don't perform any arithmetic operations.
     */
-    current = (uint32) __compare_and_swap((volatile int *) ptr->value,
-                                          (int) *expected, (int) newval);
+    current = (uint32)__compare_and_swap((volatile int*)ptr->value,
+                                         (int)*expected, (int)newval);
     ret = current == *expected;
     *expected = current;
     return ret;
@@ -88,8 +88,8 @@ pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr,
     __isync();
-    current = (uint64) __compare_and_swaplp((volatile long *) ptr->value,
-                                            (long) *expected, (long) newval);
+    current = (uint64)__compare_and_swaplp((volatile long*)ptr->value,
+                                           (long)*expected, (long)newval);
     ret = current == *expected;
     *expected = current;
     return ret;
@@ -106,5 +106,4 @@ pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
 #endif /* defined(HAVE_ATOMICS) */
-#endif /* defined(PG_USE_INLINE) ||
-        * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
@@ -14,7 +14,7 @@
 /* intentionally no include guards, should only be included by atomics.h */
 #ifndef INSIDE_ATOMICS_H
-#error "should be included via atomics.h"
+# error "should be included via atomics.h"
 #endif
 /*
@@ -22,10 +22,10 @@
  * barriers.
  */
 #if !defined(pg_read_barrier_impl)
-#define pg_read_barrier_impl pg_memory_barrier_impl
+# define pg_read_barrier_impl pg_memory_barrier_impl
 #endif
 #if !defined(pg_write_barrier_impl)
-#define pg_write_barrier_impl pg_memory_barrier_impl
+# define pg_write_barrier_impl pg_memory_barrier_impl
 #endif
 #ifndef PG_HAVE_SPIN_DELAY
@@ -114,7 +114,6 @@ static inline bool
 pg_atomic_test_set_flag_impl(volatile pg_atomic_flag *ptr)
 {
     uint32 value = 0;
-
     return pg_atomic_compare_exchange_u32_impl(ptr, &value, 1);
 }
@@ -130,9 +129,9 @@ static inline void
 pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
 {
     /*
-     * Use a memory barrier + plain write if we have a native memory barrier.
-     * But don't do so if memory barriers use spinlocks - that'd lead to
-     * circularity if flags are used to implement spinlocks.
+     * Use a memory barrier + plain write if we have a native memory
+     * barrier. But don't do so if memory barriers use spinlocks - that'd lead
+     * to circularity if flags are used to implement spinlocks.
      */
 #ifndef PG_HAVE_MEMORY_BARRIER_EMULATION
     /* XXX: release semantics suffice? */
@@ -145,7 +144,7 @@ pg_atomic_clear_flag_impl(volatile pg_atomic_flag *ptr)
 }
 #elif !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG)
-#error "No pg_atomic_test_and_set provided"
+# error "No pg_atomic_test_and_set provided"
 #endif /* !defined(PG_HAVE_ATOMIC_TEST_SET_FLAG) */
@@ -164,7 +163,6 @@ static inline uint32
 pg_atomic_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 xchg_)
 {
     uint32 old;
-
     while (true)
     {
         old = pg_atomic_read_u32_impl(ptr);
@@ -181,7 +179,6 @@ static inline uint32
 pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
 {
     uint32 old;
-
     while (true)
     {
         old = pg_atomic_read_u32_impl(ptr);
@@ -207,7 +204,6 @@ static inline uint32
 pg_atomic_fetch_and_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 and_)
 {
     uint32 old;
-
     while (true)
     {
         old = pg_atomic_read_u32_impl(ptr);
@@ -224,7 +220,6 @@ static inline uint32
 pg_atomic_fetch_or_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 or_)
 {
     uint32 old;
-
     while (true)
     {
         old = pg_atomic_read_u32_impl(ptr);
@@ -261,7 +256,6 @@ static inline uint64
 pg_atomic_exchange_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 xchg_)
 {
     uint64 old;
-
     while (true)
     {
         old = ptr->value;
@@ -319,7 +313,6 @@ static inline uint64
 pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
 {
     uint64 old;
-
     while (true)
     {
         old = pg_atomic_read_u64_impl(ptr);
@@ -345,7 +338,6 @@ static inline uint64
 pg_atomic_fetch_and_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 and_)
 {
     uint64 old;
-
     while (true)
     {
         old = pg_atomic_read_u64_impl(ptr);
@@ -362,7 +354,6 @@ static inline uint64
 pg_atomic_fetch_or_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 or_)
 {
     uint64 old;
-
     while (true)
     {
         old = pg_atomic_read_u64_impl(ptr);
@@ -393,5 +384,4 @@ pg_atomic_sub_fetch_u64_impl(volatile pg_atomic_uint64 *ptr, int64 sub_)
 #endif /* PG_HAVE_ATOMIC_COMPARE_EXCHANGE_U64 */
-#endif /* defined(PG_USE_INLINE) ||
-        * defined(ATOMICS_INCLUDE_DEFINITIONS) */
+#endif /* defined(PG_USE_INLINE) || defined(ATOMICS_INCLUDE_DEFINITIONS) */
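The generic fallback hunks above truncate the bodies of the compare-and-swap retry loops, so for readability here is the complete shape of one of them, paraphrased from the generic fetch-add fallback rather than copied from this diff (the helper name is invented):

    /* Paraphrase of the fallback pattern in the generic atomics header:
     * emulate fetch-add with a read + compare-exchange retry loop. */
    static inline uint32
    fetch_add_via_cas(volatile pg_atomic_uint32 *ptr, int32 add_)
    {
        uint32      old;

        while (true)
        {
            old = pg_atomic_read_u32_impl(ptr);
            if (pg_atomic_compare_exchange_u32_impl(ptr, &old, old + add_))
                break;
        }
        return old;
    }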
 #list of file patterns to exclude from pg_indent runs
 /s_lock\.h$
+/atomics/
 /ecpg/test/expected/
 /snowball/libstemmer/
 /pl/plperl/ppport\.h$