Commit e530be96 authored by Tom Lane's avatar Tom Lane

Remove duplicate reads from the inner loops in generic atomic ops.

The pg_atomic_compare_exchange_xxx functions are defined to update
*expected to whatever they read from the target variable.  Therefore,
there's no need to do additional explicit reads after we've initialized
the "old" variable.  The actual benefit of this is somewhat debatable,
but it seems fairly unlikely to hurt anything, especially since we
will override the generic implementations in most performance-sensitive
cases.

Yura Sokolov, reviewed by Jesper Pedersen and myself

Discussion: https://postgr.es/m/7f65886daca545067f82bf2b463b218d@postgrespro.ru
parent 34ae1828
...@@ -170,12 +170,9 @@ static inline uint32 ...@@ -170,12 +170,9 @@ static inline uint32
pg_atomic_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 xchg_) pg_atomic_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 xchg_)
{ {
uint32 old; uint32 old;
while (true)
{
old = pg_atomic_read_u32_impl(ptr); old = pg_atomic_read_u32_impl(ptr);
if (pg_atomic_compare_exchange_u32_impl(ptr, &old, xchg_)) while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, xchg_))
break; /* skip */;
}
return old; return old;
} }
#endif #endif
...@@ -186,12 +183,9 @@ static inline uint32 ...@@ -186,12 +183,9 @@ static inline uint32
pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_) pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_)
{ {
uint32 old; uint32 old;
while (true)
{
old = pg_atomic_read_u32_impl(ptr); old = pg_atomic_read_u32_impl(ptr);
if (pg_atomic_compare_exchange_u32_impl(ptr, &old, old + add_)) while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, old + add_))
break; /* skip */;
}
return old; return old;
} }
#endif #endif
...@@ -211,12 +205,9 @@ static inline uint32 ...@@ -211,12 +205,9 @@ static inline uint32
pg_atomic_fetch_and_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 and_) pg_atomic_fetch_and_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 and_)
{ {
uint32 old; uint32 old;
while (true)
{
old = pg_atomic_read_u32_impl(ptr); old = pg_atomic_read_u32_impl(ptr);
if (pg_atomic_compare_exchange_u32_impl(ptr, &old, old & and_)) while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, old & and_))
break; /* skip */;
}
return old; return old;
} }
#endif #endif
...@@ -227,12 +218,9 @@ static inline uint32 ...@@ -227,12 +218,9 @@ static inline uint32
pg_atomic_fetch_or_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 or_) pg_atomic_fetch_or_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 or_)
{ {
uint32 old; uint32 old;
while (true)
{
old = pg_atomic_read_u32_impl(ptr); old = pg_atomic_read_u32_impl(ptr);
if (pg_atomic_compare_exchange_u32_impl(ptr, &old, old | or_)) while (!pg_atomic_compare_exchange_u32_impl(ptr, &old, old | or_))
break; /* skip */;
}
return old; return old;
} }
#endif #endif
...@@ -261,12 +249,9 @@ static inline uint64 ...@@ -261,12 +249,9 @@ static inline uint64
pg_atomic_exchange_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 xchg_) pg_atomic_exchange_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 xchg_)
{ {
uint64 old; uint64 old;
while (true)
{
old = ptr->value; old = ptr->value;
if (pg_atomic_compare_exchange_u64_impl(ptr, &old, xchg_)) while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, xchg_))
break; /* skip */;
}
return old; return old;
} }
#endif #endif
...@@ -357,12 +342,9 @@ static inline uint64 ...@@ -357,12 +342,9 @@ static inline uint64
pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_) pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_)
{ {
uint64 old; uint64 old;
while (true)
{
old = pg_atomic_read_u64_impl(ptr); old = pg_atomic_read_u64_impl(ptr);
if (pg_atomic_compare_exchange_u64_impl(ptr, &old, old + add_)) while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, old + add_))
break; /* skip */;
}
return old; return old;
} }
#endif #endif
...@@ -382,12 +364,9 @@ static inline uint64 ...@@ -382,12 +364,9 @@ static inline uint64
pg_atomic_fetch_and_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 and_) pg_atomic_fetch_and_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 and_)
{ {
uint64 old; uint64 old;
while (true)
{
old = pg_atomic_read_u64_impl(ptr); old = pg_atomic_read_u64_impl(ptr);
if (pg_atomic_compare_exchange_u64_impl(ptr, &old, old & and_)) while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, old & and_))
break; /* skip */;
}
return old; return old;
} }
#endif #endif
...@@ -398,12 +377,9 @@ static inline uint64 ...@@ -398,12 +377,9 @@ static inline uint64
pg_atomic_fetch_or_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 or_) pg_atomic_fetch_or_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 or_)
{ {
uint64 old; uint64 old;
while (true)
{
old = pg_atomic_read_u64_impl(ptr); old = pg_atomic_read_u64_impl(ptr);
if (pg_atomic_compare_exchange_u64_impl(ptr, &old, old | or_)) while (!pg_atomic_compare_exchange_u64_impl(ptr, &old, old | or_))
break; /* skip */;
}
return old; return old;
} }
#endif #endif
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment