@@ -29,6 +29,9 @@ using std::atomic_compare_exchange_strong_explicit;
using std::atomic_compare_exchange_weak;
using std::atomic_compare_exchange_weak_explicit;
+/* C11-style exchange, provided directly by the C++ <atomic> header. */
+using std::atomic_exchange;
+using std::atomic_exchange_explicit;
+
#define atomic_read(SRC, DST) \
atomic_read_explicit(SRC, DST, memory_order_seq_cst)
#define atomic_read_explicit(SRC, DST, ORDER) \
@@ -67,6 +67,11 @@ typedef enum {
#define atomic_compare_exchange_weak_explicit(DST, EXP, SRC, ORD1, ORD2) \
__c11_atomic_compare_exchange_weak(DST, EXP, SRC, ORD1, ORD2)
+/* Atomic exchange: stores ARG into '*RMW' and yields the previous value.
+ * Maps directly onto Clang's __c11_atomic_exchange builtin; the caller's
+ * memory order is forwarded unchanged. */
+#define atomic_exchange(RMW, ARG) \
+    atomic_exchange_explicit(RMW, ARG, memory_order_seq_cst)
+#define atomic_exchange_explicit(RMW, ARG, ORDER) \
+    __c11_atomic_exchange(RMW, ARG, ORDER)
+
#define atomic_add(RMW, ARG, ORIG) \
atomic_add_explicit(RMW, ARG, ORIG, memory_order_seq_cst)
#define atomic_sub(RMW, ARG, ORIG) \
@@ -128,6 +128,11 @@ atomic_signal_fence(memory_order order)
#define atomic_compare_exchange_weak_explicit \
atomic_compare_exchange_strong_explicit
+/* Atomic exchange: stores SRC into '*DST' and yields the previous value.
+ *
+ * GCC documents __sync_lock_test_and_set() as an acquire barrier only,
+ * not a full barrier, so issue a full barrier first to give this
+ * exchange the same seq_cst strength as the other __sync-based
+ * operations in this file.  ORDER is otherwise ignored, as it is by
+ * the compare_exchange macros above. */
+#define atomic_exchange_explicit(DST, SRC, ORDER) \
+    (__sync_synchronize(),                        \
+     __sync_lock_test_and_set(DST, SRC))
+#define atomic_exchange(DST, SRC) \
+    atomic_exchange_explicit(DST, SRC, memory_order_seq_cst)
+
#define atomic_op__(RMW, OP, ARG, ORIG) \
({ \
typeof(RMW) rmw__ = (RMW); \
@@ -61,6 +61,11 @@ typedef enum {
#define atomic_compare_exchange_weak_explicit(DST, EXP, SRC, ORD1, ORD2) \
__atomic_compare_exchange_n(DST, EXP, SRC, true, ORD1, ORD2)
+/* Atomic exchange: stores SRC into '*DST' and yields the previous value.
+ * __atomic_exchange_n() honors the requested memory order directly. */
+#define atomic_exchange_explicit(DST, SRC, ORDER) \
+    __atomic_exchange_n(DST, SRC, ORDER)
+#define atomic_exchange(DST, SRC) \
+    atomic_exchange_explicit(DST, SRC, memory_order_seq_cst)
+
#define atomic_add(RMW, OPERAND, ORIG) \
atomic_add_explicit(RMW, OPERAND, ORIG, memory_order_seq_cst)
#define atomic_sub(RMW, OPERAND, ORIG) \
@@ -400,6 +400,11 @@ atomic_signal_fence(memory_order order)
#define atomic_compare_exchange_weak_explicit \
atomic_compare_exchange_strong_explicit
+/* Atomic exchange in terms of the asm helper atomic_exchange__
+ * (presumably an XCHG-based helper in the style of atomic_add__
+ * below).
+ *
+ * NOTE(review): atomic_exchange__ is not defined in this hunk --
+ * confirm this patch also adds it to this header. */
+#define atomic_exchange_explicit(RMW, ARG, ORDER) \
+    atomic_exchange__(RMW, ARG, ORDER)
+#define atomic_exchange(RMW, ARG) \
+    atomic_exchange_explicit(RMW, ARG, memory_order_seq_cst)
+
#define atomic_add__(RMW, ARG, CLOB) \
asm volatile("lock; xadd %0,%1 ; " \
"# atomic_add__ " \
@@ -31,6 +31,15 @@ void atomic_unlock__(void *);
atomic_unlock__(DST), \
false)))
+/* Atomically replaces '*DST' with 'SRC' under the address lock,
+ * evaluating to the value '*DST' held before the store.
+ *
+ * Uses "tmp__" rather than "__tmp": leading-underscore identifiers
+ * are reserved for the implementation, and this file's convention is
+ * a trailing "__" (cf. atomic_lock__).  'SRC' is parenthesized so a
+ * low-precedence argument expression assigns correctly. */
+#define atomic_exchange_locked(DST, SRC)  \
+    ({                                    \
+        atomic_lock__(DST);               \
+        typeof(*(DST)) tmp__ = *(DST);    \
+        *(DST) = (SRC);                   \
+        atomic_unlock__(DST);             \
+        tmp__;                            \
+    })
+
#define atomic_op_locked_add +=
#define atomic_op_locked_sub -=
#define atomic_op_locked_or |=
@@ -345,6 +345,28 @@ atomic_signal_fence(memory_order order)
#define atomic_compare_exchange_weak_explicit \
atomic_compare_exchange_strong_explicit
+/* While intrinsics offering different memory ordering
+ * are available in MSVC C compiler, they are not defined
+ * in the C++ compiler. Ignore for compatibility.
+ *
+ * Use nested ternary operators as the GNU extension ({})
+ * is not available.
+ *
+ * 'DST' is parenthesized inside each cast so that pointer
+ * expressions such as "p + i" are converted as a whole.
+ * The unsupported-size arm uses plain abort(): ovs_abort()
+ * requires (err_no, format, ...) arguments and cannot be
+ * called with none.
+ */
+
+#define atomic_exchange_explicit(DST, SRC, ORDER)                  \
+    ((sizeof *(DST) == 1) ?                                        \
+        _InterlockedExchange8((char volatile *) (DST), (SRC))      \
+     : (sizeof *(DST) == 2) ?                                      \
+        _InterlockedExchange16((short volatile *) (DST), (SRC))    \
+     : (sizeof *(DST) == 4) ?                                      \
+        _InterlockedExchange((long int volatile *) (DST), (SRC))   \
+     : (sizeof *(DST) == 8) ?                                      \
+        _InterlockedExchange64((__int64 volatile *) (DST), (SRC))  \
+     : (abort(), 0))
+
+#define atomic_exchange(DST, SRC) \
+    atomic_exchange_explicit(DST, SRC, memory_order_seq_cst)
+
/* MSVCs c++ compiler implements c11 atomics and looking through its
* implementation (in xatomic.h), orders are ignored for x86 platform.
* Do the same here. */
@@ -77,6 +77,11 @@ atomic_signal_fence(memory_order order OVS_UNUSED)
#define atomic_compare_exchange_weak_explicit \
atomic_compare_exchange_strong_explicit
+/* Atomic exchange via the mutex-protected fallback.  The requested
+ * memory order is evaluated and then discarded -- presumably the
+ * lock taken by atomic_exchange_locked() already provides the
+ * required ordering; confirm against atomic_lock__(). */
+#define atomic_exchange(DST, SRC) \
+    atomic_exchange_locked(DST, SRC)
+#define atomic_exchange_explicit(DST, SRC, ORDER) \
+    ((void) (ORDER), atomic_exchange(DST, SRC))
+
#define atomic_add(RMW, ARG, ORIG) atomic_op_locked(RMW, add, ARG, ORIG)
#define atomic_sub(RMW, ARG, ORIG) atomic_op_locked(RMW, sub, ARG, ORIG)
#define atomic_or( RMW, ARG, ORIG) atomic_op_locked(RMW, or, ARG, ORIG)
@@ -274,6 +274,11 @@ atomic_signal_fence(memory_order order)
#define atomic_compare_exchange_weak_explicit \
atomic_compare_exchange_strong_explicit
+/* Atomic exchange in terms of the asm helper atomic_exchange__
+ * (presumably an XCHG-based helper in the style of atomic_add__
+ * below).
+ *
+ * NOTE(review): atomic_exchange__ is not defined in this hunk --
+ * confirm this patch also adds it to this header. */
+#define atomic_exchange_explicit(RMW, ARG, ORDER) \
+    atomic_exchange__(RMW, ARG, ORDER)
+#define atomic_exchange(RMW, ARG) \
+    atomic_exchange_explicit(RMW, ARG, memory_order_seq_cst)
+
#define atomic_add__(RMW, ARG, CLOB) \
asm volatile("lock; xadd %0,%1 ; " \
"# atomic_add__ " \
@@ -210,7 +210,7 @@
* In this section, A is an atomic type and C is the corresponding non-atomic
* type.
*
- * The "store" and "compare_exchange" primitives match C11:
+ * The "store", "exchange", and "compare_exchange" primitives match C11:
*
* void atomic_store(A *object, C value);
* void atomic_store_explicit(A *object, C value, memory_order);
@@ -244,6 +244,12 @@
* efficiently, so it should be used if the application will need to
* loop anyway.
*
+ * C atomic_exchange(A *object, C desired);
+ * C atomic_exchange_explicit(A *object, C desired, memory_order);
+ *
+ * Atomically stores 'desired' into '*object', returning the value
+ * previously held.
+ *
* The following primitives differ from the C11 ones (and have different names)
* because there does not appear to be a way to implement the standard
* primitives in standard C: