@@ -36,7 +36,7 @@ typedef enum {
memory_order_release = 3,
memory_order_acq_rel = 4,
memory_order_seq_cst = 5
-} memory_order;
+} ovs_memory_order;
#define atomic_thread_fence(ORDER) __c11_atomic_thread_fence(ORDER)
#define atomic_signal_fence(ORDER) __c11_atomic_signal_fence(ORDER)
@@ -28,7 +28,7 @@ typedef struct {
static inline bool
atomic_flag_test_and_set_explicit(volatile atomic_flag *object,
- memory_order order)
+ ovs_memory_order order)
{
return __atomic_test_and_set(&object->b, order);
}
@@ -40,7 +40,7 @@ atomic_flag_test_and_set(volatile atomic_flag *object)
}
static inline void
-atomic_flag_clear_explicit(volatile atomic_flag *object, memory_order order)
+atomic_flag_clear_explicit(volatile atomic_flag *object, ovs_memory_order order)
{
__atomic_clear(object, order);
}
@@ -39,7 +39,7 @@ typedef enum {
memory_order_release,
memory_order_acq_rel,
memory_order_seq_cst
-} memory_order;
+} ovs_memory_order;
#define IS_LOCKLESS_ATOMIC(OBJECT) (sizeof(OBJECT) <= sizeof(void *))
@@ -47,7 +47,7 @@ typedef enum {
#define atomic_init(OBJECT, VALUE) (*(OBJECT) = (VALUE), (void) 0)
static inline void
-atomic_thread_fence(memory_order order)
+atomic_thread_fence(ovs_memory_order order)
{
if (order != memory_order_relaxed) {
__sync_synchronize();
@@ -55,7 +55,7 @@ atomic_thread_fence(memory_order order)
}
static inline void
-atomic_thread_fence_if_seq_cst(memory_order order)
+atomic_thread_fence_if_seq_cst(ovs_memory_order order)
{
if (order == memory_order_seq_cst) {
__sync_synchronize();
@@ -63,7 +63,7 @@ atomic_thread_fence_if_seq_cst(memory_order order)
}
static inline void
-atomic_signal_fence(memory_order order)
+atomic_signal_fence(ovs_memory_order order)
{
if (order != memory_order_relaxed) {
asm volatile("" : : : "memory");
@@ -168,7 +168,7 @@ typedef struct {
static inline bool
atomic_flag_test_and_set_explicit(volatile atomic_flag *object,
- memory_order order)
+ ovs_memory_order order)
{
bool old;
@@ -188,7 +188,7 @@ atomic_flag_test_and_set_explicit(volatile atomic_flag *object,
static inline void
atomic_flag_clear_explicit(volatile atomic_flag *object,
- memory_order order)
+ ovs_memory_order order)
{
/* __sync_lock_release() by itself is a release barrier. For
* anything else additional barrier may be needed. */
@@ -28,7 +28,7 @@ typedef enum {
memory_order_release = __ATOMIC_RELEASE,
memory_order_acq_rel = __ATOMIC_ACQ_REL,
memory_order_seq_cst = __ATOMIC_SEQ_CST
-} memory_order;
+} ovs_memory_order;
#define ATOMIC_VAR_INIT(VALUE) (VALUE)
#define atomic_init(OBJECT, VALUE) (*(OBJECT) = (VALUE), (void) 0)
@@ -106,7 +106,7 @@ typedef enum {
memory_order_release,
memory_order_acq_rel,
memory_order_seq_cst
-} memory_order;
+} ovs_memory_order;
#define ATOMIC_BOOL_LOCK_FREE 2
#define ATOMIC_CHAR_LOCK_FREE 2
@@ -143,7 +143,7 @@ typedef enum {
* without a compiler memory barrier.
*/
static inline void
-atomic_compiler_barrier(memory_order order)
+atomic_compiler_barrier(ovs_memory_order order)
{
if (order > memory_order_consume) {
compiler_barrier();
@@ -151,7 +151,7 @@ atomic_compiler_barrier(memory_order order)
}
static inline void
-atomic_thread_fence(memory_order order)
+atomic_thread_fence(ovs_memory_order order)
{
if (order == memory_order_seq_cst) {
cpu_barrier();
@@ -161,7 +161,7 @@ atomic_thread_fence(memory_order order)
}
static inline void
-atomic_signal_fence(memory_order order)
+atomic_signal_fence(ovs_memory_order order)
{
atomic_compiler_barrier(order);
}
@@ -39,7 +39,7 @@ typedef enum {
memory_order_release,
memory_order_acq_rel,
memory_order_seq_cst
-} memory_order;
+} ovs_memory_order;
#if _MSC_VER > 1800 && defined(_M_IX86)
/* From WDK 10 _InlineInterlocked* functions are renamed to
@@ -63,7 +63,7 @@ typedef enum {
#define atomic_init(OBJECT, VALUE) (*(OBJECT) = (VALUE), (void) 0)
static inline void
-atomic_compiler_barrier(memory_order order)
+atomic_compiler_barrier(ovs_memory_order order)
{
/* In case of 'memory_order_consume', it is implicitly assumed that
* the compiler will not move instructions that have data-dependency
@@ -74,7 +74,7 @@ atomic_compiler_barrier(memory_order order)
}
static inline void
-atomic_thread_fence(memory_order order)
+atomic_thread_fence(ovs_memory_order order)
{
/* x86 is strongly ordered and acquire/release semantics come
* automatically. */
@@ -86,7 +86,7 @@ atomic_thread_fence(memory_order order)
}
static inline void
-atomic_signal_fence(memory_order order)
+atomic_signal_fence(ovs_memory_order order)
{
atomic_compiler_barrier(order);
}
@@ -40,19 +40,19 @@ typedef enum {
memory_order_release,
memory_order_acq_rel,
memory_order_seq_cst
-} memory_order;
+} ovs_memory_order;
#define ATOMIC_VAR_INIT(VALUE) (VALUE)
#define atomic_init(OBJECT, VALUE) (*(OBJECT) = (VALUE), (void) 0)
static inline void
-atomic_thread_fence(memory_order order OVS_UNUSED)
+atomic_thread_fence(ovs_memory_order order OVS_UNUSED)
{
/* Nothing to do. */
}
static inline void
-atomic_signal_fence(memory_order order OVS_UNUSED)
+atomic_signal_fence(ovs_memory_order order OVS_UNUSED)
{
/* Nothing to do. */
}
@@ -117,7 +117,7 @@ atomic_flag_test_and_set(volatile atomic_flag *flag_)
static inline bool
atomic_flag_test_and_set_explicit(volatile atomic_flag *flag,
- memory_order order OVS_UNUSED)
+ ovs_memory_order order OVS_UNUSED)
{
return atomic_flag_test_and_set(flag);
}
@@ -134,7 +134,7 @@ atomic_flag_clear(volatile atomic_flag *flag_)
static inline void
atomic_flag_clear_explicit(volatile atomic_flag *flag,
- memory_order order OVS_UNUSED)
+ ovs_memory_order order OVS_UNUSED)
{
atomic_flag_clear(flag);
}
@@ -107,7 +107,7 @@ typedef enum {
memory_order_release,
memory_order_acq_rel,
memory_order_seq_cst
-} memory_order;
+} ovs_memory_order;
#define ATOMIC_BOOL_LOCK_FREE 2
#define ATOMIC_CHAR_LOCK_FREE 2
@@ -144,7 +144,7 @@ typedef enum {
* without a compiler memory barrier.
*/
static inline void
-atomic_compiler_barrier(memory_order order)
+atomic_compiler_barrier(ovs_memory_order order)
{
if (order > memory_order_consume) {
compiler_barrier();
@@ -152,7 +152,7 @@ atomic_compiler_barrier(memory_order order)
}
static inline void
-atomic_thread_fence(memory_order order)
+atomic_thread_fence(ovs_memory_order order)
{
if (order == memory_order_seq_cst) {
cpu_barrier();
@@ -162,7 +162,7 @@ atomic_thread_fence(memory_order order)
}
static inline void
-atomic_signal_fence(memory_order order)
+atomic_signal_fence(ovs_memory_order order)
{
atomic_compiler_barrier(order);
}
@@ -106,7 +106,7 @@
* Barriers
* ========
*
- * enum memory_order specifies the strictness of a memory barrier. It has the
+ * enum ovs_memory_order specifies the strictness of a memory barrier. It has the
* following values:
*
* memory_order_relaxed:
@@ -183,20 +183,20 @@
* whole system, providing a total order for stores on all atomic
* variables.
*
- * OVS atomics require the memory_order to be passed as a compile-time constant
+ * OVS atomics require the ovs_memory_order to be passed as a compile-time constant
* value, as some compiler implementations may perform poorly if the memory
* order parameter is passed in as a run-time value.
*
* The following functions insert explicit barriers. Most of the other atomic
* functions also include barriers.
*
- * void atomic_thread_fence(memory_order order);
+ * void atomic_thread_fence(ovs_memory_order order);
*
* Inserts a barrier of the specified type.
*
* For memory_order_relaxed, this is a no-op.
*
- * void atomic_signal_fence(memory_order order);
+ * void atomic_signal_fence(ovs_memory_order order);
*
* Inserts a barrier of the specified type, but only with respect to
* signal handlers in the same thread as the barrier. This is
@@ -213,7 +213,7 @@
* The "store" and "compare_exchange" primitives match C11:
*
* void atomic_store(A *object, C value);
- * void atomic_store_explicit(A *object, C value, memory_order);
+ * void atomic_store_explicit(A *object, C value, ovs_memory_order);
*
* Atomically stores 'value' into '*object', respecting the given
* memory order (or memory_order_seq_cst for atomic_store()).
@@ -222,12 +222,12 @@
* bool atomic_compare_exchange_weak(A *object, C *expected, C desired);
* bool atomic_compare_exchange_strong_explicit(A *object, C *expected,
* C desired,
- * memory_order success,
- * memory_order failure);
+ * ovs_memory_order success,
+ * ovs_memory_order failure);
* bool atomic_compare_exchange_weak_explicit(A *object, C *expected,
* C desired,
- * memory_order success,
- * memory_order failure);
+ * ovs_memory_order success,
+ * ovs_memory_order failure);
*
* Atomically loads '*object' and compares it with '*expected' and if
* equal, stores 'desired' into '*object' (an atomic read-modify-write
@@ -249,7 +249,7 @@
* primitives in standard C:
*
* void atomic_read(A *src, C *dst);
- * void atomic_read_explicit(A *src, C *dst, memory_order);
+ * void atomic_read_explicit(A *src, C *dst, ovs_memory_order);
*
* Atomically loads a value from 'src', writing the value read into
* '*dst', respecting the given memory order (or memory_order_seq_cst
@@ -260,11 +260,11 @@
* void atomic_or(A *rmw, C arg, C *orig);
* void atomic_xor(A *rmw, C arg, C *orig);
* void atomic_and(A *rmw, C arg, C *orig);
- * void atomic_add_explicit(A *rmw, C arg, C *orig, memory_order);
- * void atomic_sub_explicit(A *rmw, C arg, C *orig, memory_order);
- * void atomic_or_explicit(A *rmw, C arg, C *orig, memory_order);
- * void atomic_xor_explicit(A *rmw, C arg, C *orig, memory_order);
- * void atomic_and_explicit(A *rmw, C arg, C *orig, memory_order);
+ * void atomic_add_explicit(A *rmw, C arg, C *orig, ovs_memory_order);
+ * void atomic_sub_explicit(A *rmw, C arg, C *orig, ovs_memory_order);
+ * void atomic_or_explicit(A *rmw, C arg, C *orig, ovs_memory_order);
+ * void atomic_xor_explicit(A *rmw, C arg, C *orig, ovs_memory_order);
+ * void atomic_and_explicit(A *rmw, C arg, C *orig, ovs_memory_order);
*
* Atomically applies the given operation, with 'arg' as the second
* operand, to '*rmw', and stores the original value of '*rmw' into
@@ -298,14 +298,14 @@
*
* bool atomic_flag_test_and_set(atomic_flag *object)
* bool atomic_flag_test_and_set_explicit(atomic_flag *object,
- * memory_order);
+ * ovs_memory_order);
*
* Atomically sets '*object', respecting the given memory order (or
* memory_order_seq_cst for atomic_flag_test_and_set()). Returns the
* previous value of the flag (false for clear, true for set).
*
* void atomic_flag_clear(atomic_flag *object);
- * void atomic_flag_clear_explicit(atomic_flag *object, memory_order);
+ * void atomic_flag_clear_explicit(atomic_flag *object, ovs_memory_order);
*
* Atomically clears '*object', respecting the given memory order (or
* memory_order_seq_cst for atomic_flag_clear()).
@@ -186,7 +186,7 @@
({ \
typeof(VAR) ovsrcu_var = (VAR); \
typeof(VALUE) ovsrcu_value = (VALUE); \
- memory_order ovsrcu_order = (ORDER); \
+ ovs_memory_order ovsrcu_order = (ORDER); \
\
atomic_store_explicit(&ovsrcu_var->p, ovsrcu_value, ovsrcu_order); \
(void *) 0; \
@@ -196,7 +196,7 @@ struct ovsrcu_pointer { ATOMIC(void *) p; };
#define OVSRCU_TYPE(TYPE) struct ovsrcu_pointer
#define OVSRCU_INITIALIZER(VALUE) { ATOMIC_VAR_INIT(VALUE) }
static inline void *
-ovsrcu_get__(const struct ovsrcu_pointer *pointer, memory_order order)
+ovsrcu_get__(const struct ovsrcu_pointer *pointer, ovs_memory_order order)
{
void *value;
atomic_read_explicit(&CONST_CAST(struct ovsrcu_pointer *, pointer)->p,
@@ -210,7 +210,7 @@ ovsrcu_get__(const struct ovsrcu_pointer *pointer, memory_order order)
static inline void ovsrcu_set__(struct ovsrcu_pointer *pointer,
const void *value,
- memory_order order)
+ ovs_memory_order order)
{
atomic_store_explicit(&pointer->p, CONST_CAST(void *, value), order);
}
@@ -247,7 +247,7 @@ void ovsrcu_postpone__(void (*function)(void *aux), void *aux);
* an RCU protected pointer to a malloc'd int. */
typedef struct { atomic_int v; } ovsrcu_index;
-static inline int ovsrcu_index_get__(const ovsrcu_index *i, memory_order order)
+static inline int ovsrcu_index_get__(const ovsrcu_index *i, ovs_memory_order order)
{
int ret;
atomic_read_explicit(CONST_CAST(atomic_int *, &i->v), &ret, order);
@@ -270,7 +270,7 @@ static inline int ovsrcu_index_get_protected(const ovsrcu_index *i)
}
static inline void ovsrcu_index_set__(ovsrcu_index *i, int value,
- memory_order order)
+ ovs_memory_order order)
{
atomic_store_explicit(&i->v, value, order);
}
DPDK commit [1] uses function variables named "memory_order". Compilation
fails with:

    error: declaration of 'memory_order' shadows a global declaration
    [-Werror=shadow]
     rte_atomic_thread_fence(int memory_order)

Rename enum memory_order to ovs_memory_order to avoid that conflict.

[1] 672a15056380 ("eal: add wrapper for C11 atomic thread fence")

Signed-off-by: Eli Britstein <elibr@nvidia.com>
---
 lib/ovs-atomic-clang.h        |  2 +-
 lib/ovs-atomic-flag-gcc4.7+.h |  4 ++--
 lib/ovs-atomic-gcc4+.h        | 12 ++++++------
 lib/ovs-atomic-gcc4.7+.h      |  2 +-
 lib/ovs-atomic-i586.h         |  8 ++++----
 lib/ovs-atomic-msvc.h         |  8 ++++----
 lib/ovs-atomic-pthreads.h     | 10 +++++-----
 lib/ovs-atomic-x86_64.h       |  8 ++++----
 lib/ovs-atomic.h              | 34 +++++++++++++++++-----------------
 lib/ovs-rcu.h                 | 10 +++++-----
 10 files changed, 49 insertions(+), 49 deletions(-)