@@ -176,6 +176,7 @@
enum reg_class
{
NO_REGS, /* no registers in set. */
+ R0, /* register r0. */
ALL_REGS, /* all registers. */
LIM_REG_CLASSES /* max value + 1. */
};
@@ -189,6 +190,7 @@ enum reg_class
#define REG_CLASS_NAMES \
{ \
"NO_REGS", \
+ "R0", \
"ALL_REGS" \
}
@@ -202,14 +204,16 @@ enum reg_class
#define REG_CLASS_CONTENTS \
{ \
0x00000000, /* NO_REGS */ \
- 0x00000fff, /* ALL_REGS */ \
+ 0x00000001, /* R0 */ \
+ 0x00000fff, /* ALL_REGS */ \
}
/* A C expression whose value is a register class containing hard
register REGNO. In general there is more that one such class;
choose a class which is "minimal", meaning that no smaller class
also contains the register. */
-#define REGNO_REG_CLASS(REGNO) GENERAL_REGS
+#define REGNO_REG_CLASS(REGNO) \
+ ((REGNO) == 0 ? R0 : GENERAL_REGS)
/* A macro whose definition is the name of the class to which a
valid base register must belong. A base register is one used in
@@ -25,6 +25,11 @@
(define_c_enum "unspec" [
UNSPEC_LDINDABS
UNSPEC_XADD
+ UNSPEC_XAND
+ UNSPEC_XOR
+ UNSPEC_XXOR
+ UNSPEC_XCHG
+ UNSPEC_CMPXCHG
])
;;;; Constants
@@ -56,11 +61,10 @@
;; st generic store instructions for immediates.
;; stx generic store instructions.
;; jmp jump instructions.
-;; xadd atomic exchange-and-add instructions.
;; multi multiword sequence (or user asm statements).
(define_attr "type"
- "unknown,alu,alu32,end,ld,lddw,ldx,st,stx,jmp,xadd,multi"
+ "unknown,alu,alu32,end,ld,lddw,ldx,st,stx,jmp,multi"
(const_string "unknown"))
;; Length of instruction in bytes.
@@ -506,17 +510,111 @@
"ldabs<ldop>\t%0"
[(set_attr "type" "ld")])
-;;;; Atomic increments
+;;;; Atomic operations
(define_mode_iterator AMO [SI DI])
-(define_insn "atomic_add<AMO:mode>"
- [(set (match_operand:AMO 0 "memory_operand" "+m")
- (unspec_volatile:AMO
- [(plus:AMO (match_dup 0)
- (match_operand:AMO 1 "register_operand" "r"))
- (match_operand:SI 2 "const_int_operand")] ;; Memory model.
- UNSPEC_XADD))]
+(define_insn "atomic_fetch_add<AMO:mode>"
+ [(set (match_operand:AMO 0 "register_operand" "=r")
+ (unspec_volatile:AMO
+ [(match_operand:AMO 1 "memory_operand" "+o")
+ (match_operand:AMO 2 "nonmemory_operand" "0")
+ (match_operand:AMO 3 "const_int_operand")] ;; Memory model
+ UNSPEC_XADD))]
+ ""
+ "xadd<mop>\t%1,%0")
+
+(define_insn "atomic_fetch_and<AMO:mode>"
+ [(set (match_operand:AMO 0 "register_operand" "=r")
+ (unspec_volatile:AMO
+ [(match_operand:AMO 1 "memory_operand" "+o")
+ (match_operand:AMO 2 "nonmemory_operand" "0")
+ (match_operand:AMO 3 "const_int_operand")]
+ UNSPEC_XAND))]
+ ""
+ "xand<mop>\t%1,%0")
+
+(define_insn "atomic_fetch_or<AMO:mode>"
+ [(set (match_operand:AMO 0 "register_operand" "=r")
+ (unspec_volatile:AMO
+ [(match_operand:AMO 1 "memory_operand" "+o")
+ (match_operand:AMO 2 "nonmemory_operand" "0")
+ (match_operand:AMO 3 "const_int_operand")]
+ UNSPEC_XOR))]
+ ""
+ "xor<mop>\t%1,%0")
+
+(define_insn "atomic_fetch_xor<AMO:mode>"
+ [(set (match_operand:AMO 0 "register_operand" "=r")
+ (unspec_volatile:AMO
+ [(match_operand:AMO 1 "memory_operand" "+o")
+ (match_operand:AMO 2 "nonmemory_operand" "0")
+ (match_operand:AMO 3 "const_int_operand")]
+ UNSPEC_XXOR))]
+ ""
+ "xxor<mop>\t%1,%0")
+
+(define_insn "atomic_exchange<AMO:mode>"
+ [(set (match_operand:AMO 0 "register_operand" "=r")
+ (unspec_volatile:AMO
+ [(match_operand:AMO 1 "memory_operand" "+o")
+ (match_operand:AMO 2 "nonmemory_operand" "0")
+ (match_operand:AMO 3 "const_int_operand")]
+ UNSPEC_XCHG))]
+ ""
+ "xchg<mop>\t%1,%0")
+
+;; The eBPF compare-and-exchange instruction atomically compares
+;; the value addressed by the memory operand (%1) with R0, so a
+;; register constraint forces the expected value to be loaded
+;; into that register.  If they match, the memory is replaced
+;; with the desired value (%3).  Either way, the previous memory
+;; contents are zero-extended and loaded back into R0.
+
+(define_expand "atomic_compare_and_swap<AMO:mode>"
+ [(match_operand:SI 0 "register_operand") ;; bool success
+ (match_operand:AMO 1 "register_operand") ;; old value
+ (match_operand:AMO 2 "memory_operand") ;; memory
+ (match_operand:AMO 3 "register_operand") ;; expected
+ (match_operand:AMO 4 "register_operand") ;; desired
+ (match_operand:SI 5 "const_int_operand") ;; is_weak (unused)
+ (match_operand:SI 6 "const_int_operand") ;; success model (unused)
+ (match_operand:SI 7 "const_int_operand")] ;; failure model (unused)
""
- "xadd<mop>\t%0,%1"
- [(set_attr "type" "xadd")])
+{
+ emit_insn
+ (gen_atomic_compare_and_swap<AMO:mode>_1
+ (operands[1], operands[2], operands[3], operands[4], operands[6]));
+
+  /* Assume the operation succeeds, i.e. the memory operand
+     matches the expected value; this is fixed up below if the
+     comparison shows otherwise.  */
+ emit_move_insn (operands[0], const1_rtx);
+ rtx_code_label *success_label = gen_label_rtx ();
+
+  /* At this point the eBPF xcmp instruction has executed, so we
+     can test whether the exchange succeeded and set the boolean
+     result operand (%0) accordingly: 1 on success, 0 on
+     failure.  */
+ emit_cmp_and_jump_insns (operands[1], operands[3], EQ, 0,
+ GET_MODE (operands[1]), 1, success_label);
+ emit_move_insn (operands[0], const0_rtx);
+
+ if (success_label)
+ {
+ emit_label (success_label);
+ LABEL_NUSES (success_label) = 1;
+ }
+ DONE;
+})
+
+(define_insn "atomic_compare_and_swap<AMO:mode>_1"
+ [(set (match_operand:AMO 0 "register_operand" "=t") ;; must be r0
+ (unspec_volatile:AMO
+ [(match_operand:AMO 1 "memory_operand" "+o") ;; memory
+ (match_operand:AMO 2 "register_operand" "0") ;; expected
+ (match_operand:AMO 3 "register_operand" "r") ;; desired
+ (match_operand:SI 4 "const_int_operand")] ;; success (unused)
+ UNSPEC_CMPXCHG))]
+ ""
+ "xcmp<mop>\t%1,%3")
@@ -29,3 +29,6 @@
(define_constraint "S"
"A constant call address."
(match_code "const,symbol_ref,label_ref,const_int"))
+
+(define_register_constraint "t" "R0"
+ "Register r0")
new file mode 100644
@@ -0,0 +1,28 @@
+/* { dg-do compile } */
+
+long val;
+long ptr;
+long expected;
+long desired;
+
+void
+foo ()
+{
+ int done;
+
+ done = __atomic_compare_exchange_n (&ptr, &expected, desired,
+ 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ done = __atomic_compare_exchange_n ((int *)&ptr, (int *)&expected,
+ (int)desired, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+
+ done = __atomic_compare_exchange (&ptr, &expected, &desired,
+ 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+ done = __atomic_compare_exchange ((int *)&ptr, (int *)&expected,
+ (int *)&desired, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+
+ done = __sync_bool_compare_and_swap (&ptr, expected, desired);
+ done = __sync_bool_compare_and_swap ((int*)&ptr, expected, desired);
+}
+
+/* { dg-final { scan-assembler "xcmpdw\t.*" } } */
+/* { dg-final { scan-assembler "xcmpw\t.*" } } */
new file mode 100644
@@ -0,0 +1,19 @@
+/* { dg-do compile } */
+
+long val;
+long ptr;
+
+void
+foo ()
+{
+ long prev;
+
+ __atomic_exchange(&ptr, &val, &prev, __ATOMIC_RELAXED);
+ prev = __atomic_exchange_n(&ptr, val, __ATOMIC_RELAXED);
+
+ __atomic_exchange((int *)&ptr, (int *)&val, (int *)&prev, __ATOMIC_RELAXED);
+ prev = __atomic_exchange_n((int *)&ptr, (int)val, __ATOMIC_RELAXED);
+}
+
+/* { dg-final { scan-assembler "xchgdw\t.*" } } */
+/* { dg-final { scan-assembler "xchgw\t.*" } } */
new file mode 100644
@@ -0,0 +1,26 @@
+/* { dg-do compile } */
+
+long delta;
+long *val;
+
+void
+foo ()
+{
+ long k;
+
+ k = __atomic_fetch_add (val, delta, __ATOMIC_RELAXED);
+ k = __atomic_fetch_add ((int*)val, delta, __ATOMIC_RELAXED);
+
+ k = __atomic_add_fetch (val, delta, __ATOMIC_RELAXED);
+ k = __atomic_add_fetch ((int*)val, delta, __ATOMIC_RELAXED);
+
+ k = __sync_fetch_and_add (val, delta, __ATOMIC_RELAXED);
+ k = __sync_fetch_and_add ((int*)val, delta, __ATOMIC_RELAXED);
+
+ k = __sync_add_and_fetch (val, delta, __ATOMIC_RELAXED);
+ k = __sync_add_and_fetch ((int*)val, delta, __ATOMIC_RELAXED);
+
+}
+
+/* { dg-final { scan-assembler "xadddw\t.*" } } */
+/* { dg-final { scan-assembler "xaddw\t.*" } } */
new file mode 100644
@@ -0,0 +1,25 @@
+/* { dg-do compile } */
+
+long mask;
+long *val;
+
+void
+foo ()
+{
+ long k;
+
+ k = __atomic_fetch_and (val, mask, __ATOMIC_RELAXED);
+ k = __atomic_fetch_and ((int*)val, mask, __ATOMIC_RELAXED);
+
+ k = __atomic_and_fetch (val, mask, __ATOMIC_RELAXED);
+ k = __atomic_and_fetch ((int*)val, mask, __ATOMIC_RELAXED);
+
+ k = __sync_fetch_and_and (val, mask, __ATOMIC_RELAXED);
+ k = __sync_fetch_and_and ((int*)val, mask, __ATOMIC_RELAXED);
+
+ k = __sync_and_and_fetch (val, mask, __ATOMIC_RELAXED);
+ k = __sync_and_and_fetch ((int*)val, mask, __ATOMIC_RELAXED);
+}
+
+/* { dg-final { scan-assembler "xanddw\t.*" } } */
+/* { dg-final { scan-assembler "xandw\t.*" } } */
new file mode 100644
@@ -0,0 +1,25 @@
+/* { dg-do compile } */
+
+long bits;
+long *val;
+
+void
+foo ()
+{
+ long k;
+
+ k = __atomic_fetch_or (val, bits, __ATOMIC_RELAXED);
+ k = __atomic_fetch_or ((int *)val, bits, __ATOMIC_RELAXED);
+
+ k = __atomic_or_fetch (val, bits, __ATOMIC_RELAXED);
+ k = __atomic_or_fetch ((int*)val, bits, __ATOMIC_RELAXED);
+
+ k = __sync_fetch_and_or (val, bits, __ATOMIC_RELAXED);
+ k = __sync_fetch_and_or ((int*)val, bits, __ATOMIC_RELAXED);
+
+ k = __sync_or_and_fetch (val, bits, __ATOMIC_RELAXED);
+ k = __sync_or_and_fetch ((int*)val, bits, __ATOMIC_RELAXED);
+}
+
+/* { dg-final { scan-assembler "xordw\t.*" } } */
+/* { dg-final { scan-assembler "xorw\t.*" } } */
new file mode 100644
@@ -0,0 +1,25 @@
+/* { dg-do compile } */
+
+long delta;
+long *val;
+
+void
+foo ()
+{
+ long k;
+
+ k = __atomic_fetch_sub (val, delta, __ATOMIC_RELAXED);
+ k = __atomic_fetch_sub ((int*)val, delta, __ATOMIC_RELAXED);
+
+ k = __atomic_sub_fetch (val, delta, __ATOMIC_RELAXED);
+ k = __atomic_sub_fetch ((int*)val, delta, __ATOMIC_RELAXED);
+
+ k = __sync_fetch_and_sub (val, delta, __ATOMIC_RELAXED);
+ k = __sync_fetch_and_sub ((int*)val, delta, __ATOMIC_RELAXED);
+
+ k = __sync_sub_and_fetch (val, delta, __ATOMIC_RELAXED);
+ k = __sync_sub_and_fetch ((int*)val, delta, __ATOMIC_RELAXED);
+}
+
+/* { dg-final { scan-assembler "xadddw\t.*" } } */
+/* { dg-final { scan-assembler "xaddw\t.*" } } */
new file mode 100644
@@ -0,0 +1,25 @@
+/* { dg-do compile } */
+
+long bits;
+long *val;
+
+void
+foo ()
+{
+ long k;
+
+ k = __atomic_fetch_xor (val, bits, __ATOMIC_RELAXED);
+ k = __atomic_fetch_xor ((int *)val, bits, __ATOMIC_RELAXED);
+
+ k = __atomic_xor_fetch (val, bits, __ATOMIC_RELAXED);
+ k = __atomic_xor_fetch ((int*)val, bits, __ATOMIC_RELAXED);
+
+ k = __sync_fetch_and_xor (val, bits, __ATOMIC_RELAXED);
+ k = __sync_fetch_and_xor ((int*)val, bits, __ATOMIC_RELAXED);
+
+ k = __sync_xor_and_fetch (val, bits, __ATOMIC_RELAXED);
+ k = __sync_xor_and_fetch ((int*)val, bits, __ATOMIC_RELAXED);
+}
+
+/* { dg-final { scan-assembler "xxordw\t.*" } } */
+/* { dg-final { scan-assembler "xxorw\t.*" } } */