===================================================================
@@ -951,7 +951,6 @@ BU_ALTIVEC_X (VEC_EXT_V4SF, "vec_ext_v4sf", CO
before we get to the point about classifying the builtin type. */
/* 3 argument Altivec overloaded builtins. */
-BU_ALTIVEC_OVERLOAD_3 (ADDEC, "addec")
BU_ALTIVEC_OVERLOAD_3 (MADD, "madd")
BU_ALTIVEC_OVERLOAD_3 (MADDS, "madds")
BU_ALTIVEC_OVERLOAD_3 (MLADD, "mladd")
@@ -1137,6 +1136,7 @@ BU_ALTIVEC_OVERLOAD_P (VCMPGE_P, "vcmpge_p")
/* Overloaded Altivec builtins that are handled as special cases. */
BU_ALTIVEC_OVERLOAD_X (ADDE, "adde")
+BU_ALTIVEC_OVERLOAD_X (ADDEC, "addec")
BU_ALTIVEC_OVERLOAD_X (CTF, "ctf")
BU_ALTIVEC_OVERLOAD_X (CTS, "cts")
BU_ALTIVEC_OVERLOAD_X (CTU, "ctu")
===================================================================
@@ -4661,6 +4661,79 @@ assignment for unaligned loads and stores");
}
}
+ if (fcode == ALTIVEC_BUILTIN_VEC_ADDEC)
+ {
+ /* vec_addec needs to be special cased because there is no instruction
+ for the {un}signed int version. */
+ if (nargs != 3)
+ {
+ error ("vec_addec only accepts 3 arguments");
+ return error_mark_node;
+ }
+
+ tree arg0 = (*arglist)[0];
+ tree arg0_type = TREE_TYPE (arg0);
+ tree arg1 = (*arglist)[1];
+ tree arg1_type = TREE_TYPE (arg1);
+ tree arg2 = (*arglist)[2];
+ tree arg2_type = TREE_TYPE (arg2);
+
+ /* All 3 arguments must be vectors of (signed or unsigned) (int or
+ __int128) and the types must match. */
+ if ((arg0_type != arg1_type) || (arg1_type != arg2_type))
+ goto bad;
+ if (TREE_CODE (arg0_type) != VECTOR_TYPE)
+ goto bad;
+
+ switch (TYPE_MODE (TREE_TYPE (arg0_type)))
+ {
+ /* For {un}signed ints,
+ vec_addec (va, vb, carryv) == vec_or (vec_addc (va, vb),
+ vec_addc(vec_add(va, vb),
+ vec_and (carryv, 0x1))). */
+ case SImode:
+ {
+ /* Use save_expr to ensure that operands used more than once
+ that may have side effects (like calls) are only evaluated
+ once. */
+ arg0 = save_expr (arg0);
+ arg1 = save_expr (arg1);
+ vec<tree, va_gc> *params = make_tree_vector ();
+ vec_safe_push (params, arg0);
+ vec_safe_push (params, arg1);
+ tree call1 = altivec_resolve_overloaded_builtin
+ (loc, rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_ADDC], params);
+ params = make_tree_vector ();
+ vec_safe_push (params, arg0);
+ vec_safe_push (params, arg1);
+ tree call2 = altivec_resolve_overloaded_builtin
+ (loc, rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_ADD], params);
+ tree const1 = build_vector_from_val (arg0_type,
+ build_int_cstu (TREE_TYPE (arg0_type), 1));
+ tree and_expr = fold_build2_loc (loc, BIT_AND_EXPR,
+ arg0_type, arg2, const1);
+ params = make_tree_vector ();
+ vec_safe_push (params, call2);
+ vec_safe_push (params, and_expr);
+ call2 = altivec_resolve_overloaded_builtin
+ (loc, rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_ADDC], params);
+ params = make_tree_vector ();
+ vec_safe_push (params, call1);
+ vec_safe_push (params, call2);
+ return altivec_resolve_overloaded_builtin
+ (loc, rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_OR], params);
+ }
+ /* For {un}signed __int128s use the vaddecuq instruction directly. */
+ case TImode:
+ return altivec_resolve_overloaded_builtin
+ (loc, rs6000_builtin_decls[P8V_BUILTIN_VEC_VADDECUQ], arglist);
+ /* Types other than {un}signed int and {un}signed __int128
+ are errors. */
+ default:
+ goto bad;
+ }
+ }
+
/* For now treat vec_splats and vec_promote as the same. */
if (fcode == ALTIVEC_BUILTIN_VEC_SPLATS
|| fcode == ALTIVEC_BUILTIN_VEC_PROMOTE)
===================================================================
@@ -16045,6 +16045,8 @@ altivec_init_builtins (void)
def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
ALTIVEC_BUILTIN_VEC_ADDE);
+ def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
+ ALTIVEC_BUILTIN_VEC_ADDEC);
/* Cell builtins. */
def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
===================================================================
@@ -0,0 +1,109 @@
+/* { dg-do run { target { powerpc64*-*-* } } } */
+/* { dg-require-effective-target powerpc_p8vector_ok } */
+/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power8" } } */
+/* { dg-options "-mcpu=power8 -O3" } */
+
+/* Test that the vec_addec builtin works as expected. */
+
+#include <altivec.h>
+
+#define N 4096
+
+void abort ();
+
+#define define_test_functions(STYPE, NAMESUFFIX) \
+\
+STYPE result_##NAMESUFFIX[N]; \
+STYPE addend1_##NAMESUFFIX[N]; \
+STYPE addend2_##NAMESUFFIX[N]; \
+STYPE carry_##NAMESUFFIX[N]; \
+STYPE expected_##NAMESUFFIX[N]; \
+\
+__attribute__((noinline)) void vector_tests_##NAMESUFFIX () \
+{ \
+ int i; \
+ vector STYPE v1, v2, v3, tmp; \
+ for (i = 0; i < N; i+=16/sizeof(STYPE)) { \
+ /* result=carry of addend1+addend2+(carry & 0x1). */ \
+ v1 = (vector STYPE) { addend1_##NAMESUFFIX[i] }; \
+ v2 = (vector STYPE) { addend2_##NAMESUFFIX[i] }; \
+ v3 = (vector STYPE) { carry_##NAMESUFFIX[i] }; \
+\
+ tmp = vec_addec (v1, v2, v3); \
+ result_##NAMESUFFIX[i] = tmp[0]; \
+ } \
+} \
+\
+__attribute__((noinline)) void init_##NAMESUFFIX () \
+{ \
+ int i; \
+ for (i = 0; i < N; ++i) { \
+ result_##NAMESUFFIX[i] = 0; \
+ if (i%6 == 0) { \
+ addend1_##NAMESUFFIX[i] = ((__int128)0xffffffffffffffff << 64); \
+ addend2_##NAMESUFFIX[i] = 0xfffffffffffffffe; \
+ carry_##NAMESUFFIX[i] = 1; \
+ expected_##NAMESUFFIX[i] = 0; \
+ } else if (i%6 == 1) { \
+ addend1_##NAMESUFFIX[i] = ((__int128)0xffffffffffffffff << 64) + \
+ 0xffffffffffffffff; \
+ addend2_##NAMESUFFIX[i] = 1; \
+ carry_##NAMESUFFIX[i] = 0; \
+ expected_##NAMESUFFIX[i] = 1; \
+ } else if (i%6 == 2) { \
+ addend1_##NAMESUFFIX[i] = ((__int128)0xffffffffffffffff << 64) + \
+ 0xffffffffffffffff; \
+ addend2_##NAMESUFFIX[i] = 0; \
+ carry_##NAMESUFFIX[i] = 3; /* 3 should work like 1 here. */ \
+ expected_##NAMESUFFIX[i] = 1; \
+ } else if (i%6 == 3) { \
+ addend1_##NAMESUFFIX[i] = 1; \
+ addend2_##NAMESUFFIX[i] = ((__int128)0xffffffffffffffff << 64) + \
+ 0xffffffffffffffff; \
+ carry_##NAMESUFFIX[i] = 2; /* 2 should work like 0 here. */ \
+ expected_##NAMESUFFIX[i] = 1; \
+ } else if (i%6 == 4) { \
+ addend1_##NAMESUFFIX[i] = 0; \
+ addend2_##NAMESUFFIX[i] = ((__int128)0xffffffffffffffff << 64) + \
+ 0xffffffffffffffff; \
+ carry_##NAMESUFFIX[i] = 1; \
+ expected_##NAMESUFFIX[i] = 1; \
+ } else if (i%6 == 5) { \
+ addend1_##NAMESUFFIX[i] = ((__int128)0xffffffffffffffff << 64); \
+ addend2_##NAMESUFFIX[i] = 0xffffffffffffffff; \
+ carry_##NAMESUFFIX[i] = 1; \
+ expected_##NAMESUFFIX[i] = 1; \
+ } \
+ } \
+} \
+\
+__attribute__((noinline)) void verify_results_##NAMESUFFIX () \
+{ \
+ int i; \
+ for (i = 0; i < N; ++i) { \
+ if (result_##NAMESUFFIX[i] != expected_##NAMESUFFIX[i]) \
+ abort (); \
+ } \
+}
+
+
+#define execute_test_functions(STYPE, NAMESUFFIX) \
+{ \
+ init_##NAMESUFFIX (); \
+ vector_tests_##NAMESUFFIX (); \
+ verify_results_##NAMESUFFIX (); \
+}
+
+
+define_test_functions(signed __int128, si128);
+define_test_functions(unsigned __int128, ui128);
+
+int main ()
+{
+ execute_test_functions(signed __int128, si128);
+ execute_test_functions(unsigned __int128, ui128);
+
+ return 0;
+}
+
+
===================================================================
@@ -0,0 +1,105 @@
+/* { dg-do run { target { powerpc64*-*-* } } } */
+/* { dg-require-effective-target powerpc_p8vector_ok } */
+/* { dg-skip-if "do not override -mcpu" { powerpc*-*-* } { "-mcpu=*" } { "-mcpu=power8" } } */
+/* { dg-options "-mcpu=power8 -O3" } */
+
+/* Test that the vec_addec builtin works as expected. */
+
+#include <altivec.h>
+
+#define N 4096
+
+void abort ();
+
+#define define_test_functions(STYPE, NAMESUFFIX) \
+\
+STYPE result_##NAMESUFFIX[N]; \
+STYPE addend1_##NAMESUFFIX[N]; \
+STYPE addend2_##NAMESUFFIX[N]; \
+STYPE carry_##NAMESUFFIX[N]; \
+STYPE expected_##NAMESUFFIX[N]; \
+\
+__attribute__((noinline)) void vector_tests_##NAMESUFFIX () \
+{ \
+ int i; \
+ vector STYPE v1, v2, v3, tmp; \
+ for (i = 0; i < N; i+=16/sizeof(STYPE)) { \
+ /* result=carry of addend1+addend2+(carry & 0x1). */ \
+ v1 = vec_vsx_ld (0, &addend1_##NAMESUFFIX[i]); \
+ v2 = vec_vsx_ld (0, &addend2_##NAMESUFFIX[i]); \
+ v3 = vec_vsx_ld (0, &carry_##NAMESUFFIX[i]); \
+\
+ tmp = vec_addec (v1, v2, v3); \
+ vec_vsx_st (tmp, 0, &result_##NAMESUFFIX[i]); \
+ } \
+} \
+\
+__attribute__((noinline)) void init_##NAMESUFFIX () \
+{ \
+ int i; \
+ for (i = 0; i < N; ++i) { \
+ result_##NAMESUFFIX[i] = 0; \
+ if (i%6 == 0) { \
+ addend1_##NAMESUFFIX[i] = 0xfffffffd; \
+ addend2_##NAMESUFFIX[i] = 1; \
+ carry_##NAMESUFFIX[i] = 1; \
+ expected_##NAMESUFFIX[i] = 0; \
+ } else if (i%6 == 1) { \
+ addend1_##NAMESUFFIX[i] = 0xffffffff; \
+ addend2_##NAMESUFFIX[i] = 1; \
+ carry_##NAMESUFFIX[i] = 0; \
+ expected_##NAMESUFFIX[i] = 1; \
+ } else if (i%6 == 2) { \
+ addend1_##NAMESUFFIX[i] = 0xffffffff; \
+ addend2_##NAMESUFFIX[i] = 0; \
+ carry_##NAMESUFFIX[i] = 3; /* 3 should work like 1 here. */ \
+ expected_##NAMESUFFIX[i] = 1; \
+ } else if (i%6 == 3) { \
+ addend1_##NAMESUFFIX[i] = 1; \
+ addend2_##NAMESUFFIX[i] = 0xffffffff; \
+ carry_##NAMESUFFIX[i] = 2; /* 2 should work like 0 here. */ \
+ expected_##NAMESUFFIX[i] = 1; \
+ } else if (i%6 == 4) { \
+ addend1_##NAMESUFFIX[i] = 0; \
+ addend2_##NAMESUFFIX[i] = 0xffffffff; \
+ carry_##NAMESUFFIX[i] = 1; \
+ expected_##NAMESUFFIX[i] = 1; \
+ } else if (i%6 == 5) { \
+ addend1_##NAMESUFFIX[i] = 0xffff0000; \
+ addend2_##NAMESUFFIX[i] = 0x0000ffff; \
+ carry_##NAMESUFFIX[i] = 1; \
+ expected_##NAMESUFFIX[i] = 1; \
+ } \
+ } \
+} \
+\
+__attribute__((noinline)) void verify_results_##NAMESUFFIX () \
+{ \
+ int i; \
+ for (i = 0; i < N; ++i) { \
+ if (result_##NAMESUFFIX[i] != expected_##NAMESUFFIX[i]) \
+ abort (); \
+ } \
+}
+
+
+#define execute_test_functions(STYPE, NAMESUFFIX) \
+{ \
+ init_##NAMESUFFIX (); \
+ vector_tests_##NAMESUFFIX (); \
+ verify_results_##NAMESUFFIX (); \
+}
+
+
+define_test_functions(signed int, si);
+define_test_functions(unsigned int, ui);
+
+int main ()
+{
+ execute_test_functions(signed int, si);
+ execute_test_functions(unsigned int, ui);
+
+ return 0;
+}
+
+