
Backports to 8.x branch

Message ID 20190207150254.GN2135@tucnak
State New
Series Backports to 8.x branch

Commit Message

Jakub Jelinek Feb. 7, 2019, 3:02 p.m. UTC
Hi!

Another month has passed since my last 8.x backporting effort,
so I've backported the following 32 patches from trunk to 8.x,
bootstrapped/regtested them on x86_64-linux and i686-linux, and committed them.

	Jakub
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-01-07  Jakub Jelinek  <jakub@redhat.com>

	PR debug/88723
	* dwarf2out.c (const_ok_for_output_1): Remove redundant call to
	const_not_ok_for_debug_p target hook.
	(mem_loc_descriptor) <case UNSPEC>: Only call const_ok_for_output_1
	on UNSPEC and subexpressions thereof if all subexpressions of the
	UNSPEC are CONSTANT_P.

	2019-01-05  Jakub Jelinek  <jakub@redhat.com>

	PR debug/88635
	* dwarf2out.c (const_ok_for_output_1): Reject MINUS that contains
	SYMBOL_REF, CODE_LABEL or UNSPEC in subexpressions of second argument.
	Reject PLUS that contains SYMBOL_REF, CODE_LABEL or UNSPEC in
	subexpressions of both operands.
	(mem_loc_descriptor): Handle UNSPEC if target hook acks it and all the
	subrtxes are CONSTANT_P.

	* gcc.dg/debug/dwarf2/pr88635.c: New test.
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-01-10  Jakub Jelinek  <jakub@redhat.com>

	PR c/88568
	* attribs.c (handle_dll_attribute): Clear TREE_STATIC after setting
	DECL_EXTERNAL.

	* gcc.dg/pr88568.c: New test.

--- gcc/attribs.c	(revision 267798)
+++ gcc/attribs.c	(revision 267799)
@@ -1691,6 +1691,8 @@ handle_dll_attribute (tree * pnode, tree
 	     a function global scope, unless declared static.  */
 	  if (current_function_decl != NULL_TREE && !TREE_STATIC (node))
 	    TREE_PUBLIC (node) = 1;
+	  /* Clear TREE_STATIC because DECL_EXTERNAL is set.  */
+	  TREE_STATIC (node) = 0;
 	}
 
       if (*no_add_attrs == false)
--- gcc/testsuite/gcc.dg/pr88568.c	(nonexistent)
+++ gcc/testsuite/gcc.dg/pr88568.c	(revision 267799)
@@ -0,0 +1,4 @@
+/* PR c/88568 */
+/* { dg-do compile } */
+/* { dg-require-dll "" } */
+__attribute__((dllimport)) struct S var;	/* { dg-bogus "storage size of .var. isn.t known" } */
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-01-11  Jakub Jelinek  <jakub@redhat.com>

	PR bootstrap/88714
	* passes.c (finish_optimization_passes): Call print_combine_total_stats
	inside of pass_combine_1 dump rather than pass_profile_1.

--- gcc/passes.c	(revision 267838)
+++ gcc/passes.c	(revision 267839)
@@ -361,9 +361,9 @@ finish_optimization_passes (void)
 
   if (optimize > 0)
     {
-      dumps->dump_start (pass_profile_1->static_pass_number, NULL);
+      dumps->dump_start (pass_combine_1->static_pass_number, NULL);
       print_combine_total_stats ();
-      dumps->dump_finish (pass_profile_1->static_pass_number);
+      dumps->dump_finish (pass_combine_1->static_pass_number);
     }
 
   /* Do whatever is necessary to finish printing the graphs.  */
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-01-14  Jakub Jelinek  <jakub@redhat.com>

	* c-cppbuiltin.c (c_cpp_builtin): Define __cpp_guaranteed_copy_elision
	and __cpp_nontype_template_parameter_auto.  Add a comment that
	__cpp_template_auto is deprecated.

	* g++.dg/cpp1z/feat-cxx1z.C: Add tests for
	__cpp_guaranteed_copy_elision and __cpp_nontype_template_parameter_auto
	feature test macros.

--- gcc/c-family/c-cppbuiltin.c	(revision 267924)
+++ gcc/c-family/c-cppbuiltin.c	(revision 267925)
@@ -972,9 +972,13 @@ c_cpp_builtins (cpp_reader *pfile)
 	  cpp_define (pfile, "__cpp_aggregate_bases=201603");
 	  cpp_define (pfile, "__cpp_deduction_guides=201611");
 	  cpp_define (pfile, "__cpp_noexcept_function_type=201510");
+	  /* Old macro, superseded by
+	     __cpp_nontype_template_parameter_auto.  */
 	  cpp_define (pfile, "__cpp_template_auto=201606");
 	  cpp_define (pfile, "__cpp_structured_bindings=201606");
 	  cpp_define (pfile, "__cpp_variadic_using=201611");
+	  cpp_define (pfile, "__cpp_guaranteed_copy_elision=201606");
+	  cpp_define (pfile, "__cpp_nontype_template_parameter_auto=201606");
 	}
       if (flag_concepts)
 	cpp_define (pfile, "__cpp_concepts=201507");
--- gcc/testsuite/g++.dg/cpp1z/feat-cxx1z.C	(revision 267924)
+++ gcc/testsuite/g++.dg/cpp1z/feat-cxx1z.C	(revision 267925)
@@ -417,6 +417,18 @@
 #  error "__cpp_variadic_using != 201611"
 #endif
 
+#ifndef __cpp_guaranteed_copy_elision
+#  error "__cpp_guaranteed_copy_elision"
+#elif __cpp_guaranteed_copy_elision != 201606
+#  error "__cpp_guaranteed_copy_elision != 201606"
+#endif
+
+#ifndef __cpp_nontype_template_parameter_auto
+#  error "__cpp_nontype_template_parameter_auto"
+#elif __cpp_nontype_template_parameter_auto != 201606
+#  error "__cpp_nontype_template_parameter_auto != 201606"
+#endif
+
 #ifdef __has_cpp_attribute
 
 #  if ! __has_cpp_attribute(maybe_unused)
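
For reference (not part of the patch): user code can key off the new macros like
any other feature-test macro.  A minimal sketch, assuming a C++17 compiler, is
that guaranteed copy elision lets a factory function return a type that is
neither copyable nor movable:

#if defined(__cpp_guaranteed_copy_elision) && __cpp_guaranteed_copy_elision >= 201606
// A type that can be neither copied nor moved.
struct locked_file {
  locked_file() = default;
  locked_file(const locked_file &) = delete;
  locked_file(locked_file &&) = delete;
};

// OK in C++17: the prvalue initializes the caller's object directly,
// so no copy or move constructor is required.
locked_file open_locked() { return locked_file(); }
#endif
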
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-01-17  Jakub Jelinek  <jakub@redhat.com>

	PR rtl-optimization/88870
	* dce.c (deletable_insn_p): Never delete const/pure calls that can
	throw if we can't alter the cfg or delete dead exceptions.
	(mark_insn): Don't call find_call_stack_args for such calls.

	* gcc.dg/pr88870.c: New test.

--- gcc/dce.c	(revision 268007)
+++ gcc/dce.c	(revision 268008)
@@ -108,7 +108,10 @@ deletable_insn_p (rtx_insn *insn, bool f
       /* We can delete dead const or pure calls as long as they do not
          infinite loop.  */
       && (RTL_CONST_OR_PURE_CALL_P (insn)
-	  && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn)))
+	  && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
+      /* Don't delete calls that may throw if we cannot do so.  */
+      && ((cfun->can_delete_dead_exceptions && can_alter_cfg)
+	  || insn_nothrow_p (insn)))
     return find_call_stack_args (as_a <rtx_call_insn *> (insn), false,
 				 fast, arg_stores);
 
@@ -201,7 +204,9 @@ mark_insn (rtx_insn *insn, bool fast)
 	  && !df_in_progress
 	  && !SIBLING_CALL_P (insn)
 	  && (RTL_CONST_OR_PURE_CALL_P (insn)
-	      && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn)))
+	      && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
+	  && ((cfun->can_delete_dead_exceptions && can_alter_cfg)
+	      || insn_nothrow_p (insn)))
 	find_call_stack_args (as_a <rtx_call_insn *> (insn), true, fast, NULL);
     }
 }
--- gcc/testsuite/gcc.dg/pr88870.c	(nonexistent)
+++ gcc/testsuite/gcc.dg/pr88870.c	(revision 268008)
@@ -0,0 +1,23 @@
+/* PR rtl-optimization/88870 */
+/* { dg-do compile } */
+/* { dg-options "-O1 -fexceptions -fnon-call-exceptions -ftrapv -fno-tree-dominator-opts" } */
+
+int a, b;
+
+void
+foo (int *x)
+{
+  int c = 0;
+  {
+    int d;
+    x = &c;
+    for (;;)
+      {
+        x = &d;
+        b = 0;
+        d = c + 1;
+        b = c = 1;
+        ++a;
+      }
+  }
+}
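
A purely illustrative sketch of the situation the dce.c change guards against
(not the pr88870.c testcase; there GCC derives the pure flag itself rather than
it being written out): with -fexceptions -fnon-call-exceptions -ftrapv, a
const/pure call whose result is unused can still throw, so RTL DCE may only
remove it when it is allowed to delete dead exceptions and alter the CFG.

__attribute__ ((pure)) int
may_overflow (int x)
{
  return x + 1;        /* may trap (and thus throw) with -ftrapv */
}

void
caller (int x)
{
  may_overflow (x);    /* result unused, but the call is not nothrow */
}
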
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-01-17  Jakub Jelinek  <jakub@redhat.com>

	PR target/88734
	* config/aarch64/arm_neon.h: Fix #pragma GCC target syntax - replace
	(("..."))) with ("...").  Use arch=armv8.2-a+sha3 instead of
	arch=armv8.2-a+crypto for vsha512hq_u64 etc. intrinsics.

--- gcc/config/aarch64/arm_neon.h	(revision 268048)
+++ gcc/config/aarch64/arm_neon.h	(revision 268049)
@@ -31971,7 +31971,7 @@ vdotq_laneq_s32 (int32x4_t __r, int8x16_
 #pragma GCC pop_options
 
 #pragma GCC push_options
-#pragma GCC target(("arch=armv8.2-a+sm4"))
+#pragma GCC target ("arch=armv8.2-a+sm4")
 
 __extension__ extern __inline uint32x4_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
@@ -32038,7 +32038,7 @@ vsm4ekeyq_u32 (uint32x4_t __a, uint32x4_
 #pragma GCC pop_options
 
 #pragma GCC push_options
-#pragma GCC target(("arch=armv8.2-a+crypto"))
+#pragma GCC target ("arch=armv8.2-a+sha3")
 
 __extension__ extern __inline uint64x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
@@ -32098,7 +32098,7 @@ vbcaxq_u16 (uint16x8_t __a, uint16x8_t _
 #pragma GCC pop_options
 
 #pragma GCC push_options
-#pragma GCC target(("arch=armv8.2-a+fp16fml"))
+#pragma GCC target ("arch=armv8.2-a+fp16fml")
 
 __extension__ extern __inline float32x2_t
 __attribute__ ((__always_inline__, __gnu_inline__, __artificial__))
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-01-19  Jakub Jelinek  <jakub@redhat.com>

	PR fortran/88902
	* trans-decl.c (gfc_get_symbol_decl): Don't add length to function
	or parent function if it has been added there already.

	* gfortran.dg/pr88902.f90: New test.

--- gcc/fortran/trans-decl.c	(revision 268090)
+++ gcc/fortran/trans-decl.c	(revision 268091)
@@ -1572,13 +1572,17 @@ gfc_get_symbol_decl (gfc_symbol * sym)
 	  if (VAR_P (length) && DECL_FILE_SCOPE_P (length))
 	    {
 	      /* Add the string length to the same context as the symbol.  */
-	      if (DECL_CONTEXT (sym->backend_decl) == current_function_decl)
-	        gfc_add_decl_to_function (length);
-	      else
-		gfc_add_decl_to_parent_function (length);
+	      if (DECL_CONTEXT (length) == NULL_TREE)
+		{
+		  if (DECL_CONTEXT (sym->backend_decl)
+		      == current_function_decl)
+		    gfc_add_decl_to_function (length);
+		  else
+		    gfc_add_decl_to_parent_function (length);
+		}
 
-	      gcc_assert (DECL_CONTEXT (sym->backend_decl) ==
-			    DECL_CONTEXT (length));
+	      gcc_assert (DECL_CONTEXT (sym->backend_decl)
+			  == DECL_CONTEXT (length));
 
 	      gfc_defer_symbol_init (sym);
 	    }
--- gcc/testsuite/gfortran.dg/pr88902.f90	(nonexistent)
+++ gcc/testsuite/gfortran.dg/pr88902.f90	(revision 268091)
@@ -0,0 +1,6 @@
+! PR fortran/88902
+! { dg-do compile }
+! { dg-require-effective-target lto }
+! { dg-options "-flto --param ggc-min-heapsize=0" }
+
+include 'pr50069_2.f90'
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-01-21  Jakub Jelinek  <jakub@redhat.com>

	PR sanitizer/88901
	* typeck.c (cp_build_binary_op): Don't instrument
	SANITIZE_POINTER_COMPARE if processing_template_decl.
	(pointer_diff): Similarly for SANITIZE_POINTER_SUBTRACT.

	* g++.dg/asan/pr88901.C: New test.

--- gcc/cp/typeck.c	(revision 268121)
+++ gcc/cp/typeck.c	(revision 268122)
@@ -5233,6 +5233,7 @@ cp_build_binary_op (const op_location_t
 	}
 
       if ((code0 == POINTER_TYPE || code1 == POINTER_TYPE)
+	  && !processing_template_decl
 	  && sanitize_flags_p (SANITIZE_POINTER_COMPARE))
 	{
 	  op0 = save_expr (op0);
@@ -5650,7 +5651,8 @@ pointer_diff (location_t loc, tree op0,
   else
     inttype = restype;
 
-  if (sanitize_flags_p (SANITIZE_POINTER_SUBTRACT))
+  if (!processing_template_decl
+      && sanitize_flags_p (SANITIZE_POINTER_SUBTRACT))
     {
       op0 = save_expr (op0);
       op1 = save_expr (op1);
--- gcc/testsuite/g++.dg/asan/pr88901.C	(nonexistent)
+++ gcc/testsuite/g++.dg/asan/pr88901.C	(revision 268122)
@@ -0,0 +1,13 @@
+// PR sanitizer/88901
+// { dg-do compile }
+// { dg-options "-fsanitize=address -fsanitize=pointer-compare" }
+
+template <typename T>
+struct A {
+  void foo() {
+    auto d = [](char *x, char *y) {
+      for (char *p = x; p + sizeof(T) <= y; p += sizeof(T))
+        reinterpret_cast<T *>(p)->~T();
+    };
+  }
+};
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-01-21  Jakub Jelinek  <jakub@redhat.com>

	PR c++/88949
	* optimize.c (cxx_copy_decl): New function.
	(clone_body): Use it instead of copy_decl_no_change.

	* g++.dg/gomp/pr88949.C: New test.

--- gcc/cp/optimize.c	(revision 268126)
+++ gcc/cp/optimize.c	(revision 268127)
@@ -61,6 +61,25 @@ update_cloned_parm (tree parm, tree clon
   DECL_GIMPLE_REG_P (cloned_parm) = DECL_GIMPLE_REG_P (parm);
 }
 
+/* Like copy_decl_no_change, but handle DECL_OMP_PRIVATIZED_MEMBER
+   properly.  */
+
+static tree
+cxx_copy_decl (tree decl, copy_body_data *id)
+{
+  tree copy = copy_decl_no_change (decl, id);
+  if (VAR_P (decl)
+      && DECL_HAS_VALUE_EXPR_P (decl)
+      && DECL_ARTIFICIAL (decl)
+      && DECL_LANG_SPECIFIC (decl)
+      && DECL_OMP_PRIVATIZED_MEMBER (decl))
+    {
+      tree expr = DECL_VALUE_EXPR (copy);
+      walk_tree (&expr, copy_tree_body_r, id, NULL);
+      SET_DECL_VALUE_EXPR (copy, expr);
+    }
+  return copy;
+}
 
 /* FN is a function in High GIMPLE form that has a complete body and no
    CFG.  CLONE is a function whose body is to be set to a copy of FN,
@@ -80,7 +99,7 @@ clone_body (tree clone, tree fn, void *a
   id.src_cfun = DECL_STRUCT_FUNCTION (fn);
   id.decl_map = static_cast<hash_map<tree, tree> *> (arg_map);
 
-  id.copy_decl = copy_decl_no_change;
+  id.copy_decl = cxx_copy_decl;
   id.transform_call_graph_edges = CB_CGE_DUPLICATE;
   id.transform_new_cfg = true;
   id.transform_return_to_modify = false;
--- gcc/testsuite/g++.dg/gomp/pr88949.C	(nonexistent)
+++ gcc/testsuite/g++.dg/gomp/pr88949.C	(revision 268127)
@@ -0,0 +1,23 @@
+// PR c++/88949
+// { dg-do compile }
+
+struct A {
+  int a;
+  A (int x) : a (x) {
+#pragma omp parallel firstprivate (a)
+    --a;
+  }
+  void foo () {
+#pragma omp parallel firstprivate (a)
+    --a;
+  }
+};
+
+int c;
+
+int
+main ()
+{
+  A d(c);
+  d.foo ();
+}
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-01-22  Jakub Jelinek  <jakub@redhat.com>

	PR rtl-optimization/49429
	PR target/49454
	PR rtl-optimization/86334
	PR target/88906
	* expr.c (emit_block_move_hints): Move marking of MEM_EXPRs
	addressable from here...
	(emit_block_op_via_libcall): ... to here.

	* gcc.target/i386/pr86334.c: New test.
	* gcc.target/i386/pr88906.c: New test.

--- gcc/expr.c	(revision 268137)
+++ gcc/expr.c	(revision 268138)
@@ -1631,14 +1631,6 @@ emit_block_move_hints (rtx x, rtx y, rtx
       if (may_use_call < 0)
 	return pc_rtx;
 
-      /* Since x and y are passed to a libcall, mark the corresponding
-	 tree EXPR as addressable.  */
-      tree y_expr = MEM_EXPR (y);
-      tree x_expr = MEM_EXPR (x);
-      if (y_expr)
-	mark_addressable (y_expr);
-      if (x_expr)
-	mark_addressable (x_expr);
       retval = emit_block_copy_via_libcall (x, y, size,
 					    method == BLOCK_OP_TAILCALL);
     }
@@ -1884,6 +1876,15 @@ emit_block_op_via_libcall (enum built_in
   tree call_expr, dst_tree, src_tree, size_tree;
   machine_mode size_mode;
 
+  /* Since dst and src are passed to a libcall, mark the corresponding
+     tree EXPR as addressable.  */
+  tree dst_expr = MEM_EXPR (dst);
+  tree src_expr = MEM_EXPR (src);
+  if (dst_expr)
+    mark_addressable (dst_expr);
+  if (src_expr)
+    mark_addressable (src_expr);
+
   dst_addr = copy_addr_to_reg (XEXP (dst, 0));
   dst_addr = convert_memory_address (ptr_mode, dst_addr);
   dst_tree = make_tree (ptr_type_node, dst_addr);
--- gcc/testsuite/gcc.target/i386/pr86334.c	(nonexistent)
+++ gcc/testsuite/gcc.target/i386/pr86334.c	(revision 268138)
@@ -0,0 +1,21 @@
+/* PR rtl-optimization/86334 */
+/* { dg-do run { target ia32 } } */
+/* { dg-options "-O -march=i386 -mtune=athlon -minline-all-stringops -minline-stringops-dynamically -mmemcpy-strategy=libcall:-1:align -Wno-psabi" } */
+
+typedef int V __attribute__ ((vector_size (64)));
+
+static inline V
+foo (V g)
+{
+  g[0] = 4;
+  return g;
+}
+
+int
+main ()
+{
+  V x = foo ((V) { });
+  if (x[0] != 4 || x[1] || x[2] || x[3] || x[4] || x[5] || x[6] || x[7])
+    __builtin_abort ();
+  return 0;
+}
--- gcc/testsuite/gcc.target/i386/pr88906.c	(nonexistent)
+++ gcc/testsuite/gcc.target/i386/pr88906.c	(revision 268138)
@@ -0,0 +1,21 @@
+/* PR target/88906 */
+/* { dg-do run { target ia32 } } */
+/* { dg-options "-O -march=i386 -mtune=k6 -minline-all-stringops -minline-stringops-dynamically -mmemcpy-strategy=libcall:-1:align -Wno-psabi" } */
+
+typedef unsigned V __attribute__ ((vector_size (16)));
+
+static inline V
+foo (V v)
+{
+  __builtin_sub_overflow (0, 0, &v[0]);
+  return v;
+}
+
+int
+main ()
+{
+  V v = foo ((V) { ~0 });
+  if (v[0] || v[1] || v[2] || v[3])
+    __builtin_abort ();
+  return 0;
+}
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-01-22  Jakub Jelinek  <jakub@redhat.com>

	PR target/88905
	* optabs.c (add_equal_note): Add op0_mode argument, use it instead of
	GET_MODE (op0).
	(expand_binop_directly, expand_doubleword_clz,
	expand_doubleword_popcount, expand_ctz, expand_ffs,
	expand_unop_direct, maybe_emit_unop_insn): Adjust callers.

	* gcc.dg/pr88905.c: New test.

--- gcc/optabs.c	(revision 268138)
+++ gcc/optabs.c	(revision 268139)
@@ -55,7 +55,7 @@ void debug_optab_libfuncs (void);
 
 /* Add a REG_EQUAL note to the last insn in INSNS.  TARGET is being set to
    the result of operation CODE applied to OP0 (and OP1 if it is a binary
-   operation).
+   operation).  OP0_MODE is OP0's mode.
 
    If the last insn does not set TARGET, don't do anything, but return 1.
 
@@ -64,7 +64,8 @@ void debug_optab_libfuncs (void);
    try again, ensuring that TARGET is not one of the operands.  */
 
 static int
-add_equal_note (rtx_insn *insns, rtx target, enum rtx_code code, rtx op0, rtx op1)
+add_equal_note (rtx_insn *insns, rtx target, enum rtx_code code, rtx op0,
+		rtx op1, machine_mode op0_mode)
 {
   rtx_insn *last_insn;
   rtx set;
@@ -136,16 +137,16 @@ add_equal_note (rtx_insn *insns, rtx tar
       case POPCOUNT:
       case PARITY:
       case BSWAP:
-	if (GET_MODE (op0) != VOIDmode && GET_MODE (target) != GET_MODE (op0))
+	if (op0_mode != VOIDmode && GET_MODE (target) != op0_mode)
 	  {
-	    note = gen_rtx_fmt_e (code, GET_MODE (op0), copy_rtx (op0));
-	    if (GET_MODE_UNIT_SIZE (GET_MODE (op0))
+	    note = gen_rtx_fmt_e (code, op0_mode, copy_rtx (op0));
+	    if (GET_MODE_UNIT_SIZE (op0_mode)
 		> GET_MODE_UNIT_SIZE (GET_MODE (target)))
 	      note = simplify_gen_unary (TRUNCATE, GET_MODE (target),
-					 note, GET_MODE (op0));
+					 note, op0_mode);
 	    else
 	      note = simplify_gen_unary (ZERO_EXTEND, GET_MODE (target),
-					 note, GET_MODE (op0));
+					 note, op0_mode);
 	    break;
 	  }
 	/* FALLTHRU */
@@ -1127,7 +1128,7 @@ expand_binop_directly (enum insn_code ic
       if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
 	  && ! add_equal_note (pat, ops[0].value,
 			       optab_to_code (binoptab),
-			       ops[1].value, ops[2].value))
+			       ops[1].value, ops[2].value, mode0))
 	{
 	  delete_insns_since (last);
 	  return expand_binop (mode, binoptab, op0, op1, NULL_RTX,
@@ -2298,7 +2299,7 @@ expand_doubleword_clz (scalar_int_mode m
   seq = get_insns ();
   end_sequence ();
 
-  add_equal_note (seq, target, CLZ, xop0, 0);
+  add_equal_note (seq, target, CLZ, xop0, NULL_RTX, mode);
   emit_insn (seq);
   return target;
 
@@ -2340,7 +2341,7 @@ expand_doubleword_popcount (scalar_int_m
   seq = get_insns ();
   end_sequence ();
 
-  add_equal_note (seq, t, POPCOUNT, op0, 0);
+  add_equal_note (seq, t, POPCOUNT, op0, NULL_RTX, mode);
   emit_insn (seq);
   return t;
 }
@@ -2511,7 +2512,7 @@ expand_ctz (scalar_int_mode mode, rtx op
   seq = get_insns ();
   end_sequence ();
 
-  add_equal_note (seq, temp, CTZ, op0, 0);
+  add_equal_note (seq, temp, CTZ, op0, NULL_RTX, mode);
   emit_insn (seq);
   return temp;
 }
@@ -2589,7 +2590,7 @@ expand_ffs (scalar_int_mode mode, rtx op
   seq = get_insns ();
   end_sequence ();
 
-  add_equal_note (seq, temp, FFS, op0, 0);
+  add_equal_note (seq, temp, FFS, op0, NULL_RTX, mode);
   emit_insn (seq);
   return temp;
 
@@ -2736,7 +2737,7 @@ expand_unop_direct (machine_mode mode, o
 	  if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
 	      && ! add_equal_note (pat, ops[0].value,
 				   optab_to_code (unoptab),
-				   ops[1].value, NULL_RTX))
+				   ops[1].value, NULL_RTX, mode))
 	    {
 	      delete_insns_since (last);
 	      return expand_unop (mode, unoptab, op0, NULL_RTX, unsignedp);
@@ -3588,7 +3589,8 @@ maybe_emit_unop_insn (enum insn_code ico
 
   if (INSN_P (pat) && NEXT_INSN (pat) != NULL_RTX
       && code != UNKNOWN)
-    add_equal_note (pat, ops[0].value, code, ops[1].value, NULL_RTX);
+    add_equal_note (pat, ops[0].value, code, ops[1].value, NULL_RTX,
+		    GET_MODE (op0));
 
   emit_insn (pat);
 
--- gcc/testsuite/gcc.dg/pr88905.c	(nonexistent)
+++ gcc/testsuite/gcc.dg/pr88905.c	(revision 268139)
@@ -0,0 +1,21 @@
+/* PR target/88905 */
+/* { dg-do compile } */
+/* { dg-options "-Og -fno-tree-ccp" } */
+/* { dg-additional-options "-mabm" { target { i?86-*-* x86_64-*-* } } } */
+
+int a, b, c;
+extern void baz (int);
+
+static inline int
+bar (unsigned u)
+{
+  int i = __builtin_popcountll (-(unsigned long long) u);
+  baz (i & c);
+  return a + b + c;
+}
+
+void
+foo (void)
+{
+  bar (2376498292ULL);
+}
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-01-22  Jakub Jelinek  <jakub@redhat.com>

	PR middle-end/88968
	* gimplify.c (gimplify_omp_atomic): Handle bitfield atomics with
	non-integral DECL_BIT_FIELD_REPRESENTATIVEs.

	* c-omp.c (c_finish_omp_atomic): For bitfield atomics, update type
	variable after using BIT_FIELD_REF.

	* c-c++-common/gomp/atomic-23.c: New test.

--- gcc/gimplify.c	(revision 268164)
+++ gcc/gimplify.c	(revision 268165)
@@ -11057,9 +11057,36 @@ gimplify_omp_atomic (tree *expr_p, gimpl
 
   loadstmt = gimple_build_omp_atomic_load (tmp_load, addr);
   gimplify_seq_add_stmt (pre_p, loadstmt);
-  if (rhs && gimplify_expr (&rhs, pre_p, NULL, is_gimple_val, fb_rvalue)
-      != GS_ALL_DONE)
-    return GS_ERROR;
+  if (rhs)
+    {
+      /* BIT_INSERT_EXPR is not valid for non-integral bitfield
+	 representatives.  Use BIT_FIELD_REF on the lhs instead.  */
+      if (TREE_CODE (rhs) == BIT_INSERT_EXPR
+	  && !INTEGRAL_TYPE_P (TREE_TYPE (tmp_load)))
+	{
+	  tree bitpos = TREE_OPERAND (rhs, 2);
+	  tree op1 = TREE_OPERAND (rhs, 1);
+	  tree bitsize;
+	  tree tmp_store = tmp_load;
+	  if (TREE_CODE (*expr_p) == OMP_ATOMIC_CAPTURE_OLD)
+	    tmp_store = get_initialized_tmp_var (tmp_load, pre_p, NULL);
+	  if (INTEGRAL_TYPE_P (TREE_TYPE (op1)))
+	    bitsize = bitsize_int (TYPE_PRECISION (TREE_TYPE (op1)));
+	  else
+	    bitsize = TYPE_SIZE (TREE_TYPE (op1));
+	  gcc_assert (TREE_OPERAND (rhs, 0) == tmp_load);
+	  tree t = build2_loc (EXPR_LOCATION (rhs),
+			       MODIFY_EXPR, void_type_node,
+			       build3_loc (EXPR_LOCATION (rhs), BIT_FIELD_REF,
+					   TREE_TYPE (op1), tmp_store, bitsize,
+					   bitpos), op1);
+	  gimplify_and_add (t, pre_p);
+	  rhs = tmp_store;
+	}
+      if (gimplify_expr (&rhs, pre_p, NULL, is_gimple_val, fb_rvalue)
+	  != GS_ALL_DONE)
+	return GS_ERROR;
+    }
 
   if (TREE_CODE (*expr_p) == OMP_ATOMIC_READ)
     rhs = tmp_load;
--- gcc/c-family/c-omp.c	(revision 268164)
+++ gcc/c-family/c-omp.c	(revision 268165)
@@ -378,8 +378,11 @@ c_finish_omp_atomic (location_t loc, enu
 	    }
 	}
       if (blhs)
-	x = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), x,
-			bitsize_int (bitsize), bitsize_int (bitpos));
+	{
+	  x = build3_loc (loc, BIT_FIELD_REF, TREE_TYPE (blhs), x,
+			  bitsize_int (bitsize), bitsize_int (bitpos));
+	  type = TREE_TYPE (blhs);
+	}
       x = build_modify_expr (loc, v, NULL_TREE, NOP_EXPR,
 			     loc, x, NULL_TREE);
       if (rhs1 && rhs1 != orig_lhs)
--- gcc/testsuite/c-c++-common/gomp/atomic-23.c	(nonexistent)
+++ gcc/testsuite/c-c++-common/gomp/atomic-23.c	(revision 268165)
@@ -0,0 +1,47 @@
+/* PR middle-end/88968 */
+/* { dg-do compile } */
+
+struct __attribute__((packed)) S {
+  unsigned int a : 16;
+  unsigned int b : 1;
+} s;
+
+void
+f1 (void)
+{
+#pragma omp atomic
+  ++s.a;
+}
+
+int
+f2 (void)
+{
+  int r;
+#pragma omp atomic capture
+  {
+    r = s.a;
+    s.a = 0;
+  }
+  return r;
+}
+
+int
+f3 (void)
+{
+  int r;
+#pragma omp atomic capture
+  {
+    r = s.a;
+    s.a = s.a + 32;
+  }
+  return r;
+}
+
+int
+f4 (void)
+{
+  int r;
+#pragma omp atomic capture
+  r = s.a = s.a + 32;
+  return r;
+}
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-01-22  Jakub Jelinek  <jakub@redhat.com>

	PR target/88965
	* config/rs6000/rs6000.c: Include tree-vrp.h and tree-ssanames.h.
	(rs6000_gimple_fold_builtin): If MEM_REF address doesn't satisfy
	is_gimple_mem_ref_addr predicate, force it into a SSA_NAME first.

	* gcc.target/powerpc/pr88965.c: New test.

--- gcc/config/rs6000/rs6000.c	(revision 268165)
+++ gcc/config/rs6000/rs6000.c	(revision 268166)
@@ -81,6 +81,8 @@
 #include "case-cfn-macros.h"
 #include "ppc-auxv.h"
 #include "tree-ssa-propagate.h"
+#include "tree-vrp.h"
+#include "tree-ssanames.h"
 
 /* This file should be included last.  */
 #include "target-def.h"
@@ -16580,6 +16582,13 @@ rs6000_gimple_fold_builtin (gimple_stmt_
 					  arg1_type, temp_addr,
 					  build_int_cst (arg1_type, -16));
 	gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
+	if (!is_gimple_mem_ref_addr (aligned_addr))
+	  {
+	    tree t = make_ssa_name (TREE_TYPE (aligned_addr));
+	    gimple *g = gimple_build_assign (t, aligned_addr);
+	    gsi_insert_before (gsi, g, GSI_SAME_STMT);
+	    aligned_addr = t;
+	  }
 	/* Use the build2 helper to set up the mem_ref.  The MEM_REF could also
 	   take an offset, but since we've already incorporated the offset
 	   above, here we just pass in a zero.  */
@@ -16628,6 +16637,13 @@ rs6000_gimple_fold_builtin (gimple_stmt_
 					  arg2_type, temp_addr,
 					  build_int_cst (arg2_type, -16));
 	gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
+	if (!is_gimple_mem_ref_addr (aligned_addr))
+	  {
+	    tree t = make_ssa_name (TREE_TYPE (aligned_addr));
+	    gimple *g = gimple_build_assign (t, aligned_addr);
+	    gsi_insert_before (gsi, g, GSI_SAME_STMT);
+	    aligned_addr = t;
+	  }
 	/* The desired gimple result should be similar to:
 	   MEM[(__vector floatD.1407 *)_1] = vf1D.2697;  */
 	gimple *g
--- gcc/testsuite/gcc.target/powerpc/pr88965.c	(nonexistent)
+++ gcc/testsuite/gcc.target/powerpc/pr88965.c	(revision 268166)
@@ -0,0 +1,19 @@
+/* PR target/88965 */
+/* { dg-do compile } */
+/* { dg-require-effective-target powerpc_vsx_ok } */
+/* { dg-options "-O2 -mvsx" } */
+
+unsigned int a[16];
+unsigned int __attribute__ ((vector_size (16))) b;
+
+void
+foo (void)
+{
+  b = __builtin_vec_vsx_ld (0, &a[0]);
+}
+
+void
+bar (void)
+{
+  __builtin_vec_vsx_st (b, 0, &a[0]);
+}
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-01-23  Jakub Jelinek  <jakub@redhat.com>

	PR tree-optimization/88964
	* gimple-loop-interchange.cc (loop_cand::analyze_induction_var): Use
	build_zero_cst instead of build_int_cst.  Return false for loop
	invariants which honor signed zeros.

	* gfortran.dg/pr88964.f90: New test.

--- gcc/gimple-loop-interchange.cc	(revision 268178)
+++ gcc/gimple-loop-interchange.cc	(revision 268179)
@@ -688,11 +688,16 @@ loop_cand::analyze_induction_var (tree v
   /* Var is loop invariant, though it's unlikely to happen.  */
   if (tree_does_not_contain_chrecs (chrec))
     {
+      /* Punt on floating point invariants if honoring signed zeros,
+	 representing that as + 0.0 would change the result if init
+	 is -0.0.  */
+      if (HONOR_SIGNED_ZEROS (chrec))
+	return false;
       struct induction *iv = XCNEW (struct induction);
       iv->var = var;
       iv->init_val = init;
       iv->init_expr = chrec;
-      iv->step = build_int_cst (TREE_TYPE (chrec), 0);
+      iv->step = build_zero_cst (TREE_TYPE (chrec));
       m_inductions.safe_push (iv);
       return true;
     }
--- gcc/testsuite/gfortran.dg/pr88964.f90	(nonexistent)
+++ gcc/testsuite/gfortran.dg/pr88964.f90	(revision 268179)
@@ -0,0 +1,57 @@
+! PR tree-optimization/88964
+! { dg-do compile }
+! { dg-options "-O3 -fno-tree-forwprop --param sccvn-max-alias-queries-per-access=1" }
+
+MODULE pr88964
+  INTEGER, PARAMETER :: dp=8
+  REAL(KIND=dp) :: p, q, o
+CONTAINS
+  SUBROUTINE foo(a,b,c,f,h)
+    IMPLICIT NONE
+    INTEGER :: a, b, c
+    REAL(KIND=dp) :: f(b*c), h(a*c)
+    CALL bar(h)
+    CALL baz(f)
+    CALL qux(h)
+  END SUBROUTINE foo
+  SUBROUTINE bar(h)
+    IMPLICIT NONE
+    REAL(KIND=dp) :: h(1*1)
+    INTEGER :: r, s, t, u
+    DO u = 1,3
+      DO t = 1,1
+        DO s = 1,3
+          DO r = 1,1
+            h((t-1)*1+r) = h((t-1)*1+r)-p*o
+          END DO
+        END DO
+      END DO
+    END DO
+  END SUBROUTINE bar
+  SUBROUTINE baz(f)
+    IMPLICIT NONE
+    REAL(KIND=dp) :: f(3*1)
+    INTEGER :: s, t, u
+    DO u = 1,4
+      DO t = 1,1
+        DO s = 1,3
+          f((t-1)*3+s) = f((t-1)*3+s) - q
+        END DO
+      END DO
+    END DO
+  END SUBROUTINE baz
+  SUBROUTINE qux(h)
+    IMPLICIT NONE
+    REAL(KIND=dp) :: h(1*1)
+    INTEGER :: r, s, t, u
+    DO u = 1,5
+      DO t = 1,1
+        DO s = 1,3
+          DO r = 1,1
+            h((t-1)*1+r) = h((t-1)*1+r)-p*o
+          END DO
+        END DO
+      END DO
+    END DO
+  END SUBROUTINE qux
+END MODULE pr88964
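
A quick illustration of the comment added above (not part of the patch):
representing a loop-invariant initial value init as "init + 0.0" is only safe
when signed zeros need not be honored, because adding +0.0 loses the sign of a
negative zero under the default rounding mode:

#include <cstdio>

int
main ()
{
  double init = -0.0;
  std::printf ("%g %g\n", init, init + 0.0);   /* prints "-0 0" */
  return 0;
}
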
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-01-24  Jakub Jelinek  <jakub@redhat.com>

	PR c++/88976
	* semantics.c (finish_omp_cancel): Use maybe_convert_cond when not in
	template or build_x_binary_op otherwise.

	* c-c++-common/gomp/cancel-2.c: New test.
	* gcc.dg/gomp/cancel-1.c: New test.
	* g++.dg/gomp/cancel-1.C: New test.
	* g++.dg/gomp/cancel-2.C: New test.
	* g++.dg/gomp/cancel-3.C: New test.

--- gcc/cp/semantics.c	(revision 268244)
+++ gcc/cp/semantics.c	(revision 268245)
@@ -8518,10 +8518,13 @@ finish_omp_cancel (tree clauses)
   tree ifc = omp_find_clause (clauses, OMP_CLAUSE_IF);
   if (ifc != NULL_TREE)
     {
-      tree type = TREE_TYPE (OMP_CLAUSE_IF_EXPR (ifc));
-      ifc = fold_build2_loc (OMP_CLAUSE_LOCATION (ifc), NE_EXPR,
-			     boolean_type_node, OMP_CLAUSE_IF_EXPR (ifc),
-			     build_zero_cst (type));
+      if (!processing_template_decl)
+	ifc = maybe_convert_cond (OMP_CLAUSE_IF_EXPR (ifc));
+      else
+	ifc = build_x_binary_op (OMP_CLAUSE_LOCATION (ifc), NE_EXPR,
+				 OMP_CLAUSE_IF_EXPR (ifc), ERROR_MARK,
+				 integer_zero_node, ERROR_MARK,
+				 NULL, tf_warning_or_error);
     }
   else
     ifc = boolean_true_node;
--- gcc/testsuite/gcc.dg/gomp/cancel-1.c	(nonexistent)
+++ gcc/testsuite/gcc.dg/gomp/cancel-1.c	(revision 268245)
@@ -0,0 +1,12 @@
+/* { dg-do compile } */
+
+struct S { int s; } s;
+
+void
+foo (void)
+{
+  #pragma omp parallel
+  {
+    #pragma omp cancel parallel if (s)	/* { dg-error "used struct type value where scalar is required" } */
+  }
+}
--- gcc/testsuite/g++.dg/gomp/cancel-1.C	(nonexistent)
+++ gcc/testsuite/g++.dg/gomp/cancel-1.C	(revision 268245)
@@ -0,0 +1,26 @@
+// PR c++/88976
+// { dg-do compile }
+
+template <class T> void
+foo (T x)
+{
+#pragma omp parallel
+  {
+  #pragma omp cancel parallel if (x)
+  }
+#pragma omp parallel
+  {
+  #pragma omp cancel parallel if (1 == 1)
+  }
+}
+
+void
+bar (int x, double y, long long z)
+{
+  foo (0);
+  foo (1LL);
+  foo (1.25);
+  foo (x);
+  foo (y);
+  foo (z);
+}
--- gcc/testsuite/g++.dg/gomp/cancel-2.C	(nonexistent)
+++ gcc/testsuite/g++.dg/gomp/cancel-2.C	(revision 268245)
@@ -0,0 +1,20 @@
+// PR c++/88976
+// { dg-do compile }
+
+template <class T> void
+foo (T x)
+{
+#pragma omp parallel
+  {
+  #pragma omp cancel parallel if (x)	// { dg-error "no match for" }
+  }
+}
+
+struct S {};
+
+void
+bar ()
+{
+  S s;
+  foo (s);
+}
--- gcc/testsuite/g++.dg/gomp/cancel-3.C	(nonexistent)
+++ gcc/testsuite/g++.dg/gomp/cancel-3.C	(revision 268245)
@@ -0,0 +1,12 @@
+// { dg-do compile }
+
+struct S { int s; } s;
+
+void
+foo (void)
+{
+  #pragma omp parallel
+  {
+    #pragma omp cancel parallel if (s)	// { dg-error "could not convert 's' from 'S' to 'bool'" }
+  }
+}
--- gcc/testsuite/c-c++-common/gomp/cancel-2.c	(nonexistent)
+++ gcc/testsuite/c-c++-common/gomp/cancel-2.c	(revision 268245)
@@ -0,0 +1,10 @@
+/* { dg-do compile } */
+
+void
+foo (void)
+{
+  #pragma omp parallel
+  {
+    #pragma omp cancel parallel if (1) if (1)			/* { dg-error "too many 'if' clauses without modifier" } */
+  }
+}
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-01-24  Jakub Jelinek  <jakub@redhat.com>

	PR tree-optimization/88964
	* gimple-loop-interchange.cc (loop_cand::analyze_induction_var): Also
	punt if HONOR_SNANS (chrec).

--- gcc/gimple-loop-interchange.cc	(revision 268246)
+++ gcc/gimple-loop-interchange.cc	(revision 268247)
@@ -690,8 +690,8 @@ loop_cand::analyze_induction_var (tree v
     {
       /* Punt on floating point invariants if honoring signed zeros,
 	 representing that as + 0.0 would change the result if init
-	 is -0.0.  */
-      if (HONOR_SIGNED_ZEROS (chrec))
+	 is -0.0.  Similarly for SNaNs it can raise exception.  */
+      if (HONOR_SIGNED_ZEROS (chrec) || HONOR_SNANS (chrec))
 	return false;
       struct induction *iv = XCNEW (struct induction);
       iv->var = var;
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-01-27  Jakub Jelinek  <jakub@redhat.com>

	PR target/87214
	* config/i386/sse.md
	(<mask_codefor>avx512dq_shuf_<shuffletype>64x2_1<mask_name>,
	avx512f_shuf_<shuffletype>64x2_1<mask_name>): Ensure the
	first constants in pairs are multiples of 2.  Formatting fixes.
	(avx512vl_shuf_<shuffletype>32x4_1<mask_name>,
	avx512f_shuf_<shuffletype>32x4_1<mask_name>): Ensure the
	first constants in each quadruple are multiples of 4.  Formatting fixes.

	* gcc.target/i386/avx512vl-pr87214-1.c: New test.
	* gcc.target/i386/avx512vl-pr87214-2.c: New test.

--- gcc/config/i386/sse.md	(revision 268309)
+++ gcc/config/i386/sse.md	(revision 268310)
@@ -13372,13 +13372,15 @@ (define_insn "<mask_codefor>avx512dq_shu
 	  (vec_concat:<ssedoublemode>
 	    (match_operand:VI8F_256 1 "register_operand" "v")
 	    (match_operand:VI8F_256 2 "nonimmediate_operand" "vm"))
-	  (parallel [(match_operand 3  "const_0_to_3_operand")
-		     (match_operand 4  "const_0_to_3_operand")
-		     (match_operand 5  "const_4_to_7_operand")
-		     (match_operand 6  "const_4_to_7_operand")])))]
+	  (parallel [(match_operand 3 "const_0_to_3_operand")
+		     (match_operand 4 "const_0_to_3_operand")
+		     (match_operand 5 "const_4_to_7_operand")
+		     (match_operand 6 "const_4_to_7_operand")])))]
   "TARGET_AVX512VL
-   && (INTVAL (operands[3]) == (INTVAL (operands[4]) - 1)
-       && INTVAL (operands[5]) == (INTVAL (operands[6]) - 1))"
+   && (INTVAL (operands[3]) & 1) == 0
+   && INTVAL (operands[3]) == INTVAL (operands[4]) - 1
+   && (INTVAL (operands[5]) & 1) == 0
+   && INTVAL (operands[5]) == INTVAL (operands[6]) - 1"
 {
   int mask;
   mask = INTVAL (operands[3]) / 2;
@@ -13421,19 +13423,23 @@ (define_insn "avx512f_shuf_<shuffletype>
 	  (vec_concat:<ssedoublemode>
 	    (match_operand:V8FI 1 "register_operand" "v")
 	    (match_operand:V8FI 2 "nonimmediate_operand" "vm"))
-	  (parallel [(match_operand 3  "const_0_to_7_operand")
-		     (match_operand 4  "const_0_to_7_operand")
-		     (match_operand 5  "const_0_to_7_operand")
-		     (match_operand 6  "const_0_to_7_operand")
-		     (match_operand 7  "const_8_to_15_operand")
-		     (match_operand 8  "const_8_to_15_operand")
-		     (match_operand 9  "const_8_to_15_operand")
-		     (match_operand 10  "const_8_to_15_operand")])))]
+	  (parallel [(match_operand 3 "const_0_to_7_operand")
+		     (match_operand 4 "const_0_to_7_operand")
+		     (match_operand 5 "const_0_to_7_operand")
+		     (match_operand 6 "const_0_to_7_operand")
+		     (match_operand 7 "const_8_to_15_operand")
+		     (match_operand 8 "const_8_to_15_operand")
+		     (match_operand 9 "const_8_to_15_operand")
+		     (match_operand 10 "const_8_to_15_operand")])))]
   "TARGET_AVX512F
-   && (INTVAL (operands[3]) == (INTVAL (operands[4]) - 1)
-       && INTVAL (operands[5]) == (INTVAL (operands[6]) - 1)
-       && INTVAL (operands[7]) == (INTVAL (operands[8]) - 1)
-       && INTVAL (operands[9]) == (INTVAL (operands[10]) - 1))"
+   && (INTVAL (operands[3]) & 1) == 0
+   && INTVAL (operands[3]) == INTVAL (operands[4]) - 1
+   && (INTVAL (operands[5]) & 1) == 0
+   && INTVAL (operands[5]) == INTVAL (operands[6]) - 1
+   && (INTVAL (operands[7]) & 1) == 0
+   && INTVAL (operands[7]) == INTVAL (operands[8]) - 1
+   && (INTVAL (operands[9]) & 1) == 0
+   && INTVAL (operands[9]) == INTVAL (operands[10]) - 1"
 {
   int mask;
   mask = INTVAL (operands[3]) / 2;
@@ -13479,21 +13485,23 @@ (define_insn "avx512vl_shuf_<shuffletype
 	  (vec_concat:<ssedoublemode>
 	    (match_operand:VI4F_256 1 "register_operand" "v")
 	    (match_operand:VI4F_256 2 "nonimmediate_operand" "vm"))
-	  (parallel [(match_operand 3  "const_0_to_7_operand")
-		     (match_operand 4  "const_0_to_7_operand")
-		     (match_operand 5  "const_0_to_7_operand")
-		     (match_operand 6  "const_0_to_7_operand")
-		     (match_operand 7  "const_8_to_15_operand")
-		     (match_operand 8  "const_8_to_15_operand")
-		     (match_operand 9  "const_8_to_15_operand")
+	  (parallel [(match_operand 3 "const_0_to_7_operand")
+		     (match_operand 4 "const_0_to_7_operand")
+		     (match_operand 5 "const_0_to_7_operand")
+		     (match_operand 6 "const_0_to_7_operand")
+		     (match_operand 7 "const_8_to_15_operand")
+		     (match_operand 8 "const_8_to_15_operand")
+		     (match_operand 9 "const_8_to_15_operand")
 		     (match_operand 10 "const_8_to_15_operand")])))]
   "TARGET_AVX512VL
-   && (INTVAL (operands[3]) == (INTVAL (operands[4]) - 1)
-       && INTVAL (operands[3]) == (INTVAL (operands[5]) - 2)
-       && INTVAL (operands[3]) == (INTVAL (operands[6]) - 3)
-       && INTVAL (operands[7]) == (INTVAL (operands[8]) - 1)
-       && INTVAL (operands[7]) == (INTVAL (operands[9]) - 2)
-       && INTVAL (operands[7]) == (INTVAL (operands[10]) - 3))"
+   && (INTVAL (operands[3]) & 3) == 0
+   && INTVAL (operands[3]) == INTVAL (operands[4]) - 1
+   && INTVAL (operands[3]) == INTVAL (operands[5]) - 2
+   && INTVAL (operands[3]) == INTVAL (operands[6]) - 3
+   && (INTVAL (operands[7]) & 3) == 0
+   && INTVAL (operands[7]) == INTVAL (operands[8]) - 1
+   && INTVAL (operands[7]) == INTVAL (operands[9]) - 2
+   && INTVAL (operands[7]) == INTVAL (operands[10]) - 3"
 {
   int mask;
   mask = INTVAL (operands[3]) / 4;
@@ -13545,35 +13553,39 @@ (define_insn "avx512f_shuf_<shuffletype>
 	  (vec_concat:<ssedoublemode>
 	    (match_operand:V16FI 1 "register_operand" "v")
 	    (match_operand:V16FI 2 "nonimmediate_operand" "vm"))
-	  (parallel [(match_operand 3  "const_0_to_15_operand")
-		     (match_operand 4  "const_0_to_15_operand")
-		     (match_operand 5  "const_0_to_15_operand")
-		     (match_operand 6  "const_0_to_15_operand")
-		     (match_operand 7  "const_0_to_15_operand")
-		     (match_operand 8  "const_0_to_15_operand")
-		     (match_operand 9  "const_0_to_15_operand")
-		     (match_operand 10  "const_0_to_15_operand")
-		     (match_operand 11  "const_16_to_31_operand")
-		     (match_operand 12  "const_16_to_31_operand")
-		     (match_operand 13  "const_16_to_31_operand")
-		     (match_operand 14  "const_16_to_31_operand")
-		     (match_operand 15  "const_16_to_31_operand")
-		     (match_operand 16  "const_16_to_31_operand")
-		     (match_operand 17  "const_16_to_31_operand")
-		     (match_operand 18  "const_16_to_31_operand")])))]
+	  (parallel [(match_operand 3 "const_0_to_15_operand")
+		     (match_operand 4 "const_0_to_15_operand")
+		     (match_operand 5 "const_0_to_15_operand")
+		     (match_operand 6 "const_0_to_15_operand")
+		     (match_operand 7 "const_0_to_15_operand")
+		     (match_operand 8 "const_0_to_15_operand")
+		     (match_operand 9 "const_0_to_15_operand")
+		     (match_operand 10 "const_0_to_15_operand")
+		     (match_operand 11 "const_16_to_31_operand")
+		     (match_operand 12 "const_16_to_31_operand")
+		     (match_operand 13 "const_16_to_31_operand")
+		     (match_operand 14 "const_16_to_31_operand")
+		     (match_operand 15 "const_16_to_31_operand")
+		     (match_operand 16 "const_16_to_31_operand")
+		     (match_operand 17 "const_16_to_31_operand")
+		     (match_operand 18 "const_16_to_31_operand")])))]
   "TARGET_AVX512F
-   && (INTVAL (operands[3]) == (INTVAL (operands[4]) - 1)
-       && INTVAL (operands[3]) == (INTVAL (operands[5]) - 2)
-       && INTVAL (operands[3]) == (INTVAL (operands[6]) - 3)
-       && INTVAL (operands[7]) == (INTVAL (operands[8]) - 1)
-       && INTVAL (operands[7]) == (INTVAL (operands[9]) - 2)
-       && INTVAL (operands[7]) == (INTVAL (operands[10]) - 3)
-       && INTVAL (operands[11]) == (INTVAL (operands[12]) - 1)
-       && INTVAL (operands[11]) == (INTVAL (operands[13]) - 2)
-       && INTVAL (operands[11]) == (INTVAL (operands[14]) - 3)
-       && INTVAL (operands[15]) == (INTVAL (operands[16]) - 1)
-       && INTVAL (operands[15]) == (INTVAL (operands[17]) - 2)
-       && INTVAL (operands[15]) == (INTVAL (operands[18]) - 3))"
+   && (INTVAL (operands[3]) & 3) == 0
+   && INTVAL (operands[3]) == INTVAL (operands[4]) - 1
+   && INTVAL (operands[3]) == INTVAL (operands[5]) - 2
+   && INTVAL (operands[3]) == INTVAL (operands[6]) - 3
+   && (INTVAL (operands[7]) & 3) == 0
+   && INTVAL (operands[7]) == INTVAL (operands[8]) - 1
+   && INTVAL (operands[7]) == INTVAL (operands[9]) - 2
+   && INTVAL (operands[7]) == INTVAL (operands[10]) - 3
+   && (INTVAL (operands[11]) & 3) == 0
+   && INTVAL (operands[11]) == INTVAL (operands[12]) - 1
+   && INTVAL (operands[11]) == INTVAL (operands[13]) - 2
+   && INTVAL (operands[11]) == INTVAL (operands[14]) - 3
+   && (INTVAL (operands[15]) & 3) == 0
+   && INTVAL (operands[15]) == INTVAL (operands[16]) - 1
+   && INTVAL (operands[15]) == INTVAL (operands[17]) - 2
+   && INTVAL (operands[15]) == INTVAL (operands[18]) - 3"
 {
   int mask;
   mask = INTVAL (operands[3]) / 4;
--- gcc/testsuite/gcc.target/i386/avx512vl-pr87214-1.c	(nonexistent)
+++ gcc/testsuite/gcc.target/i386/avx512vl-pr87214-1.c	(revision 268310)
@@ -0,0 +1,44 @@
+/* PR target/87214 */
+/* { dg-do run { target { avx512vl } } } */
+/* { dg-options "-O3 -mavx512vl -mtune=skylake-avx512" } */
+
+#define AVX512VL
+#define AVX512F_LEN 512
+#define AVX512F_LEN_HALF 256
+#include "avx512f-check.h"
+
+struct s { unsigned long a, b, c; };
+
+void __attribute__ ((noipa))
+foo (struct s *restrict s1, struct s *restrict s2, int n)
+{
+  for (int i = 0; i < n; ++i)
+    {
+      s1[i].b = s2[i].b;
+      s1[i].c = s2[i].c;
+      s2[i].c = 0;
+    }
+}
+                            
+#define N 12
+
+static void
+test_256 (void)
+{
+  struct s s1[N], s2[N];
+  for (unsigned int j = 0; j < N; ++j)
+    {
+      s2[j].a = j * 5;
+      s2[j].b = j * 5 + 2;
+      s2[j].c = j * 5 + 4;
+    }
+  foo (s1, s2, N);
+  for (unsigned int j = 0; j < N; ++j)
+  if (s1[j].b != j * 5 + 2)
+    __builtin_abort ();
+}
+
+static void
+test_128 (void)
+{
+}
--- gcc/testsuite/gcc.target/i386/avx512vl-pr87214-2.c	(nonexistent)
+++ gcc/testsuite/gcc.target/i386/avx512vl-pr87214-2.c	(revision 268310)
@@ -0,0 +1,128 @@
+/* PR target/87214 */
+/* { dg-do run { target { avx512vl } } } */
+/* { dg-options "-O2 -mavx512vl" } */
+
+#define AVX512VL
+#define AVX512F_LEN 512
+#define AVX512F_LEN_HALF 256
+#include "avx512f-check.h"
+
+typedef long long int v4di __attribute__((vector_size (4 * sizeof (long long int))));
+typedef double v4df __attribute__((vector_size (4 * sizeof (double))));
+typedef long long int v8di __attribute__((vector_size (8 * sizeof (long long int))));
+typedef double v8df __attribute__((vector_size (8 * sizeof (double))));
+typedef int v8si __attribute__((vector_size (8 * sizeof (int))));
+typedef float v8sf __attribute__((vector_size (8 * sizeof (float))));
+typedef int v16si __attribute__((vector_size (16 * sizeof (int))));
+typedef float v16sf __attribute__((vector_size (16 * sizeof (float))));
+
+__attribute__((noipa)) void
+f1 (v4di *p)
+{
+  p[0] = __builtin_shuffle (p[1], p[2], (v4di) { 2, 3, 5, 6 });
+}
+
+__attribute__((noipa)) void
+f2 (v4df *p)
+{
+  p[0] = __builtin_shuffle (p[1], p[2], (v4di) { 1, 2, 6, 7 });
+}
+
+__attribute__((noipa)) void
+f3 (v8di *p)
+{
+  p[0] = __builtin_shuffle (p[1], p[2], (v8di) { 2, 3, 5, 6, 8, 9, 11, 12 });
+}
+
+__attribute__((noipa)) void
+f4 (v8df *p)
+{
+  p[0] = __builtin_shuffle (p[1], p[2], (v8di) { 1, 2, 6, 7, 9, 10, 12, 13 });
+}
+
+__attribute__((noipa)) void
+f5 (v8si *p)
+{
+  p[0] = __builtin_shuffle (p[1], p[2], (v8si) { 2, 3, 4, 5, 9, 10, 11, 12 });
+}
+
+__attribute__((noipa)) void
+f6 (v8sf *p)
+{
+  p[0] = __builtin_shuffle (p[1], p[2], (v8si) { 1, 2, 3, 4, 12, 13, 14, 15 });
+}
+
+__attribute__((noipa)) void
+f7 (v16si *p)
+{
+  p[0] = __builtin_shuffle (p[1], p[2], (v16si) { 0, 1, 2, 3, 1, 2, 3, 4, 16, 17, 18, 19, 25, 26, 27, 28 });
+}
+
+__attribute__((noipa)) void
+f8 (v16sf *p)
+{
+  p[0] = __builtin_shuffle (p[1], p[2], (v16si) { 1, 2, 3, 4, 4, 5, 6, 7, 17, 18, 19, 20, 18, 19, 20, 21 });
+}
+
+static void
+test_256 (void)
+{
+  v4di a[3] = { { 0, 0, 0, 0 }, { 10, 11, 12, 13 }, { 14, 15, 16, 17 } };
+  f1 (a);
+  if (a[0][0] != 12 || a[0][1] != 13 || a[0][2] != 15 || a[0][3] != 16)
+    __builtin_abort ();
+  v4df b[3] = { { 0.0, 0.0, 0.0, 0.0 }, { 10.0, 11.0, 12.0, 13.0 }, { 14.0, 15.0, 16.0, 17.0 } };
+  f2 (b);
+  if (b[0][0] != 11.0 || b[0][1] != 12.0 || b[0][2] != 16.0 || b[0][3] != 17.0)
+    __builtin_abort ();
+  v8di c[3] = { { 0, 0, 0, 0, 0, 0, 0, 0 }, { 10, 11, 12, 13, 14, 15, 16, 17 }, { 18, 19, 20, 21, 22, 23, 24, 25 } };
+  f3 (c);
+  if (c[0][0] != 12 || c[0][1] != 13 || c[0][2] != 15 || c[0][3] != 16
+      || c[0][4] != 18 || c[0][5] != 19 || c[0][6] != 21 || c[0][7] != 22)
+    __builtin_abort ();
+  v8df d[3] = { { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 },
+		{ 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0 },
+		{ 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0, 25.0 } };
+  f4 (d);
+  if (d[0][0] != 11.0 || d[0][1] != 12.0 || d[0][2] != 16.0 || d[0][3] != 17.0
+      || d[0][4] != 19.0 || d[0][5] != 20.0 || d[0][6] != 22.0 || d[0][7] != 23.0)
+    __builtin_abort ();
+  v8si e[3] = { { 0, 0, 0, 0, 0, 0, 0, 0 }, { 10, 11, 12, 13, 14, 15, 16, 17 }, { 18, 19, 20, 21, 22, 23, 24, 25 } };
+  f5 (e);
+  if (e[0][0] != 12 || e[0][1] != 13 || e[0][2] != 14 || e[0][3] != 15
+      || e[0][4] != 19 || e[0][5] != 20 || e[0][6] != 21 || e[0][7] != 22)
+    __builtin_abort ();
+  v8sf f[3] = { { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f },
+		{ 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f },
+		{ 18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f } };
+  f6 (f);
+  if (f[0][0] != 11.0f || f[0][1] != 12.0f || f[0][2] != 13.0f || f[0][3] != 14.0f
+      || f[0][4] != 22.0f || f[0][5] != 23.0f || f[0][6] != 24.0f || f[0][7] != 25.0f)
+    __builtin_abort ();
+  v16si g[3] = { { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
+		 { 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25 },
+		 { 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41 } };
+  f7 (g);
+  if (g[0][0] != 10 || g[0][1] != 11 || g[0][2] != 12 || g[0][3] != 13
+      || g[0][4] != 11 || g[0][5] != 12 || g[0][6] != 13 || g[0][7] != 14
+      || g[0][8] != 26 || g[0][9] != 27 || g[0][10] != 28 || g[0][11] != 29
+      || g[0][12] != 35 || g[0][13] != 36 || g[0][14] != 37 || g[0][15] != 38)
+    __builtin_abort ();
+  v16sf h[3] = { { 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f,
+		   0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f },
+		 { 10.0f, 11.0f, 12.0f, 13.0f, 14.0f, 15.0f, 16.0f, 17.0f,
+		   18.0f, 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f, 25.0f },
+		 { 26.0f, 27.0f, 28.0f, 29.0f, 30.0f, 31.0f, 32.0f, 33.0f,
+		   34.0f, 35.0f, 36.0f, 37.0f, 38.0f, 39.0f, 40.0f, 41.0f } };
+  f8 (h);
+  if (h[0][0] != 11.0f || h[0][1] != 12.0f || h[0][2] != 13.0f || h[0][3] != 14.0f
+      || h[0][4] != 14.0f || h[0][5] != 15.0f || h[0][6] != 16.0f || h[0][7] != 17.0f
+      || h[0][8] != 27.0f || h[0][9] != 28.0f || h[0][10] != 29.0f || h[0][11] != 30.0f
+      || h[0][12] != 28.0f || h[0][13] != 29.0f || h[0][14] != 30.0f || h[0][15] != 31.0f)
+    __builtin_abort ();
+}
+
+static void
+test_128 (void)
+{
+}
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-01-28  Jakub Jelinek  <jakub@redhat.com>

	PR target/89073
	* doc/invoke.texi (-mclwb, -mprfchw, -mrdpid, -mrdseed, -msgx,
	-madx, -mhle, -mavx5124fmaps, -mavx512vnni, -mavx5124vnniw): Document
	x86 ISA options.
	(bmi2): Add missing @opindex.
	* doc/extend.texi (x86 target attribute): Move fma4, lwp, ssse3
	options alphabetically.  Add missing 3dnow, 3dnowa, adx, avx, avx2,
	avx5124fmaps, avx5124vnniw, avx512bitalg, avx512bw, avx512cd,
	avx512dq, avx512er, avx512f, avx512ifma, avx512pf, avx512vbmi,
	avx512vbmi2, avx512vl, avx512vnni, avx512vpopcntdq, bmi, bmi2,
	clflushopt, clwb, clzero, crc32, cx16, f16c, fma, fsgsbase,
	fxsr, gfni, hle, lzcnt, movbe, movdir64b, movdiri, mwaitx, pconfig,
	pku, prefetchwt1, prfchw, rdpid, rdrnd, rdseed, rtm, sahf,
	sgx, sha, shstk, tbm, vaes, vpclmulqdq, wbnoinvd, xsave,
	xsavec, xsaveopt and xsaves options.
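
As context for the options being documented (not part of the patch), each name
is used with the per-function target attribute in the usual way; a minimal
sketch, with made-up function names:

/* Compile only this function with AVX512VL/AVX512BW enabled; the rest of
   the translation unit keeps the command-line ISA settings.  */
__attribute__ ((target ("avx512vl,avx512bw")))
void
bump_bytes (unsigned char *dst, const unsigned char *src, int n)
{
  for (int i = 0; i < n; i++)
    dst[i] = src[i] + 1;
}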

--- gcc/doc/extend.texi	(revision 268334)
+++ gcc/doc/extend.texi	(revision 268335)
@@ -5642,36 +5642,296 @@ allows specification of target-specific
 
 On the x86, the following options are allowed:
 @table @samp
+@item 3dnow
+@itemx no-3dnow
+@cindex @code{target("3dnow")} function attribute, x86
+Enable/disable the generation of the 3DNow!@: instructions.
+
+@item 3dnowa
+@itemx no-3dnowa
+@cindex @code{target("3dnowa")} function attribute, x86
+Enable/disable the generation of the enhanced 3DNow!@: instructions.
+
 @item abm
 @itemx no-abm
 @cindex @code{target("abm")} function attribute, x86
 Enable/disable the generation of the advanced bit instructions.
 
+@item adx
+@itemx no-adx
+@cindex @code{target("adx")} function attribute, x86
+Enable/disable the generation of the ADX instructions.
+
 @item aes
 @itemx no-aes
 @cindex @code{target("aes")} function attribute, x86
 Enable/disable the generation of the AES instructions.
 
+@item avx
+@itemx no-avx
+@cindex @code{target("avx")} function attribute, x86
+Enable/disable the generation of the AVX instructions.
+
+@item avx2
+@itemx no-avx2
+@cindex @code{target("avx2")} function attribute, x86
+Enable/disable the generation of the AVX2 instructions.
+
+@item avx5124fmaps
+@itemx no-avx5124fmaps
+@cindex @code{target("avx5124fmaps")} function attribute, x86
+Enable/disable the generation of the AVX5124FMAPS instructions.
+
+@item avx5124vnniw
+@itemx no-avx5124vnniw
+@cindex @code{target("avx5124vnniw")} function attribute, x86
+Enable/disable the generation of the AVX5124VNNIW instructions.
+
+@item avx512bitalg
+@itemx no-avx512bitalg
+@cindex @code{target("avx512bitalg")} function attribute, x86
+Enable/disable the generation of the AVX512BITALG instructions.
+
+@item avx512bw
+@itemx no-avx512bw
+@cindex @code{target("avx512bw")} function attribute, x86
+Enable/disable the generation of the AVX512BW instructions.
+
+@item avx512cd
+@itemx no-avx512cd
+@cindex @code{target("avx512cd")} function attribute, x86
+Enable/disable the generation of the AVX512CD instructions.
+
+@item avx512dq
+@itemx no-avx512dq
+@cindex @code{target("avx512dq")} function attribute, x86
+Enable/disable the generation of the AVX512DQ instructions.
+
+@item avx512er
+@itemx no-avx512er
+@cindex @code{target("avx512er")} function attribute, x86
+Enable/disable the generation of the AVX512ER instructions.
+
+@item avx512f
+@itemx no-avx512f
+@cindex @code{target("avx512f")} function attribute, x86
+Enable/disable the generation of the AVX512F instructions.
+
+@item avx512ifma
+@itemx no-avx512ifma
+@cindex @code{target("avx512ifma")} function attribute, x86
+Enable/disable the generation of the AVX512IFMA instructions.
+
+@item avx512pf
+@itemx no-avx512pf
+@cindex @code{target("avx512pf")} function attribute, x86
+Enable/disable the generation of the AVX512PF instructions.
+
+@item avx512vbmi
+@itemx no-avx512vbmi
+@cindex @code{target("avx512vbmi")} function attribute, x86
+Enable/disable the generation of the AVX512VBMI instructions.
+
+@item avx512vbmi2
+@itemx no-avx512vbmi2
+@cindex @code{target("avx512vbmi2")} function attribute, x86
+Enable/disable the generation of the AVX512VBMI2 instructions.
+
+@item avx512vl
+@itemx no-avx512vl
+@cindex @code{target("avx512vl")} function attribute, x86
+Enable/disable the generation of the AVX512VL instructions.
+
+@item avx512vnni
+@itemx no-avx512vnni
+@cindex @code{target("avx512vnni")} function attribute, x86
+Enable/disable the generation of the AVX512VNNI instructions.
+
+@item avx512vpopcntdq
+@itemx no-avx512vpopcntdq
+@cindex @code{target("avx512vpopcntdq")} function attribute, x86
+Enable/disable the generation of the AVX512VPOPCNTDQ instructions.
+
+@item bmi
+@itemx no-bmi
+@cindex @code{target("bmi")} function attribute, x86
+Enable/disable the generation of the BMI instructions.
+
+@item bmi2
+@itemx no-bmi2
+@cindex @code{target("bmi2")} function attribute, x86
+Enable/disable the generation of the BMI2 instructions.
+
+@item clflushopt
+@itemx no-clflushopt
+@cindex @code{target("clflushopt")} function attribute, x86
+Enable/disable the generation of the CLFLUSHOPT instructions.
+
+@item clwb
+@itemx no-clwb
+@cindex @code{target("clwb")} function attribute, x86
+Enable/disable the generation of the CLWB instructions.
+
+@item clzero
+@itemx no-clzero
+@cindex @code{target("clzero")} function attribute, x86
+Enable/disable the generation of the CLZERO instructions.
+
+@item crc32
+@itemx no-crc32
+@cindex @code{target("crc32")} function attribute, x86
+Enable/disable the generation of the CRC32 instructions.
+
+@item cx16
+@itemx no-cx16
+@cindex @code{target("cx16")} function attribute, x86
+Enable/disable the generation of the CMPXCHG16B instructions.
+
 @item default
 @cindex @code{target("default")} function attribute, x86
 @xref{Function Multiversioning}, where it is used to specify the
 default function version.
 
+@item f16c
+@itemx no-f16c
+@cindex @code{target("f16c")} function attribute, x86
+Enable/disable the generation of the F16C instructions.
+
+@item fma
+@itemx no-fma
+@cindex @code{target("fma")} function attribute, x86
+Enable/disable the generation of the FMA instructions.
+
+@item fma4
+@itemx no-fma4
+@cindex @code{target("fma4")} function attribute, x86
+Enable/disable the generation of the FMA4 instructions.
+
+@item fsgsbase
+@itemx no-fsgsbase
+@cindex @code{target("fsgsbase")} function attribute, x86
+Enable/disable the generation of the FSGSBASE instructions.
+
+@item fxsr
+@itemx no-fxsr
+@cindex @code{target("fxsr")} function attribute, x86
+Enable/disable the generation of the FXSR instructions.
+
+@item gfni
+@itemx no-gfni
+@cindex @code{target("gfni")} function attribute, x86
+Enable/disable the generation of the GFNI instructions.
+
+@item hle
+@itemx no-hle
+@cindex @code{target("hle")} function attribute, x86
+Enable/disable the generation of the HLE instruction prefixes.
+
+@item lwp
+@itemx no-lwp
+@cindex @code{target("lwp")} function attribute, x86
+Enable/disable the generation of the LWP instructions.
+
+@item lzcnt
+@itemx no-lzcnt
+@cindex @code{target("lzcnt")} function attribute, x86
+Enable/disable the generation of the LZCNT instructions.
+
 @item mmx
 @itemx no-mmx
 @cindex @code{target("mmx")} function attribute, x86
 Enable/disable the generation of the MMX instructions.
 
+@item movbe
+@itemx no-movbe
+@cindex @code{target("movbe")} function attribute, x86
+Enable/disable the generation of the MOVBE instructions.
+
+@item movdir64b
+@itemx no-movdir64b
+@cindex @code{target("movdir64b")} function attribute, x86
+Enable/disable the generation of the MOVDIR64B instructions.
+
+@item movdiri
+@itemx no-movdiri
+@cindex @code{target("movdiri")} function attribute, x86
+Enable/disable the generation of the MOVDIRI instructions.
+
+@item mwaitx
+@itemx no-mwaitx
+@cindex @code{target("mwaitx")} function attribute, x86
+Enable/disable the generation of the MWAITX instructions.
+
 @item pclmul
 @itemx no-pclmul
 @cindex @code{target("pclmul")} function attribute, x86
 Enable/disable the generation of the PCLMUL instructions.
 
+@item pconfig
+@itemx no-pconfig
+@cindex @code{target("pconfig")} function attribute, x86
+Enable/disable the generation of the PCONFIG instructions.
+
+@item pku
+@itemx no-pku
+@cindex @code{target("pku")} function attribute, x86
+Enable/disable the generation of the PKU instructions.
+
 @item popcnt
 @itemx no-popcnt
 @cindex @code{target("popcnt")} function attribute, x86
 Enable/disable the generation of the POPCNT instruction.
 
+@item prefetchwt1
+@itemx no-prefetchwt1
+@cindex @code{target("prefetchwt1")} function attribute, x86
+Enable/disable the generation of the PREFETCHWT1 instructions.
+
+@item prfchw
+@itemx no-prfchw
+@cindex @code{target("prfchw")} function attribute, x86
+Enable/disable the generation of the PREFETCHW instruction.
+
+@item rdpid
+@itemx no-rdpid
+@cindex @code{target("rdpid")} function attribute, x86
+Enable/disable the generation of the RDPID instructions.
+
+@item rdrnd
+@itemx no-rdrnd
+@cindex @code{target("rdrnd")} function attribute, x86
+Enable/disable the generation of the RDRND instructions.
+
+@item rdseed
+@itemx no-rdseed
+@cindex @code{target("rdseed")} function attribute, x86
+Enable/disable the generation of the RDSEED instructions.
+
+@item rtm
+@itemx no-rtm
+@cindex @code{target("rtm")} function attribute, x86
+Enable/disable the generation of the RTM instructions.
+
+@item sahf
+@itemx no-sahf
+@cindex @code{target("sahf")} function attribute, x86
+Enable/disable the generation of the SAHF instructions.
+
+@item sgx
+@itemx no-sgx
+@cindex @code{target("sgx")} function attribute, x86
+Enable/disable the generation of the SGX instructions.
+
+@item sha
+@itemx no-sha
+@cindex @code{target("sha")} function attribute, x86
+Enable/disable the generation of the SHA instructions.
+
+@item shstk
+@itemx no-shstk
+@cindex @code{target("shstk")} function attribute, x86
+Enable/disable the shadow stack built-in functions from CET.
+
 @item sse
 @itemx no-sse
 @cindex @code{target("sse")} function attribute, x86
@@ -5708,25 +5968,55 @@ Enable/disable the generation of the sse
 @cindex @code{target("sse4a")} function attribute, x86
 Enable/disable the generation of the SSE4A instructions.
 
-@item fma4
-@itemx no-fma4
-@cindex @code{target("fma4")} function attribute, x86
-Enable/disable the generation of the FMA4 instructions.
+@item ssse3
+@itemx no-ssse3
+@cindex @code{target("ssse3")} function attribute, x86
+Enable/disable the generation of the SSSE3 instructions.
+
+@item tbm
+@itemx no-tbm
+@cindex @code{target("tbm")} function attribute, x86
+Enable/disable the generation of the TBM instructions.
+
+@item vaes
+@itemx no-vaes
+@cindex @code{target("vaes")} function attribute, x86
+Enable/disable the generation of the VAES instructions.
+
+@item vpclmulqdq
+@itemx no-vpclmulqdq
+@cindex @code{target("vpclmulqdq")} function attribute, x86
+Enable/disable the generation of the VPCLMULQDQ instructions.
+
+@item wbnoinvd
+@itemx no-wbnoinvd
+@cindex @code{target("wbnoinvd")} function attribute, x86
+Enable/disable the generation of the WBNOINVD instructions.
 
 @item xop
 @itemx no-xop
 @cindex @code{target("xop")} function attribute, x86
 Enable/disable the generation of the XOP instructions.
 
-@item lwp
-@itemx no-lwp
-@cindex @code{target("lwp")} function attribute, x86
-Enable/disable the generation of the LWP instructions.
-
-@item ssse3
-@itemx no-ssse3
-@cindex @code{target("ssse3")} function attribute, x86
-Enable/disable the generation of the SSSE3 instructions.
+@item xsave
+@itemx no-xsave
+@cindex @code{target("xsave")} function attribute, x86
+Enable/disable the generation of the XSAVE instructions.
+
+@item xsavec
+@itemx no-xsavec
+@cindex @code{target("xsavec")} function attribute, x86
+Enable/disable the generation of the XSAVEC instructions.
+
+@item xsaveopt
+@itemx no-xsaveopt
+@cindex @code{target("xsaveopt")} function attribute, x86
+Enable/disable the generation of the XSAVEOPT instructions.
+
+@item xsaves
+@itemx no-xsaves
+@cindex @code{target("xsaves")} function attribute, x86
+Enable/disable the generation of the XSAVES instructions.
 
 @item cld
 @itemx no-cld
--- gcc/doc/invoke.texi	(revision 268334)
+++ gcc/doc/invoke.texi	(revision 268335)
@@ -1257,12 +1257,14 @@ See RS/6000 and PowerPC Options.
 -mavx2  -mavx512f  -mavx512pf  -mavx512er  -mavx512cd  -mavx512vl @gol
 -mavx512bw  -mavx512dq  -mavx512ifma  -mavx512vbmi  -msha  -maes @gol
 -mpclmul  -mfsgsbase  -mrdrnd  -mf16c  -mfma -mpconfig -mwbnoinvd @gol
--mprefetchwt1  -mclflushopt  -mxsavec  -mxsaves @gol
+-mprefetchwt1  -mclflushopt  -mclwb  -mxsavec  -mxsaves @gol
 -msse4a  -m3dnow  -m3dnowa  -mpopcnt  -mabm  -mbmi  -mtbm  -mfma4  -mxop @gol
--mlzcnt  -mbmi2  -mfxsr  -mxsave  -mxsaveopt  -mrtm  -mlwp  -mmpx  @gol
+-madx  -mlzcnt  -mbmi2  -mfxsr  -mxsave  -mxsaveopt  -mrtm  -mlwp  -mmpx  @gol
 -mmwaitx  -mclzero  -mpku  -mthreads -mgfni  -mvaes  @gol
 -mshstk -mforce-indirect-call -mavx512vbmi2 @gol
 -mvpclmulqdq -mavx512bitalg -mmovdiri -mmovdir64b -mavx512vpopcntdq @gol
+-mavx5124fmaps  -mavx512vnni  -mavx5124vnniw  -mprfchw  -mrdpid @gol
+-mrdseed  -msgx @gol
 -mms-bitfields  -mno-align-stringops  -minline-all-stringops @gol
 -minline-stringops-dynamically  -mstringop-strategy=@var{alg} @gol
 -mmemcpy-strategy=@var{strategy}  -mmemset-strategy=@var{strategy} @gol
@@ -27250,6 +27252,9 @@ preferred alignment to @option{-mpreferr
 @itemx -mclflushopt
 @opindex mclflushopt
 @need 200
+@itemx -mclwb
+@opindex mclwb
+@need 200
 @itemx -mfsgsbase
 @opindex mfsgsbase
 @need 200
@@ -27271,9 +27276,21 @@ preferred alignment to @option{-mpreferr
 @itemx -mfma4
 @opindex mfma4
 @need 200
+@itemx -mprfchw
+@opindex mprfchw
+@need 200
+@itemx -mrdpid
+@opindex mrdpid
+@need 200
 @itemx -mprefetchwt1
 @opindex mprefetchwt1
 @need 200
+@itemx -mrdseed
+@opindex mrdseed
+@need 200
+@itemx -msgx
+@opindex msgx
+@need 200
 @itemx -mxop
 @opindex mxop
 @need 200
@@ -27292,10 +27309,14 @@ preferred alignment to @option{-mpreferr
 @itemx -mabm
 @opindex mabm
 @need 200
+@itemx -madx
+@opindex madx
+@need 200
 @itemx -mbmi
 @opindex mbmi
 @need 200
 @itemx -mbmi2
+@opindex mbmi2
 @need 200
 @itemx -mlzcnt
 @opindex mlzcnt
@@ -27318,6 +27339,9 @@ preferred alignment to @option{-mpreferr
 @itemx -mrtm
 @opindex mrtm
 @need 200
+@itemx -mhle
+@opindex mhle
+@need 200
 @itemx -mtbm
 @opindex mtbm
 @need 200
@@ -27356,15 +27380,26 @@ preferred alignment to @option{-mpreferr
 @need 200
 @itemx -mavx512vpopcntdq
 @opindex mavx512vpopcntdq
+@need 200
+@itemx -mavx5124fmaps
+@opindex mavx5124fmaps
+@need 200
+@itemx -mavx512vnni
+@opindex mavx512vnni
+@need 200
+@itemx -mavx5124vnniw
+@opindex mavx5124vnniw
 These switches enable the use of instructions in the MMX, SSE,
-SSE2, SSE3, SSSE3, SSE4.1, AVX, AVX2, AVX512F, AVX512PF, AVX512ER, AVX512CD,
-SHA, AES, PCLMUL, FSGSBASE, RDRND, F16C, FMA, SSE4A, FMA4, XOP, LWP, ABM,
-AVX512VL, AVX512BW, AVX512DQ, AVX512IFMA, AVX512VBMI, BMI, BMI2, VAES,
-FXSR, XSAVE, XSAVEOPT, LZCNT, RTM, MPX, MWAITX, PKU, IBT, SHSTK, AVX512VBMI2,
-GFNI, VPCLMULQDQ, AVX512BITALG, MOVDIRI, MOVDIR64B,
-AVX512VPOPCNTDQ3DNow!@: or enhanced 3DNow!@: extended instruction sets.
-Each has a corresponding @option{-mno-} option to disable use of these
-instructions.
+SSE2, SSE3, SSSE3, SSE4, SSE4A, SSE4.1, SSE4.2, AVX, AVX2, AVX512F, AVX512PF,
+AVX512ER, AVX512CD, AVX512VL, AVX512BW, AVX512DQ, AVX512IFMA, AVX512VBMI, SHA,
+AES, PCLMUL, CLFLUSHOPT, CLWB, FSGSBASE, RDRND, F16C, FMA, PCONFIG,
+WBNOINVD, FMA4, PREFETCHW, RDPID, PREFETCHWT1, RDSEED, SGX, XOP, LWP,
+3DNow!@:, enhanced 3DNow!@:, POPCNT, ABM, ADX, BMI, BMI2, LZCNT, FXSR, XSAVE,
+XSAVEOPT, XSAVEC, XSAVES, RTM, HLE, TBM, MPX, MWAITX, CLZERO, PKU, AVX512VBMI2,
+GFNI, VAES, VPCLMULQDQ, AVX512BITALG, MOVDIRI, MOVDIR64B,
+AVX512VPOPCNTDQ, AVX5124FMAPS, AVX512VNNI, or AVX5124VNNIW
+extended instruction sets.  Each has a corresponding @option{-mno-} option to
+disable use of these instructions.
 
 These extensions are also available as built-in functions: see
 @ref{x86 Built-in Functions}, for details of the functions enabled and
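For context (illustrative only, not part of the backported patch): the @item entries added above document the x86 target("...") function attribute, which enables the named ISA extensions for a single function independently of the -m options used for the rest of the translation unit.  A minimal hypothetical use, with the function name chosen purely for illustration:

__attribute__((target("avx2,fma")))
void
fmadd_avx2 (float *a, const float *b, const float *c, int n)
{
  /* The vectorizer may emit AVX2/FMA instructions here even if the
     translation unit is compiled without -mavx2 -mfma.  */
  for (int i = 0; i < n; i++)
    a[i] = a[i] * b[i] + c[i];
}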
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-01-28  Jakub Jelinek  <jakub@redhat.com>

	PR middle-end/89002
	* gimplify.c (gimplify_omp_for): When adding OMP_CLAUSE_*_GIMPLE_SEQ
	for lastprivate/linear IV, push gimplify context around gimplify_assign
	and, if it needed any temporaries, pop it into a gimple bind around the
	sequence.

	* testsuite/libgomp.c/pr89002.c: New test.

--- gcc/gimplify.c	(revision 268344)
+++ gcc/gimplify.c	(revision 268345)
@@ -11167,8 +11167,17 @@ gimplify_omp_for (tree *expr_p, gimple_s
 		  seq = &OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c);
 		else
 		  seq = &OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c);
+		push_gimplify_context ();
 		gimplify_assign (decl, t, seq);
-	    }
+		gimple *bind = NULL;
+		if (gimplify_ctxp->temps)
+		  {
+		    bind = gimple_build_bind (NULL_TREE, *seq, NULL_TREE);
+		    *seq = NULL;
+		    gimplify_seq_add_stmt (seq, bind);
+		  }
+		pop_gimplify_context (bind);
+	      }
 	}
     }
 
--- libgomp/testsuite/libgomp.c/pr89002.c	(nonexistent)
+++ libgomp/testsuite/libgomp.c/pr89002.c	(revision 268345)
@@ -0,0 +1,43 @@
+/* PR middle-end/89002 */
+
+extern void abort (void);
+
+int
+foo (int x)
+{
+  int a;
+  int *p = &a;
+
+#pragma omp taskloop lastprivate (a)
+  for (a = 0; a < x; ++a)
+    ;
+  return *p;
+}
+
+int
+bar (int x)
+{
+  int a;
+  int *p = &a;
+
+#pragma omp parallel
+#pragma omp single
+#pragma omp taskloop lastprivate (a)
+  for (a = 0; a < x; ++a)
+    ;
+  return *p;
+}
+
+int
+main ()
+{
+#pragma omp parallel
+#pragma omp single
+  {
+    if (foo (4) != 4)
+      abort ();
+  }
+  if (bar (6) != 6)
+    abort ();
+  return 0;
+}
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-01-29  Jakub Jelinek  <jakub@redhat.com>

	PR c++/66676
	PR ipa/89104
	* omp-simd-clone.c (simd_clone_clauses_extract)
	<case OMP_CLAUSE_ALIGNED>: Ignore clauses with NULL
	OMP_CLAUSE_ALIGNED_ALIGNMENT.

	* gcc.dg/gomp/pr89104.c: New test.

--- gcc/omp-simd-clone.c	(revision 268369)
+++ gcc/omp-simd-clone.c	(revision 268370)
@@ -242,6 +242,10 @@ simd_clone_clauses_extract (struct cgrap
 	  }
 	case OMP_CLAUSE_ALIGNED:
 	  {
+	    /* Ignore aligned (x) for declare simd, for the ABI we really
+	       need an alignment specified.  */
+	    if (OMP_CLAUSE_ALIGNED_ALIGNMENT (t) == NULL_TREE)
+	      break;
 	    tree decl = OMP_CLAUSE_DECL (t);
 	    int argno = tree_to_uhwi (decl);
 	    clone_info->args[argno].alignment
--- gcc/testsuite/gcc.dg/gomp/pr89104.c	(nonexistent)
+++ gcc/testsuite/gcc.dg/gomp/pr89104.c	(revision 268370)
@@ -0,0 +1,11 @@
+/* PR c++/66676 */
+/* PR ipa/89104 */
+/* { dg-do compile } */
+/* { dg-options "-O2 -fopenmp-simd" } */
+
+#pragma omp declare simd uniform (x) aligned (x)
+int
+foo (int *x, int y)
+{
+  return x[y];
+}
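For context (illustrative only, not part of the testsuite; the function names are made up): the fix above distinguishes the two forms of the aligned clause on #pragma omp declare simd.  Only a clause with an explicit alignment contributes alignment information to the simd clone ABI; a bare aligned (x) carries no value and, with the fix, is simply ignored when the clone is built.

/* Explicit alignment: the simd clone ABI records that x is 32-byte
   aligned.  */
#pragma omp declare simd uniform (x) aligned (x : 32)
int with_alignment (int *x, int y) { return x[y]; }

/* No alignment value: after the fix this clause is ignored when the
   clone is built.  */
#pragma omp declare simd uniform (x) aligned (x)
int without_alignment (int *x, int y) { return x[y]; }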
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-01-30  Jakub Jelinek  <jakub@redhat.com>

	PR c++/89105
	* config/i386/i386.c (ix86_warn_parameter_passing_abi): Don't warn
	for arguments to functions that are TU-local and shouldn't be
	referenced by assembly.

	* g++.dg/abi/pr89105.C: New test.

--- gcc/config/i386/i386.c	(revision 268381)
+++ gcc/config/i386/i386.c	(revision 268382)
@@ -29562,6 +29562,10 @@ ix86_warn_parameter_passing_abi (cumulat
   if (!TYPE_EMPTY_P (type))
     return;
 
+  /* Don't warn if the function isn't visible outside of the TU.  */
+  if (cum->decl && !TREE_PUBLIC (cum->decl))
+    return;
+
   const_tree ctx = get_ultimate_context (cum->decl);
   if (ctx != NULL_TREE
       && !TRANSLATION_UNIT_WARN_EMPTY_P (ctx))
--- gcc/testsuite/g++.dg/abi/pr89105.C	(nonexistent)
+++ gcc/testsuite/g++.dg/abi/pr89105.C	(revision 268382)
@@ -0,0 +1,16 @@
+// PR c++/89105
+// { dg-do compile { target { { i?86-*-* x86_64-*-* } && { c++11 } } } }
+// { dg-options "-fabi-version=12 -Wabi=11" }
+
+namespace {
+  template<typename F>
+    void run(F f, int i)	// { dg-bogus "parameter passing ABI changes in -fabi-version=12" }
+    {
+      f(i);
+    }
+}
+
+void f()
+{
+  run([](int) { }, 1);		// { dg-bogus "parameter passing ABI changes in -fabi-version=12" }
+}
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-01-30  Jakub Jelinek  <jakub@redhat.com>

	PR c++/88988
	* lambda.c (is_capture_proxy): Don't return true for
	DECL_OMP_PRIVATIZED_MEMBER artificial vars.

	* testsuite/libgomp.c++/pr88988.C: New test.

--- gcc/cp/lambda.c	(revision 268406)
+++ gcc/cp/lambda.c	(revision 268407)
@@ -262,6 +262,9 @@ is_capture_proxy (tree decl)
 	  && DECL_HAS_VALUE_EXPR_P (decl)
 	  && !DECL_ANON_UNION_VAR_P (decl)
 	  && !DECL_DECOMPOSITION_P (decl)
+	  && !(DECL_ARTIFICIAL (decl)
+	       && DECL_LANG_SPECIFIC (decl)
+	       && DECL_OMP_PRIVATIZED_MEMBER (decl))
 	  && LAMBDA_FUNCTION_P (DECL_CONTEXT (decl)));
 }
 
--- libgomp/testsuite/libgomp.c++/pr88988.C	(nonexistent)
+++ libgomp/testsuite/libgomp.c++/pr88988.C	(revision 268407)
@@ -0,0 +1,28 @@
+// PR c++/88988
+// { dg-do compile }
+// { dg-additional-options "-std=c++14" }
+
+extern "C" void abort ();
+
+template <typename T>
+struct A {
+  A () : a(), b()
+  {
+    [&] ()
+    {
+#pragma omp task firstprivate (a) shared (b)
+      b = ++a;
+#pragma omp taskwait
+    } ();
+  }
+
+  T a, b;
+};
+
+int
+main ()
+{
+  A<int> x;
+  if (x.a != 0 || x.b != 1)
+    abort ();
+}
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-01-31  Jakub Jelinek  <jakub@redhat.com>

	PR sanitizer/89124
	* ipa-inline.c (sanitize_attrs_match_for_inline_p): Allow inlining
	always_inline callees into no_sanitize_address callers.

	* c-c++-common/asan/pr89124.c: New test.

--- gcc/ipa-inline.c	(revision 268414)
+++ gcc/ipa-inline.c	(revision 268415)
@@ -264,6 +264,12 @@ sanitize_attrs_match_for_inline_p (const
   if (!caller || !callee)
     return true;
 
+  /* Allow inlining always_inline functions into no_sanitize_address
+     functions.  */
+  if (!sanitize_flags_p (SANITIZE_ADDRESS, caller)
+      && lookup_attribute ("always_inline", DECL_ATTRIBUTES (callee)))
+    return true;
+
   return ((sanitize_flags_p (SANITIZE_ADDRESS, caller)
 	   == sanitize_flags_p (SANITIZE_ADDRESS, callee))
 	  && (sanitize_flags_p (SANITIZE_POINTER_COMPARE, caller)
--- gcc/testsuite/c-c++-common/asan/pr89124.c	(nonexistent)
+++ gcc/testsuite/c-c++-common/asan/pr89124.c	(revision 268415)
@@ -0,0 +1,14 @@
+/* PR sanitizer/89124 */
+/* { dg-do compile } */
+
+static int inline __attribute__ ((always_inline))
+foo (int x)
+{
+  return x + 1;
+}
+
+__attribute__ ((no_sanitize_address)) int
+bar (int x)
+{
+  return foo (x);
+}
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-02-01  Jakub Jelinek  <jakub@redhat.com>

	PR tree-optimization/88107
	* tree-cfg.c (find_outermost_region_in_block): Add ALL argument,
	instead of assertion that eh_region_outermost is non-NULL, if it
	is NULL, set *ALL to true and return NULL.
	(move_sese_region_to_fn): Adjust caller, if all is set, call
	duplicate_eh_regions with NULL region.

	* gcc.dg/gomp/pr88107.c: New test.

--- gcc/tree-cfg.c	(revision 268443)
+++ gcc/tree-cfg.c	(revision 268444)
@@ -7143,11 +7143,14 @@ move_block_to_fn (struct function *dest_
 }
 
 /* Examine the statements in BB (which is in SRC_CFUN); find and return
-   the outermost EH region.  Use REGION as the incoming base EH region.  */
+   the outermost EH region.  Use REGION as the incoming base EH region.
+   If there is no single outermost region, return NULL and set *ALL to
+   true.  */
 
 static eh_region
 find_outermost_region_in_block (struct function *src_cfun,
-				basic_block bb, eh_region region)
+				basic_block bb, eh_region region,
+				bool *all)
 {
   gimple_stmt_iterator si;
 
@@ -7166,7 +7169,11 @@ find_outermost_region_in_block (struct f
 	  else if (stmt_region != region)
 	    {
 	      region = eh_region_outermost (src_cfun, stmt_region, region);
-	      gcc_assert (region != NULL);
+	      if (region == NULL)
+		{
+		  *all = true;
+		  return NULL;
+		}
 	    }
 	}
     }
@@ -7501,12 +7508,17 @@ move_sese_region_to_fn (struct function
   if (saved_cfun->eh)
     {
       eh_region region = NULL;
+      bool all = false;
 
       FOR_EACH_VEC_ELT (bbs, i, bb)
-	region = find_outermost_region_in_block (saved_cfun, bb, region);
+	{
+	  region = find_outermost_region_in_block (saved_cfun, bb, region, &all);
+	  if (all)
+	    break;
+	}
 
       init_eh_for_function ();
-      if (region != NULL)
+      if (region != NULL || all)
 	{
 	  new_label_map = htab_create (17, tree_map_hash, tree_map_eq, free);
 	  eh_map = duplicate_eh_regions (saved_cfun, region, 0,
--- gcc/testsuite/gcc.dg/gomp/pr88107.c	(nonexistent)
+++ gcc/testsuite/gcc.dg/gomp/pr88107.c	(revision 268444)
@@ -0,0 +1,35 @@
+/* PR tree-optimization/88107 */
+/* { dg-do compile { target fgraphite } } */
+/* { dg-require-effective-target vect_simd_clones } */
+/* { dg-options "-O2 -fexceptions -floop-nest-optimize -fnon-call-exceptions -fopenmp-simd -ftree-parallelize-loops=2" } */
+
+#define N 1024
+int a[N], b[N];
+long int c[N];
+unsigned char d[N];
+
+#pragma omp declare simd notinbranch
+__attribute__((noinline)) static int
+foo (long int a, int b, int c)
+{
+  return a + b + c;
+}
+
+#pragma omp declare simd notinbranch
+__attribute__((noinline)) static long int
+bar (int a, int b, long int c)
+{
+  return a + b + c;
+}
+
+void
+baz (void)
+{
+  int i;
+  #pragma omp simd
+  for (i = 0; i < N; i++)
+    a[i] = foo (c[i], a[i], b[i]) + 6;
+  #pragma omp simd
+  for (i = 0; i < N; i++)
+    c[i] = bar (a[i], b[i], c[i]) * 2;
+}
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-02-01  Jakub Jelinek  <jakub@redhat.com>

	PR fortran/83246
	PR fortran/89084
	* trans-decl.c (generate_local_decl): Add referenced FL_PARAMETERs
	if sym->ns->construct_entities rather than if
	sym->ns->parent->code->op == EXEC_BLOCK.

	* gfortran.dg/pr89084.f90: New test.
	* gfortran.dg/lto/pr89084_0.f90: New test.
	* gfortran.dg/pr83246.f90: New test.

--- gcc/fortran/trans-decl.c	(revision 268461)
+++ gcc/fortran/trans-decl.c	(revision 268462)
@@ -5735,10 +5735,7 @@ generate_local_decl (gfc_symbol * sym)
 			  "imported at %L", sym->name, &sym->declared_at);
 	}
 
-      if (sym->ns
-	  && sym->ns->parent
-	  && sym->ns->parent->code
-	  && sym->ns->parent->code->op == EXEC_BLOCK)
+      if (sym->ns && sym->ns->construct_entities)
 	{
 	  if (sym->attr.referenced)
 	    gfc_get_symbol_decl (sym);
--- gcc/testsuite/gfortran.dg/pr83246.f90	(nonexistent)
+++ gcc/testsuite/gfortran.dg/pr83246.f90	(revision 268462)
@@ -0,0 +1,9 @@
+! PR fortran/83246
+! { dg-do link }
+   program dusty_corner 
+   write(*,*)'BLOCK TESTS' 
+   MAKEDATAP: block
+   integer,parameter :: scratch(*)=[1,2,3]
+   write(*,*)scratch
+   endblock MAKEDATAP
+   end program dusty_corner
--- gcc/testsuite/gfortran.dg/lto/pr89084_0.f90	(nonexistent)
+++ gcc/testsuite/gfortran.dg/lto/pr89084_0.f90	(revision 268462)
@@ -0,0 +1,24 @@
+! PR fortran/89084
+! { dg-lto-do link }
+! { dg-lto-options {{ -O0 -flto }} }
+
+integer function foo ()
+  write (*,*) 'foo'
+  block
+    integer, parameter :: idxs(3) = (/ 1, 2, 3 /)
+    integer :: i
+    foo = 0
+    do i = 1, size(idxs)
+      foo = foo + idxs(i)
+    enddo
+  end block
+end function foo
+program pr89084
+  integer :: i
+  interface
+    integer function foo ()
+    end function
+  end interface
+  i = foo ()
+  if (i.ne.6) stop 1
+end
--- gcc/testsuite/gfortran.dg/pr89084.f90	(nonexistent)
+++ gcc/testsuite/gfortran.dg/pr89084.f90	(revision 268462)
@@ -0,0 +1,23 @@
+! PR fortran/89084
+! { dg-do run }
+
+integer function foo ()
+  write (*,*) 'foo'
+  block
+    integer, parameter :: idxs(3) = (/ 1, 2, 3 /)
+    integer :: i
+    foo = 0
+    do i = 1, size(idxs)
+      foo = foo + idxs(i)
+    enddo
+  end block
+end function foo
+program pr89084
+  integer :: i
+  interface
+    integer function foo ()
+    end function
+  end interface
+  i = foo ()
+  if (i.ne.6) stop 1
+end
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-02-02  Jakub Jelinek  <jakub@redhat.com>

	PR middle-end/87887
	* config/i386/i386.c (ix86_simd_clone_compute_vecsize_and_simdlen):
	Punt with warning on aggregate return or argument types.  Ignore
	type/mode checking for uniform arguments.

	* gcc.dg/gomp/pr87887-1.c: New test.
	* gcc.dg/gomp/pr87887-2.c: New test.

--- gcc/config/i386/i386.c	(revision 268465)
+++ gcc/config/i386/i386.c	(revision 268466)
@@ -50433,7 +50433,9 @@ ix86_simd_clone_compute_vecsize_and_simd
       case E_DFmode:
       /* case E_SCmode: */
       /* case E_DCmode: */
-	break;
+	if (!AGGREGATE_TYPE_P (ret_type))
+	  break;
+	/* FALLTHRU */
       default:
 	warning_at (DECL_SOURCE_LOCATION (node->decl), 0,
 		    "unsupported return type %qT for simd", ret_type);
@@ -50444,7 +50446,6 @@ ix86_simd_clone_compute_vecsize_and_simd
   int i;
 
   for (t = DECL_ARGUMENTS (node->decl), i = 0; t; t = DECL_CHAIN (t), i++)
-    /* FIXME: Shouldn't we allow such arguments if they are uniform?  */
     switch (TYPE_MODE (TREE_TYPE (t)))
       {
       case E_QImode:
@@ -50455,8 +50456,12 @@ ix86_simd_clone_compute_vecsize_and_simd
       case E_DFmode:
       /* case E_SCmode: */
       /* case E_DCmode: */
-	break;
+	if (!AGGREGATE_TYPE_P (TREE_TYPE (t)))
+	  break;
+	/* FALLTHRU */
       default:
+	if (clonei->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM)
+	  break;
 	warning_at (DECL_SOURCE_LOCATION (node->decl), 0,
 		    "unsupported argument type %qT for simd", TREE_TYPE (t));
 	return 0;
--- gcc/testsuite/gcc.dg/gomp/pr87887-1.c	(nonexistent)
+++ gcc/testsuite/gcc.dg/gomp/pr87887-1.c	(revision 268466)
@@ -0,0 +1,26 @@
+/* PR middle-end/87887 */
+/* { dg-do compile } */
+/* { dg-require-effective-target vect_simd_clones } */
+/* { dg-additional-options "-w" } */
+
+struct S { int n; };
+#pragma omp declare simd
+struct S
+foo (int x)
+{
+  return (struct S) { x };
+}
+
+#pragma omp declare simd
+int
+bar (struct S x)
+{
+  return x.n;
+}
+
+#pragma omp declare simd uniform (x)
+int
+baz (int w, struct S x, int y)
+{
+  return w + x.n + y;
+}
--- gcc/testsuite/gcc.dg/gomp/pr87887-2.c	(nonexistent)
+++ gcc/testsuite/gcc.dg/gomp/pr87887-2.c	(revision 268466)
@@ -0,0 +1,25 @@
+/* PR middle-end/87887 */
+/* { dg-do compile { target i?86-*-* x86_64-*-* } } */
+/* { dg-require-effective-target vect_simd_clones } */
+
+struct S { int n; };
+#pragma omp declare simd
+struct S
+foo (int x)		/* { dg-warning "unsupported return type 'struct S' for simd" } */
+{
+  return (struct S) { x };
+}
+
+#pragma omp declare simd
+int
+bar (struct S x)	/* { dg-warning "unsupported argument type 'struct S' for simd" } */
+{
+  return x.n;
+}
+
+#pragma omp declare simd uniform (x)
+int
+baz (int w, struct S x, int y)
+{
+  return w + x.n + y;
+}
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-02-05  Jakub Jelinek  <jakub@redhat.com>

	PR target/89186
	* optabs.c (prepare_cmp_insn): Pass x and y to
	emit_block_comp_via_libcall rather than XEXP (x, 0) and XEXP (y, 0).

	* g++.dg/ext/vector36.C: New test.

--- gcc/optabs.c	(revision 268530)
+++ gcc/optabs.c	(revision 268531)
@@ -3917,7 +3917,7 @@ prepare_cmp_insn (rtx x, rtx y, enum rtx
 	goto fail;
 
       /* Otherwise call a library function.  */
-      result = emit_block_comp_via_libcall (XEXP (x, 0), XEXP (y, 0), size);
+      result = emit_block_comp_via_libcall (x, y, size);
 
       x = result;
       y = const0_rtx;
--- gcc/testsuite/g++.dg/ext/vector36.C	(nonexistent)
+++ gcc/testsuite/g++.dg/ext/vector36.C	(revision 268531)
@@ -0,0 +1,6 @@
+// PR target/89186
+// { dg-do compile }
+// { dg-options "-fnon-call-exceptions" }
+// { dg-additional-options "-mno-sse" { target i?86-*-* x86_64-*-* } }
+
+#include "vector27.C"
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-02-05  Jakub Jelinek  <jakub@redhat.com>

	PR rtl-optimization/89195
	* combine.c (make_extraction): For MEMs, don't extract bytes outside
	of the original MEM.

	* gcc.c-torture/execute/pr89195.c: New test.

--- gcc/combine.c	(revision 268541)
+++ gcc/combine.c	(revision 268542)
@@ -7687,6 +7687,7 @@ make_extraction (machine_mode mode, rtx
 	      /* We can't do this if we are widening INNER_MODE (it
 		 may not be aligned, for one thing).  */
 	      && !paradoxical_subreg_p (tmode, inner_mode)
+	      && known_le (pos + len, GET_MODE_PRECISION (is_mode))
 	      && (inner_mode == tmode
 		  || (! mode_dependent_address_p (XEXP (inner, 0),
 						  MEM_ADDR_SPACE (inner))
--- gcc/testsuite/gcc.c-torture/execute/pr89195.c	(nonexistent)
+++ gcc/testsuite/gcc.c-torture/execute/pr89195.c	(revision 268542)
@@ -0,0 +1,22 @@
+/* PR rtl-optimization/89195 */
+/* { dg-require-effective-target int32plus } */
+
+struct S { unsigned i : 24; };
+
+volatile unsigned char x;
+
+__attribute__((noipa)) int
+foo (struct S d) 
+{
+  return d.i & x;
+}
+
+int
+main ()
+{
+  struct S d = { 0x123456 };
+  x = 0x75;
+  if (foo (d) != (0x56 & 0x75))
+    __builtin_abort ();
+  return 0;
+}
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-02-05  Jakub Jelinek  <jakub@redhat.com>

	PR target/89188
	* dce.c (delete_unmarked_insns): Don't remove no-op moves if they
	can throw, non-call exceptions are enabled and we can't delete
	dead exceptions or alter cfg.  Set must_clean if
	delete_insn_and_edges returns true, don't set it blindly for calls.

	* g++.dg/opt/pr89188.C: New test.

--- gcc/dce.c	(revision 268543)
+++ gcc/dce.c	(revision 268544)
@@ -584,7 +584,12 @@ delete_unmarked_insns (void)
 	  rtx turn_into_use = NULL_RTX;
 
 	  /* Always delete no-op moves.  */
-	  if (noop_move_p (insn))
+	  if (noop_move_p (insn)
+	      /* Unless the no-op move can throw and we are not allowed
+		 to alter cfg.  */
+	      && (!cfun->can_throw_non_call_exceptions
+		  || (cfun->can_delete_dead_exceptions && can_alter_cfg)
+		  || insn_nothrow_p (insn)))
 	    {
 	      if (RTX_FRAME_RELATED_P (insn))
 		turn_into_use
@@ -627,12 +632,6 @@ delete_unmarked_insns (void)
 	     for the destination regs in order to avoid dangling notes.  */
 	  remove_reg_equal_equiv_notes_for_defs (insn);
 
-	  /* If a pure or const call is deleted, this may make the cfg
-	     have unreachable blocks.  We rememeber this and call
-	     delete_unreachable_blocks at the end.  */
-	  if (CALL_P (insn))
-	    must_clean = true;
-
 	  if (turn_into_use)
 	    {
 	      /* Don't remove frame related noop moves if they cary
@@ -645,7 +644,7 @@ delete_unmarked_insns (void)
 	    }
 	  else
 	    /* Now delete the insn.  */
-	    delete_insn_and_edges (insn);
+	    must_clean |= delete_insn_and_edges (insn);
 	}
 
   /* Deleted a pure or const call.  */
--- gcc/testsuite/g++.dg/opt/pr89188.C	(nonexistent)
+++ gcc/testsuite/g++.dg/opt/pr89188.C	(revision 268544)
@@ -0,0 +1,13 @@
+// PR target/89188
+// { dg-do compile { target c++11 } }
+// { dg-options "-Og -flive-range-shrinkage -fnon-call-exceptions" }
+
+struct Ax {
+  int n, a[];
+};
+
+int i = 12345678;
+int main() {
+  static Ax s{456, i};
+  ((s.a[0]) ? (void)0 : (void)0);
+}
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-02-05  Jakub Jelinek  <jakub@redhat.com>

	PR rtl-optimization/11304
	* gcc.target/i386/call-1.c (set_eax): Add "eax" clobber.
	* gcc.target/i386/call-2.c: New test.

--- gcc/testsuite/gcc.target/i386/call-1.c	(revision 268561)
+++ gcc/testsuite/gcc.target/i386/call-1.c	(revision 268562)
@@ -11,7 +11,7 @@ volatile int r;
 
 void set_eax(int val)
 {
-  __asm__ __volatile__ ("mov %0, %%eax" : : "m" (val));
+  __asm__ __volatile__ ("mov %0, %%eax" : : "m" (val) : "eax");
 }
 
 void foo(int val)
--- gcc/testsuite/gcc.target/i386/call-2.c	(nonexistent)
+++ gcc/testsuite/gcc.target/i386/call-2.c	(revision 268562)
@@ -0,0 +1,12 @@
+/* PR optimization/11304 */
+/* Originator: <manuel.serrano@sophia.inria.fr> */
+/* { dg-do run } */
+/* { dg-options "-O -fomit-frame-pointer" } */
+
+/* Verify that %eax is always restored after a call.  */
+
+__attribute__((noipa)) void set_eax(int val);
+__attribute__((noipa)) void foo(int val);
+__attribute__((noipa)) int bar(int x);
+
+#include "call-1.c"
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-02-05  Jakub Jelinek  <jakub@redhat.com>

	PR c++/89187
	* optimize.c (maybe_thunk_body): Clear TREE_ADDRESSABLE on
	PARM_DECLs of the thunk.
	* lambda.c (maybe_add_lambda_conv_op): Likewise.

	* g++.dg/opt/pr89187.C: New test.

--- gcc/cp/optimize.c	(revision 268563)
+++ gcc/cp/optimize.c	(revision 268564)
@@ -417,6 +417,8 @@ maybe_thunk_body (tree fn, bool force)
 		  gcc_assert (clone_parm);
 		  DECL_ABSTRACT_ORIGIN (clone_parm) = NULL;
 		  args[parmno] = clone_parm;
+		  /* Clear TREE_ADDRESSABLE on thunk arguments.  */
+		  TREE_ADDRESSABLE (clone_parm) = 0;
 		  clone_parm = TREE_CHAIN (clone_parm);
 		}
 	      if (fn_parm_typelist)
--- gcc/cp/lambda.c	(revision 268563)
+++ gcc/cp/lambda.c	(revision 268564)
@@ -1130,6 +1130,9 @@ maybe_add_lambda_conv_op (tree type)
       {
 	tree new_node = copy_node (src);
 
+	/* Clear TREE_ADDRESSABLE on thunk arguments.  */
+	TREE_ADDRESSABLE (new_node) = 0;
+
 	if (!fn_args)
 	  fn_args = tgt = new_node;
 	else
--- gcc/testsuite/g++.dg/opt/pr89187.C	(nonexistent)
+++ gcc/testsuite/g++.dg/opt/pr89187.C	(revision 268564)
@@ -0,0 +1,23 @@
+// PR c++/89187
+// { dg-do compile { target c++11 } }
+// { dg-options "-Os -fno-tree-ccp -fno-tree-sra -fno-inline" }
+
+template <typename T, int N> struct A {
+  typedef T __attribute__((vector_size (N))) type;
+};
+template <typename T, int N> using B = typename A<T, N>::type;
+template <typename T> using C = B<T, 4>;
+struct D {
+  D (C<int> x) : d{x[3]} {}
+  D foo () { return d; }
+  C<int> d;
+};
+extern D d;
+struct { D bar () { return d; } } l;
+struct E { void baz () const; };
+
+void
+E::baz () const
+{
+  l.bar ().foo ();
+}
2019-02-07  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-02-06  Jakub Jelinek  <jakub@redhat.com>

	PR c/89211
	* c-parser.c (c_parser_declaration_or_fndef): Don't update
	DECL_ARGUMENTS of d if it has been defined already.  Use a single if
	instead of 3 nested ifs.

	* gcc.dg/pr89211.c: New test.

--- gcc/c/c-parser.c	(revision 268573)
+++ gcc/c/c-parser.c	(revision 268574)
@@ -2154,10 +2154,12 @@ c_parser_declaration_or_fndef (c_parser
 	      tree d = start_decl (declarator, specs, false,
 				   chainon (postfix_attrs,
 					    all_prefix_attrs));
-	      if (d && TREE_CODE (d) == FUNCTION_DECL)
-		if (declarator->kind == cdk_function)
-		  if (DECL_ARGUMENTS (d) == NULL_TREE)
-		    DECL_ARGUMENTS (d) = declarator->u.arg_info->parms;
+	      if (d
+		  && TREE_CODE (d) == FUNCTION_DECL
+		  && declarator->kind == cdk_function
+		  && DECL_ARGUMENTS (d) == NULL_TREE
+		  && DECL_INITIAL (d) == NULL_TREE)
+		DECL_ARGUMENTS (d) = declarator->u.arg_info->parms;
 	      if (omp_declare_simd_clauses.exists ())
 		{
 		  tree parms = NULL_TREE;
--- gcc/testsuite/gcc.dg/pr89211.c	(nonexistent)
+++ gcc/testsuite/gcc.dg/pr89211.c	(revision 268574)
@@ -0,0 +1,8 @@
+/* PR c/89211 */
+/* { dg-do compile } */
+
+void foo ();
+void foo ()
+{
+  void foo (struct S);	/* { dg-warning "declared inside parameter list" } */
+}


Comments

Jakub Jelinek Feb. 9, 2019, 8:52 a.m. UTC | #1
On Thu, Feb 07, 2019 at 04:02:54PM +0100, Jakub Jelinek wrote:
> Another month have passed since my last 8.x backporting effort,
> thus I've backported following 32 patches from trunk to 8.x,
> bootstrapped/regtested on x86_64-linux and i686-linux and committed.

And two further ones now, bootstrapped/regtested on powerpc64{,le}-linux
and committed.

	Jakub
2019-02-09  Jakub Jelinek  <jakub@redhat.com>

	PR middle-end/89243
	* g++.dg/opt/pr89188.C: Include ../torture/pr88861.C.

	Backported from mainline
	2019-01-16  David Malcolm  <dmalcolm@redhat.com>

	PR target/88861
	* combine.c (delete_noop_moves): Convert to "bool" return,
	returning true if any edges are eliminated.
	(combine_instructions): Also return true if delete_noop_moves
	returns true.

	* g++.dg/torture/pr88861.C: New test.

--- gcc/combine.c	(revision 267983)
+++ gcc/combine.c	(revision 267984)
@@ -983,14 +983,17 @@ combine_validate_cost (rtx_insn *i0, rtx
 }
 
 
-/* Delete any insns that copy a register to itself.  */
+/* Delete any insns that copy a register to itself.
+   Return true if the CFG was changed.  */
 
-static void
+static bool
 delete_noop_moves (void)
 {
   rtx_insn *insn, *next;
   basic_block bb;
 
+  bool edges_deleted = false;
+
   FOR_EACH_BB_FN (bb, cfun)
     {
       for (insn = BB_HEAD (bb); insn != NEXT_INSN (BB_END (bb)); insn = next)
@@ -1001,10 +1004,12 @@ delete_noop_moves (void)
 	      if (dump_file)
 		fprintf (dump_file, "deleting noop move %d\n", INSN_UID (insn));
 
-	      delete_insn_and_edges (insn);
+	      edges_deleted |= delete_insn_and_edges (insn);
 	    }
 	}
     }
+
+  return edges_deleted;
 }
 
 
@@ -1143,8 +1148,8 @@ insn_a_feeds_b (rtx_insn *a, rtx_insn *b
 /* Main entry point for combiner.  F is the first insn of the function.
    NREGS is the first unused pseudo-reg number.
 
-   Return nonzero if the combiner has turned an indirect jump
-   instruction into a direct jump.  */
+   Return nonzero if the CFG was changed (e.g. if the combiner has
+   turned an indirect jump instruction into a direct jump).  */
 static int
 combine_instructions (rtx_insn *f, unsigned int nregs)
 {
@@ -1529,7 +1534,7 @@ retry:
   default_rtl_profile ();
   clear_bb_flags ();
   new_direct_jump_p |= purge_all_dead_edges ();
-  delete_noop_moves ();
+  new_direct_jump_p |= delete_noop_moves ();
 
   /* Clean up.  */
   obstack_free (&insn_link_obstack, NULL);
--- gcc/testsuite/g++.dg/torture/pr88861.C	(nonexistent)
+++ gcc/testsuite/g++.dg/torture/pr88861.C	(revision 267984)
@@ -0,0 +1,11 @@
+// { dg-options "-fnon-call-exceptions" }
+
+struct Ax {
+  int n, a[];
+};
+
+int i = 12345678;
+int main() {
+  static Ax s{456, i};
+  ((s.a[0]) ? (void)0 : (void)0);
+}
--- gcc/testsuite/g++.dg/opt/pr89188.C	2019-02-07 15:55:14.595876158 +0100
+++ gcc/testsuite/g++.dg/opt/pr89188.C	2019-02-08 19:37:02.497288425 +0100
@@ -2,12 +2,4 @@
 // { dg-do compile { target c++11 } }
 // { dg-options "-Og -flive-range-shrinkage -fnon-call-exceptions" }
 
-struct Ax {
-  int n, a[];
-};
-
-int i = 12345678;
-int main() {
-  static Ax s{456, i};
-  ((s.a[0]) ? (void)0 : (void)0);
-}
+#include "../torture/pr88861.C"
2019-02-09  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-02-08  Jakub Jelinek  <jakub@redhat.com>

	PR rtl-optimization/89234
	* except.c (copy_reg_eh_region_note_forward): Return if note_or_insn
	is a NOTE, CODE_LABEL etc. - rtx_insn * other than INSN_P.
	(copy_reg_eh_region_note_backward): Likewise.

	* g++.dg/ubsan/pr89234.C: New test.

--- gcc/except.c	(revision 268668)
+++ gcc/except.c	(revision 268669)
@@ -1756,6 +1756,8 @@ copy_reg_eh_region_note_forward (rtx not
       if (note == NULL)
 	return;
     }
+  else if (is_a <rtx_insn *> (note_or_insn))
+    return;
   note = XEXP (note, 0);
 
   for (insn = first; insn != last ; insn = NEXT_INSN (insn))
@@ -1778,6 +1780,8 @@ copy_reg_eh_region_note_backward (rtx no
       if (note == NULL)
 	return;
     }
+  else if (is_a <rtx_insn *> (note_or_insn))
+    return;
   note = XEXP (note, 0);
 
   for (insn = last; insn != first; insn = PREV_INSN (insn))
--- gcc/testsuite/g++.dg/ubsan/pr89234.C	(nonexistent)
+++ gcc/testsuite/g++.dg/ubsan/pr89234.C	(revision 268669)
@@ -0,0 +1,11 @@
+// PR rtl-optimization/89234
+// { dg-do compile { target dfp } }
+// { dg-options "-O2 -fnon-call-exceptions -fsanitize=null" }
+
+typedef float __attribute__((mode (SD))) _Decimal32;
+
+void
+foo (_Decimal32 *b, _Decimal32 c)
+{
+  *b = c + 1.5;
+}
Jakub Jelinek Feb. 14, 2019, 7:44 a.m. UTC | #2
On Sat, Feb 09, 2019 at 09:52:52AM +0100, Jakub Jelinek wrote:
> And two further ones now, bootstrapped/regtested on powerpc64{,le}-linux
> and committed.

Four further ones, bootstrapped/regtested on x86_64-linux and i686-linux.

	Jakub
2019-02-14  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-02-09  Jakub Jelinek  <jakub@redhat.com>

	PR middle-end/89246
	* config/i386/i386.c (ix86_simd_clone_compute_vecsize_and_simdlen):
	If !node->definition and TYPE_ARG_TYPES is non-NULL, use
	TYPE_ARG_TYPES instead of DECL_ARGUMENTS.

	* gcc.dg/gomp/pr89246-1.c: New test.
	* gcc.dg/gomp/pr89246-2.c: New test.

--- gcc/config/i386/i386.c	(revision 268717)
+++ gcc/config/i386/i386.c	(revision 268718)
@@ -50447,28 +50447,34 @@ ix86_simd_clone_compute_vecsize_and_simd
 
   tree t;
   int i;
+  tree type_arg_types = TYPE_ARG_TYPES (TREE_TYPE (node->decl));
+  bool decl_arg_p = (node->definition || type_arg_types == NULL_TREE);
 
-  for (t = DECL_ARGUMENTS (node->decl), i = 0; t; t = DECL_CHAIN (t), i++)
-    switch (TYPE_MODE (TREE_TYPE (t)))
-      {
-      case E_QImode:
-      case E_HImode:
-      case E_SImode:
-      case E_DImode:
-      case E_SFmode:
-      case E_DFmode:
-      /* case E_SCmode: */
-      /* case E_DCmode: */
-	if (!AGGREGATE_TYPE_P (TREE_TYPE (t)))
-	  break;
-	/* FALLTHRU */
-      default:
-	if (clonei->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM)
-	  break;
-	warning_at (DECL_SOURCE_LOCATION (node->decl), 0,
-		    "unsupported argument type %qT for simd", TREE_TYPE (t));
-	return 0;
-      }
+  for (t = (decl_arg_p ? DECL_ARGUMENTS (node->decl) : type_arg_types), i = 0;
+       t && t != void_list_node; t = TREE_CHAIN (t), i++)
+    {
+      tree arg_type = decl_arg_p ? TREE_TYPE (t) : TREE_VALUE (t);
+      switch (TYPE_MODE (arg_type))
+	{
+	case E_QImode:
+	case E_HImode:
+	case E_SImode:
+	case E_DImode:
+	case E_SFmode:
+	case E_DFmode:
+	/* case E_SCmode: */
+	/* case E_DCmode: */
+	  if (!AGGREGATE_TYPE_P (arg_type))
+	    break;
+	  /* FALLTHRU */
+	default:
+	  if (clonei->args[i].arg_type == SIMD_CLONE_ARG_TYPE_UNIFORM)
+	    break;
+	  warning_at (DECL_SOURCE_LOCATION (node->decl), 0,
+		      "unsupported argument type %qT for simd", arg_type);
+	  return 0;
+	}
+    }
 
   if (!TREE_PUBLIC (node->decl))
     {
--- gcc/testsuite/gcc.dg/gomp/pr89246-1.c	(nonexistent)
+++ gcc/testsuite/gcc.dg/gomp/pr89246-1.c	(revision 268718)
@@ -0,0 +1,19 @@
+/* PR middle-end/89246 */
+/* { dg-do link { target { int128 && vect_simd_clones } } } */
+/* { dg-options "-O2 -fopenmp-simd -w" } */
+/* { dg-additional-sources "pr89246-2.c" } */
+
+#pragma omp declare simd
+int foo (__int128 x)
+{
+  return x;
+}
+
+#pragma omp declare simd
+extern int bar (int x);
+
+int
+main ()
+{
+  return foo (0) + bar (0);
+}
--- gcc/testsuite/gcc.dg/gomp/pr89246-2.c	(nonexistent)
+++ gcc/testsuite/gcc.dg/gomp/pr89246-2.c	(revision 268718)
@@ -0,0 +1,13 @@
+/* PR middle-end/89246 */
+/* { dg-do compile { target int128 } } */
+/* { dg-options "-O0 -fno-openmp -fno-openmp-simd" } */
+
+#pragma omp declare simd
+extern int foo (__int128 x);
+
+#pragma omp declare simd
+int
+bar (int x)
+{
+  return x + foo (0);
+}
2019-02-14  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-02-13  Jakub Jelinek  <jakub@redhat.com>

	PR target/89290
	* config/i386/predicates.md (x86_64_immediate_operand): Allow
	TLS UNSPECs offsetted by signed 32-bit CONST_INT even with
	-mcmodel=large.

	* gcc.target/i386/pr89290.c: New test.

--- gcc/config/i386/predicates.md	(revision 268836)
+++ gcc/config/i386/predicates.md	(revision 268837)
@@ -182,7 +182,7 @@ (define_predicate "x86_64_immediate_oper
 	  rtx op1 = XEXP (XEXP (op, 0), 0);
 	  rtx op2 = XEXP (XEXP (op, 0), 1);
 
-	  if (ix86_cmodel == CM_LARGE)
+	  if (ix86_cmodel == CM_LARGE && GET_CODE (op1) != UNSPEC)
 	    return false;
 	  if (!CONST_INT_P (op2))
 	    return false;
--- gcc/testsuite/gcc.target/i386/pr89290.c	(nonexistent)
+++ gcc/testsuite/gcc.target/i386/pr89290.c	(revision 268837)
@@ -0,0 +1,19 @@
+/* PR target/89290 */
+/* { dg-do compile { target { tls && lp64 } } } */
+/* { dg-options "-O0 -mcmodel=large" } */
+
+struct S { long int a, b; } e;
+__thread struct S s;
+__thread struct S t[2];
+
+void
+foo (void)
+{
+  s = e;
+}
+
+void
+bar (void)
+{
+  t[1] = e;
+}
2019-02-14  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-02-13  Jakub Jelinek  <jakub@redhat.com>

	PR middle-end/89281
	* optabs.c (prepare_cmp_insn): Use UINTVAL (size) instead of
	INTVAL (size), compare it to GET_MODE_MASK instead of
	1 << GET_MODE_BITSIZE.

--- gcc/optabs.c	(revision 268840)
+++ gcc/optabs.c	(revision 268841)
@@ -3898,7 +3898,7 @@ prepare_cmp_insn (rtx x, rtx y, enum rtx
 
 	  /* Must make sure the size fits the insn's mode.  */
 	  if (CONST_INT_P (size)
-	      ? INTVAL (size) >= (1 << GET_MODE_BITSIZE (cmp_mode))
+	      ? UINTVAL (size) > GET_MODE_MASK (cmp_mode)
 	      : (GET_MODE_BITSIZE (as_a <scalar_int_mode> (GET_MODE (size)))
 		 > GET_MODE_BITSIZE (cmp_mode)))
 	    continue;
2019-02-14  Jakub Jelinek  <jakub@redhat.com>

	Backported from mainline
	2019-02-13  Jakub Jelinek  <jakub@redhat.com>

	PR middle-end/89303
	* tree-ssa-structalias.c (set_uids_in_ptset): Or in vi->is_heap_var
	into pt->vars_contains_escaped_heap instead of setting
	pt->vars_contains_escaped_heap to it.

	2019-02-13  Jonathan Wakely  <jwakely@redhat.com>
		    Jakub Jelinek  <jakub@redhat.com>

	PR middle-end/89303
	* g++.dg/torture/pr89303.C: New test.

--- gcc/tree-ssa-structalias.c	(revision 268842)
+++ gcc/tree-ssa-structalias.c	(revision 268843)
@@ -6412,7 +6412,7 @@ set_uids_in_ptset (bitmap into, bitmap f
 	      && bitmap_bit_p (escaped_vi->solution, i)))
 	{
 	  pt->vars_contains_escaped = true;
-	  pt->vars_contains_escaped_heap = vi->is_heap_var;
+	  pt->vars_contains_escaped_heap |= vi->is_heap_var;
 	}
 
       if (vi->is_restrict_var)
--- gcc/testsuite/g++.dg/torture/pr89303.C	(nonexistent)
+++ gcc/testsuite/g++.dg/torture/pr89303.C	(revision 268843)
@@ -0,0 +1,792 @@
+// PR middle-end/89303
+// { dg-do run }
+// { dg-additional-options "-std=c++14" }
+
+namespace std
+{
+  typedef __SIZE_TYPE__ size_t;
+  typedef decltype(nullptr) nullptr_t;
+
+  template<typename _Tp, _Tp __v>
+    struct integral_constant
+    {
+      static constexpr _Tp value = __v;
+      typedef _Tp value_type;
+      typedef integral_constant<_Tp, __v> type;
+      constexpr operator value_type() const noexcept { return value; }
+      constexpr value_type operator()() const noexcept { return value; }
+    };
+
+  template<typename _Tp, _Tp __v>
+    constexpr _Tp integral_constant<_Tp, __v>::value;
+
+  typedef integral_constant<bool, true> true_type;
+  typedef integral_constant<bool, false> false_type;
+
+  template<bool __v>
+    using __bool_constant = integral_constant<bool, __v>;
+
+  template<bool, typename, typename>
+    struct conditional;
+
+  template<typename...>
+    struct __and_;
+
+  template<>
+    struct __and_<>
+    : public true_type
+    { };
+
+  template<typename _B1>
+    struct __and_<_B1>
+    : public _B1
+    { };
+
+  template<typename _B1, typename _B2>
+    struct __and_<_B1, _B2>
+    : public conditional<_B1::value, _B2, _B1>::type
+    { };
+
+  template<typename _B1, typename _B2, typename _B3, typename... _Bn>
+    struct __and_<_B1, _B2, _B3, _Bn...>
+    : public conditional<_B1::value, __and_<_B2, _B3, _Bn...>, _B1>::type
+    { };
+
+  template<typename>
+    struct remove_cv;
+
+  template<typename>
+    struct __is_void_helper
+    : public false_type { };
+
+  template<>
+    struct __is_void_helper<void>
+    : public true_type { };
+
+  template<typename _Tp>
+    struct is_void
+    : public __is_void_helper<typename remove_cv<_Tp>::type>::type
+    { };
+
+  template<typename _Tp, typename _Up = _Tp&&>
+    _Up
+    __declval(int);
+
+  template<typename _Tp>
+    _Tp
+    __declval(long);
+
+  template<typename _Tp>
+    auto declval() noexcept -> decltype(__declval<_Tp>(0));
+
+  template<typename, typename>
+    struct is_same
+    : public false_type { };
+
+  template<typename _Tp>
+    struct is_same<_Tp, _Tp>
+    : public true_type { };
+
+  template<typename _Tp>
+    struct remove_const
+    { typedef _Tp type; };
+
+  template<typename _Tp>
+    struct remove_const<_Tp const>
+    { typedef _Tp type; };
+
+  template<typename _Tp>
+    struct remove_volatile
+    { typedef _Tp type; };
+
+  template<typename _Tp>
+    struct remove_volatile<_Tp volatile>
+    { typedef _Tp type; };
+
+  template<typename _Tp>
+    struct remove_cv
+    {
+      typedef typename
+      remove_const<typename remove_volatile<_Tp>::type>::type type;
+    };
+
+  template<typename _Tp>
+    struct remove_reference
+    { typedef _Tp type; };
+
+  template<typename _Tp>
+    struct remove_reference<_Tp&>
+    { typedef _Tp type; };
+
+  template<typename _Tp>
+    struct remove_reference<_Tp&&>
+    { typedef _Tp type; };
+
+  template<bool, typename _Tp = void>
+    struct enable_if
+    { };
+
+  template<typename _Tp>
+    struct enable_if<true, _Tp>
+    { typedef _Tp type; };
+
+  template<typename... _Cond>
+    using _Require = typename enable_if<__and_<_Cond...>::value>::type;
+
+  template<bool _Cond, typename _Iftrue, typename _Iffalse>
+    struct conditional
+    { typedef _Iftrue type; };
+
+  template<typename _Iftrue, typename _Iffalse>
+    struct conditional<false, _Iftrue, _Iffalse>
+    { typedef _Iffalse type; };
+
+  template<typename _Tp>
+    struct __declval_protector
+    {
+      static const bool __stop = false;
+    };
+
+  template<typename _Tp>
+    auto declval() noexcept -> decltype(__declval<_Tp>(0))
+    {
+      static_assert(__declval_protector<_Tp>::__stop,
+      "declval() must not be used!");
+      return __declval<_Tp>(0);
+    }
+
+  namespace void_details {
+    template <class... >
+    struct make_void { using type = void; };
+}
+
+template <class... T> using __void_t = typename void_details ::make_void<T...>::type;
+
+  template<typename _Tp>
+    inline constexpr _Tp*
+    __addressof(_Tp& __r) noexcept
+    {
+      return reinterpret_cast<_Tp*>
+	(&const_cast<char&>(reinterpret_cast<const volatile char&>(__r)));
+    }
+
+  template<typename _Tp>
+    constexpr _Tp&&
+    forward(typename std::remove_reference<_Tp>::type& __t) noexcept
+    { return static_cast<_Tp&&>(__t); }
+
+  template<typename _Tp>
+    constexpr _Tp&&
+    forward(typename std::remove_reference<_Tp>::type&& __t) noexcept
+    {
+      return static_cast<_Tp&&>(__t);
+    }
+
+  template<typename _Tp>
+    constexpr typename std::remove_reference<_Tp>::type&&
+    move(_Tp&& __t) noexcept
+    { return static_cast<typename std::remove_reference<_Tp>::type&&>(__t); }
+}
+       
+inline void* operator new(std::size_t, void* p) { return p; }
+
+extern "C" void* malloc(std::size_t);
+extern "C" void free(void*);
+
+namespace std
+{
+  template<typename T>
+    class allocator
+    {
+    public:
+      using value_type = T;
+
+      allocator() { }
+
+      template<typename U>
+        allocator(const allocator<U>&) { }
+
+      T* allocate(size_t n) { return (T*)malloc(n*sizeof(T)); }
+      void deallocate(T* p, size_t) { free(p); }
+
+      template<typename U, typename... Args>
+        void construct(U* p, Args&&... args)
+        { ::new((void*)p) U(args...); }
+
+      template<typename U>
+        void destroy(U* p)
+        { p->~U(); }
+    };
+
+  class __undefined;
+
+  template<typename _Tp, typename _Up>
+    struct __replace_first_arg
+    { };
+
+  template<template<typename, typename...> class _Template, typename _Up,
+           typename _Tp, typename... _Types>
+    struct __replace_first_arg<_Template<_Tp, _Types...>, _Up>
+    { using type = _Template<_Up, _Types...>; };
+
+  struct __allocator_traits_base
+  {
+    template<typename _Tp, typename _Up, typename = void>
+      struct __rebind : __replace_first_arg<_Tp, _Up> { };
+
+    template<typename _Tp, typename _Up>
+      struct __rebind<_Tp, _Up,
+        __void_t<typename _Tp::template rebind<_Up>::other>>
+      { using type = typename _Tp::template rebind<_Up>::other; };
+  };
+
+  template<typename _Alloc, typename _Up>
+    using __alloc_rebind
+      = typename __allocator_traits_base::template __rebind<_Alloc, _Up>::type;
+
+  template<typename _Alloc>
+    struct allocator_traits;
+
+  template<typename _Tp>
+    struct allocator_traits<allocator<_Tp>>
+    {
+      using allocator_type = allocator<_Tp>;
+      using value_type = _Tp;
+      using pointer = _Tp*;
+      using const_pointer = const _Tp*;
+      using size_type = std::size_t;
+
+      static pointer
+      allocate(allocator_type& __a, size_type __n)
+      { return __a.allocate(__n); }
+
+      static void
+      deallocate(allocator_type& __a, pointer __p, size_type __n)
+      { __a.deallocate(__p, __n); }
+
+      template<typename _Up, typename... _Args>
+        static void
+        construct(allocator_type& __a, _Up* __p, _Args&&... __args)
+        { __a.construct(__p, std::forward<_Args>(__args)...); }
+
+      template<typename _Up>
+        static void
+        destroy(allocator_type& __a, _Up* __p)
+        { __a.destroy(__p); }
+    };
+
+  template<typename _Alloc>
+    struct __allocated_ptr
+    {
+      using pointer = typename allocator_traits<_Alloc>::pointer;
+      using value_type = typename allocator_traits<_Alloc>::value_type;
+
+      __allocated_ptr(_Alloc& __a, pointer __ptr) noexcept
+      : _M_alloc(std::__addressof(__a)), _M_ptr(__ptr)
+      { }
+
+      template<typename _Ptr,
+        typename _Req = _Require<is_same<_Ptr, value_type*>>>
+      __allocated_ptr(_Alloc& __a, _Ptr __ptr)
+      : _M_alloc(std::__addressof(__a)),
+      _M_ptr(__ptr)
+      { }
+
+      __allocated_ptr(__allocated_ptr&& __gd) noexcept
+      : _M_alloc(__gd._M_alloc), _M_ptr(__gd._M_ptr)
+      { __gd._M_ptr = nullptr; }
+
+      ~__allocated_ptr()
+      {
+        if (_M_ptr != nullptr)
+          std::allocator_traits<_Alloc>::deallocate(*_M_alloc, _M_ptr, 1);
+      }
+
+      __allocated_ptr&
+      operator=(std::nullptr_t) noexcept
+      {
+        _M_ptr = nullptr;
+        return *this;
+      }
+
+      value_type* get() { return _M_ptr; }
+
+    private:
+      _Alloc* _M_alloc;
+      pointer _M_ptr;
+    };
+
+  template<typename _Alloc>
+    __allocated_ptr<_Alloc>
+    __allocate_guarded(_Alloc& __a)
+    {
+      return { __a, std::allocator_traits<_Alloc>::allocate(__a, 1) };
+    }
+
+  template<typename _Tp>
+    struct __aligned_buffer
+    {
+      alignas(__alignof__(_Tp)) unsigned char _M_storage[sizeof(_Tp)];
+      __aligned_buffer() = default;
+
+      void*
+      _M_addr() noexcept
+      {
+        return static_cast<void*>(&_M_storage);
+      }
+
+      const void*
+      _M_addr() const noexcept
+      {
+        return static_cast<const void*>(&_M_storage);
+      }
+
+      _Tp*
+      _M_ptr() noexcept
+      { return static_cast<_Tp*>(_M_addr()); }
+
+      const _Tp*
+      _M_ptr() const noexcept
+      { return static_cast<const _Tp*>(_M_addr()); }
+    };
+
+  class bad_weak_ptr { };
+
+  inline void
+  __throw_bad_weak_ptr()
+  { (throw (bad_weak_ptr())); }
+
+    class _Sp_counted_base
+    {
+    public:
+      _Sp_counted_base() noexcept
+      : _M_use_count(1), _M_weak_count(1) { }
+
+      virtual
+      ~_Sp_counted_base() noexcept
+      { }
+
+      virtual void
+      _M_dispose() noexcept = 0;
+
+      virtual void
+      _M_destroy() noexcept
+      { delete this; }
+
+      void
+      _M_add_ref_copy()
+      { ++_M_use_count; }
+
+      void
+      _M_add_ref_lock()
+      {
+        if (_M_use_count == 0)
+          __throw_bad_weak_ptr();
+        ++_M_use_count;
+      }
+
+      void
+      _M_release() noexcept
+      {
+        if (--_M_use_count == 0)
+        {
+          _M_dispose();
+          if (--_M_weak_count == 0)
+            _M_destroy();
+        }
+      }
+
+      void
+      _M_weak_add_ref() noexcept
+      { ++_M_weak_count; }
+
+      void
+      _M_weak_release() noexcept
+      {
+        if (--_M_weak_count == 0)
+          _M_destroy();
+      }
+
+      long
+      _M_get_use_count() const noexcept
+      {
+        return _M_use_count;
+      }
+
+    private:
+      _Sp_counted_base(_Sp_counted_base const&) = delete;
+      _Sp_counted_base& operator=(_Sp_counted_base const&) = delete;
+
+      int _M_use_count;
+      int _M_weak_count;
+    };
+
+  template<typename _Tp>
+    class shared_ptr;
+
+  template<typename _Tp>
+    class weak_ptr;
+
+  template<typename _Tp>
+    class enable_shared_from_this;
+
+  class __weak_count;
+
+  class __shared_count;
+
+  template<typename _Alloc>
+    struct _Sp_alloc_shared_tag
+    {
+      const _Alloc& _M_a;
+    };
+
+  template<typename _Tp, typename _Alloc>
+    class _Sp_counted_ptr_inplace final : public _Sp_counted_base
+    {
+      class _Impl : _Alloc
+      {
+      public:
+        explicit _Impl(_Alloc __a) noexcept : _Alloc(__a) { }
+
+        _Alloc& _M_alloc() noexcept { return *this; }
+
+        __aligned_buffer<_Tp> _M_storage;
+      };
+
+    public:
+      using __allocator_type = __alloc_rebind<_Alloc, _Sp_counted_ptr_inplace>;
+
+      template<typename... _Args>
+        _Sp_counted_ptr_inplace(_Alloc __a, _Args&&... __args)
+        : _M_impl(__a)
+        {
+          allocator_traits<_Alloc>::construct(__a, _M_ptr(),
+              std::forward<_Args>(__args)...);
+        }
+
+      ~_Sp_counted_ptr_inplace() noexcept { }
+
+      virtual void
+      _M_dispose() noexcept
+      {
+        allocator_traits<_Alloc>::destroy(_M_impl._M_alloc(), _M_ptr());
+      }
+
+      virtual void
+      _M_destroy() noexcept
+      {
+        __allocator_type __a(_M_impl._M_alloc());
+        __allocated_ptr<__allocator_type> __guard_ptr{ __a, this };
+        this->~_Sp_counted_ptr_inplace();
+      }
+
+    private:
+      friend class __shared_count;
+
+      _Tp* _M_ptr() noexcept { return _M_impl._M_storage._M_ptr(); }
+
+      _Impl _M_impl;
+    };
+
+  class __shared_count
+  {
+  public:
+    constexpr __shared_count() noexcept : _M_pi(0)
+    { }
+
+    template<typename _Tp, typename _Alloc, typename... _Args>
+      __shared_count(_Tp*& __p, _Sp_alloc_shared_tag<_Alloc> __a,
+          _Args&&... __args)
+      {
+        typedef _Sp_counted_ptr_inplace<_Tp, _Alloc> _Sp_cp_type;
+        typename _Sp_cp_type::__allocator_type __a2(__a._M_a);
+        auto __guard = std::__allocate_guarded(__a2);
+        _Sp_cp_type* __mem = __guard.get();
+        auto __pi = ::new (__mem)
+          _Sp_cp_type(__a._M_a, std::forward<_Args>(__args)...);
+        __guard = nullptr;
+        _M_pi = __pi;
+        __p = __pi->_M_ptr();
+      }
+
+    ~__shared_count() noexcept
+    {
+      if (_M_pi != nullptr)
+        _M_pi->_M_release();
+    }
+
+    __shared_count(const __shared_count& __r) noexcept
+    : _M_pi(__r._M_pi)
+    {
+      if (_M_pi != 0)
+        _M_pi->_M_add_ref_copy();
+    }
+
+    explicit __shared_count(const __weak_count& __r);
+
+    long
+    _M_get_use_count() const noexcept
+    { return _M_pi != 0 ? _M_pi->_M_get_use_count() : 0; }
+
+  private:
+    friend class __weak_count;
+
+    _Sp_counted_base* _M_pi;
+  };
+
+  class __weak_count
+  {
+  public:
+    constexpr __weak_count() noexcept : _M_pi(nullptr)
+    { }
+
+    __weak_count(const __shared_count& __r) noexcept
+    : _M_pi(__r._M_pi)
+    {
+      if (_M_pi != nullptr)
+        _M_pi->_M_weak_add_ref();
+    }
+
+    __weak_count(const __weak_count& __r) noexcept
+    : _M_pi(__r._M_pi)
+    {
+      if (_M_pi != nullptr)
+        _M_pi->_M_weak_add_ref();
+    }
+
+    __weak_count(__weak_count&& __r) noexcept
+    : _M_pi(__r._M_pi)
+    { __r._M_pi = nullptr; }
+
+    ~__weak_count() noexcept
+    {
+      if (_M_pi != nullptr)
+      {
+        _M_pi->_M_weak_release();
+      }
+    }
+
+    __weak_count&
+    operator=(const __shared_count& __r) noexcept
+    {
+      _Sp_counted_base* __tmp = __r._M_pi;
+      if (__tmp != nullptr)
+        __tmp->_M_weak_add_ref();
+      if (_M_pi != nullptr)
+        _M_pi->_M_weak_release();
+      _M_pi = __tmp;
+      return *this;
+    }
+
+    long
+    _M_get_use_count() const noexcept
+    { return _M_pi != nullptr ? _M_pi->_M_get_use_count() : 0; }
+
+  private:
+    friend class __shared_count;
+
+    _Sp_counted_base* _M_pi;
+  };
+
+  inline
+  __shared_count::__shared_count(const __weak_count& __r)
+  : _M_pi(__r._M_pi)
+  {
+    if (_M_pi != nullptr)
+      _M_pi->_M_add_ref_lock();
+    else
+      __throw_bad_weak_ptr();
+  }
+
+  template<typename _Tp>
+    class shared_ptr
+    {
+    public:
+      using element_type = _Tp;
+
+      constexpr shared_ptr() noexcept
+        : _M_ptr(0), _M_refcount()
+        { }
+
+      shared_ptr(const shared_ptr&) noexcept = default;
+      shared_ptr& operator=(const shared_ptr&) noexcept = default;
+      ~shared_ptr() = default;
+
+      template<typename _Yp>
+	explicit shared_ptr(const weak_ptr<_Yp>& __r)
+	: _M_refcount(__r._M_refcount) // may throw
+	{
+	  // It is now safe to copy __r._M_ptr, as
+	  // _M_refcount(__r._M_refcount) did not throw.
+	  _M_ptr = __r._M_ptr;
+	}
+
+      long
+      use_count() const noexcept
+      { return _M_refcount._M_get_use_count(); }
+
+      element_type* operator->() const noexcept { return _M_ptr; }
+
+    protected:
+
+      template<typename _Alloc, typename... _Args>
+        shared_ptr(_Sp_alloc_shared_tag<_Alloc> __tag, _Args&&... __args)
+        : _M_ptr(), _M_refcount(_M_ptr, __tag, std::forward<_Args>(__args)...)
+        { _M_enable_shared_from_this_with(_M_ptr); }
+
+      template<typename _Tp1, typename _Alloc,
+        typename... _Args>
+          friend shared_ptr<_Tp1>
+          allocate_shared(const _Alloc& __a, _Args&&... __args);
+
+      friend class weak_ptr<_Tp>;
+
+    private:
+
+      template<typename _Yp>
+        using __esft_base_t = decltype(__enable_shared_from_this_base(
+              std::declval<const __shared_count&>(),
+              std::declval<_Yp*>()));
+
+      template<typename _Yp, typename = void>
+        struct __has_esft_base
+        : false_type { };
+
+      template<typename _Yp>
+        struct __has_esft_base<_Yp, __void_t<__esft_base_t<_Yp>>>
+        : true_type { };
+
+      template<typename _Yp, typename _Yp2 = typename remove_cv<_Yp>::type>
+        typename enable_if<__has_esft_base<_Yp2>::value>::type
+        _M_enable_shared_from_this_with(_Yp* __p) noexcept
+        {
+          if (auto __base = __enable_shared_from_this_base(_M_refcount, __p))
+            __base->_M_weak_assign(const_cast<_Yp2*>(__p), _M_refcount);
+        }
+
+      template<typename _Tp1> friend class shared_ptr;
+      template<typename _Tp1> friend class weak_ptr;
+
+      element_type* _M_ptr;
+      __shared_count _M_refcount;
+    };
+
+  template<typename _Tp>
+    class weak_ptr
+    {
+    public:
+      using element_type = _Tp;
+
+      constexpr weak_ptr() noexcept
+      : _M_ptr(nullptr), _M_refcount()
+      { }
+
+      weak_ptr(const weak_ptr&) noexcept = default;
+
+      ~weak_ptr() = default;
+
+      weak_ptr&
+      operator=(const weak_ptr& __r) noexcept = default;
+
+      long
+      use_count() const noexcept
+      { return _M_refcount._M_get_use_count(); }
+
+    private:
+
+      void
+      _M_assign(_Tp* __ptr, const __shared_count& __refcount) noexcept
+      {
+        if (use_count() == 0)
+        {
+          _M_ptr = __ptr;
+          _M_refcount = __refcount;
+        }
+      }
+
+      template<typename _Tp1> friend class shared_ptr;
+      template<typename _Tp1> friend class weak_ptr;
+      friend class enable_shared_from_this<_Tp>;
+
+      element_type* _M_ptr;
+      __weak_count _M_refcount;
+    };
+
+  template<typename _Tp>
+    class enable_shared_from_this
+    {
+    protected:
+      constexpr enable_shared_from_this() noexcept { }
+
+      enable_shared_from_this(const enable_shared_from_this&) noexcept { }
+
+      enable_shared_from_this&
+      operator=(const enable_shared_from_this&) noexcept
+      { return *this; }
+
+      ~enable_shared_from_this() { }
+
+    public:
+      shared_ptr<_Tp>
+      shared_from_this()
+      { return shared_ptr<_Tp>(this->_M_weak_this); }
+
+      shared_ptr<const _Tp>
+      shared_from_this() const
+      { return shared_ptr<const _Tp>(this->_M_weak_this); }
+
+    private:
+      template<typename _Tp1>
+        void
+        _M_weak_assign(_Tp1* __p, const __shared_count& __n) const noexcept
+        { _M_weak_this._M_assign(__p, __n); }
+
+      friend const enable_shared_from_this*
+      __enable_shared_from_this_base(const __shared_count&,
+         const enable_shared_from_this* __p)
+      { return __p; }
+
+      template<typename>
+        friend class shared_ptr;
+
+      mutable weak_ptr<_Tp> _M_weak_this;
+    };
+
+  template<typename _Tp, typename _Alloc, typename... _Args>
+    inline shared_ptr<_Tp>
+    allocate_shared(const _Alloc& __a, _Args&&... __args)
+    {
+      return shared_ptr<_Tp>(_Sp_alloc_shared_tag<_Alloc>{__a},
+        std::forward<_Args>(__args)...);
+    }
+
+  template<typename _Tp, typename... _Args>
+    inline shared_ptr<_Tp>
+    make_shared(_Args&&... __args)
+    {
+      typedef typename std::remove_const<_Tp>::type _Tp_nc;
+      return std::allocate_shared<_Tp>(std::allocator<_Tp_nc>(),
+           std::forward<_Args>(__args)...);
+    }
+}
+
+class blob final: public std::enable_shared_from_this<blob>
+{
+  int* data;
+
+public:
+  blob() { data = new int; }
+  ~blob() { delete data; }
+};
+
+static int
+bar(std::shared_ptr<blob>)
+{
+  return 0;
+}
+
+int main()
+{
+  std::shared_ptr<blob> tg = std::make_shared<blob>();
+  return tg->shared_from_this().use_count() - 2;
+}
Patch

--- gcc/dwarf2out.c	(revision 267593)
+++ gcc/dwarf2out.c	(revision 267594)
@@ -14401,6 +14401,10 @@  expansion_failed (tree expr, rtx rtl, ch
     }
 }
 
+/* True if handling a former CONST by mem_loc_descriptor piecewise.  */
+
+static bool in_const_p;
+
 /* Helper function for const_ok_for_output.  */
 
 static bool
@@ -14423,6 +14427,7 @@  const_ok_for_output_1 (rtx rtl)
 	 one in a constant pool entry, so testing SYMBOL_REF_TLS_MODEL
 	 rather than DECL_THREAD_LOCAL_P is not just an optimization.  */
       if (flag_checking
+	  && !in_const_p
 	  && (XVECLEN (rtl, 0) == 0
 	      || GET_CODE (XVECEXP (rtl, 0, 0)) != SYMBOL_REF
 	      || SYMBOL_REF_TLS_MODEL (XVECEXP (rtl, 0, 0)) == TLS_MODEL_NONE))
@@ -14446,13 +14451,6 @@  const_ok_for_output_1 (rtx rtl)
   if (CONST_POLY_INT_P (rtl))
     return false;
 
-  if (targetm.const_not_ok_for_debug_p (rtl))
-    {
-      expansion_failed (NULL_TREE, rtl,
-			"Expression rejected for debug by the backend.\n");
-      return false;
-    }
-
   /* FIXME: Refer to PR60655. It is possible for simplification
      of rtl expressions in var tracking to produce such expressions.
      We should really identify / validate expressions
@@ -14465,6 +14463,41 @@  const_ok_for_output_1 (rtx rtl)
     case NOT:
     case NEG:
       return false;
+    case PLUS:
+      {
+	/* Make sure SYMBOL_REFs/UNSPECs are at most in one of the
+	   operands.  */
+	subrtx_var_iterator::array_type array;
+	bool first = false;
+	FOR_EACH_SUBRTX_VAR (iter, array, XEXP (rtl, 0), ALL)
+	  if (SYMBOL_REF_P (*iter)
+	      || LABEL_P (*iter)
+	      || GET_CODE (*iter) == UNSPEC)
+	    {
+	      first = true;
+	      break;
+	    }
+	if (!first)
+	  return true;
+	FOR_EACH_SUBRTX_VAR (iter, array, XEXP (rtl, 1), ALL)
+	  if (SYMBOL_REF_P (*iter)
+	      || LABEL_P (*iter)
+	      || GET_CODE (*iter) == UNSPEC)
+	    return false;
+	return true;
+      }
+    case MINUS:
+      {
+	/* Disallow negation of SYMBOL_REFs or UNSPECs when they
+	   appear in the second operand of MINUS.  */
+	subrtx_var_iterator::array_type array;
+	FOR_EACH_SUBRTX_VAR (iter, array, XEXP (rtl, 1), ALL)
+	  if (SYMBOL_REF_P (*iter)
+	      || LABEL_P (*iter)
+	      || GET_CODE (*iter) == UNSPEC)
+	    return false;
+	return true;
+      }
     default:
       return true;
     }
@@ -15608,6 +15641,7 @@  mem_loc_descriptor (rtx rtl, machine_mod
 	 pool.  */
     case CONST:
     case SYMBOL_REF:
+    case UNSPEC:
       if (!is_a <scalar_int_mode> (mode, &int_mode)
 	  || (GET_MODE_SIZE (int_mode) > DWARF2_ADDR_SIZE
 #ifdef POINTERS_EXTEND_UNSIGNED
@@ -15615,6 +15649,43 @@  mem_loc_descriptor (rtx rtl, machine_mod
 #endif
 	      ))
 	break;
+
+      if (GET_CODE (rtl) == UNSPEC)
+	{
+	  /* If delegitimize_address couldn't do anything with the UNSPEC, we
+	     can't express it in the debug info.  This can happen e.g. with some
+	     TLS UNSPECs.  Allow UNSPECs formerly from CONST that the backend
+	     approves.  */
+	  bool not_ok = false;
+
+	  if (!in_const_p)
+	    break;
+
+	  subrtx_var_iterator::array_type array;
+	  FOR_EACH_SUBRTX_VAR (iter, array, rtl, ALL)
+	    if (*iter != rtl && !CONSTANT_P (*iter))
+	      {
+		not_ok = true;
+		break;
+	      }
+
+	  if (not_ok)
+	    break;
+
+	  FOR_EACH_SUBRTX_VAR (iter, array, rtl, ALL)
+	    if (!const_ok_for_output_1 (*iter))
+	      {
+		not_ok = true;
+		break;
+	      }
+
+	  if (not_ok)
+	    break;
+
+	  rtl = gen_rtx_CONST (GET_MODE (rtl), rtl);
+	  goto symref;
+	}
+
       if (GET_CODE (rtl) == SYMBOL_REF
 	  && SYMBOL_REF_TLS_MODEL (rtl) != TLS_MODEL_NONE)
 	{
@@ -15662,8 +15733,13 @@  mem_loc_descriptor (rtx rtl, machine_mod
 		  }
 		break;
 	      default:
-		mem_loc_result = mem_loc_descriptor (XEXP (rtl, 0), int_mode,
-						     mem_mode, initialized);
+		{
+		  bool save_in_const_p = in_const_p;
+		  in_const_p = true;
+		  mem_loc_result = mem_loc_descriptor (XEXP (rtl, 0), int_mode,
+						       mem_mode, initialized);
+		  in_const_p = save_in_const_p;
+		}
 		break;
 	      }
 	  break;
@@ -16283,7 +16359,6 @@  mem_loc_descriptor (rtx rtl, machine_mod
     case VEC_CONCAT:
     case VEC_DUPLICATE:
     case VEC_SERIES:
-    case UNSPEC:
     case HIGH:
     case FMA:
     case STRICT_LOW_PART:
@@ -16291,9 +16366,6 @@  mem_loc_descriptor (rtx rtl, machine_mod
     case CONST_FIXED:
     case CLRSB:
     case CLOBBER:
-      /* If delegitimize_address couldn't do anything with the UNSPEC, we
-	 can't express it in the debug info.  This can happen e.g. with some
-	 TLS UNSPECs.  */
       break;
 
     case CONST_STRING:
--- gcc/testsuite/gcc.dg/debug/dwarf2/pr88635.c	(nonexistent)
+++ gcc/testsuite/gcc.dg/debug/dwarf2/pr88635.c	(revision 267594)
@@ -0,0 +1,24 @@ 
+/* PR debug/88635 */
+/* { dg-do assemble } */
+/* { dg-options "-g -O2" } */
+/* { dg-additional-options "-fpie" { target pie } } */
+
+static void
+foo (char *b)
+{
+  unsigned c = 0;
+  --c;
+  do
+    if (++*b++ == 0)
+      break;
+  while (--c);
+  if (c == 0)
+    while (*b++)
+      ;
+}
+
+void
+bar (void)
+{
+  foo ("");
+}