diff mbox

-fsanitize=thread fixes (PR sanitizer/68260)

Message ID 20160906203359.GN14857@tucnak.redhat.com
State New
Headers show

Commit Message

Jakub Jelinek Sept. 6, 2016, 8:33 p.m. UTC
Hi!

As the PRs show, -fsanitize=thread hasn't been instrumenting __atomic_clear
and __atomic_test_and_set builtins (i.e. those that operate on bool always),
so we reported a data race on the testcase even when there wasn't one.
And, as the other testcase shows, there were various ICEs with
-fnon-call-exceptions -fsanitize=thread.

Fixed thusly, bootstrapped/regtested on x86_64-linux and i686-linux, ok for
trunk?

2016-09-06  Jakub Jelinek  <jakub@redhat.com>

	PR sanitizer/68260
	* tsan.c: Include target.h and tree-eh.h.
	(enum tsan_atomic_action): Add bool_clear and bool_test_and_set.
	(BOOL_CLEAR, BOOL_TEST_AND_SET): Define.
	(tsan_atomic_table): Add BUILT_IN_ATOMIC_CLEAR and
	BUILT_IN_ATOMIC_TEST_AND_SET entries.
	(instrument_builtin_call): Remove EH edges of replaced call stmts.
	Handle bool_clear and bool_test_and_set.
	(instrument_memory_accesses): Add RET argument, purge dead eh edges
	at the end of bbs and set *RET in that case to TODO_cleanup_cfg.
	(tsan_pass): Adjust instrument_memory_accesses caller, return ret.

	* c-c++-common/tsan/pr68260.c: New test.
	* g++.dg/tsan/pr68260.C: New test.


	Jakub

Comments

Richard Biener Sept. 7, 2016, 7:07 a.m. UTC | #1
On Tue, 6 Sep 2016, Jakub Jelinek wrote:

> Hi!
> 
> As the PRs show, -fsanitize=thread hasn't been instrumenting __atomic_clear
> and __atomic_test_and_set builtins (i.e. those that operate on bool always),
> so we reported a data race on the testcase even when there wasn't one.
> And, as the other testcase shows, there were various ICEs with
> -fnon-call-exceptions -fsanitize=thread.
> 
> Fixed thusly, bootstrapped/regtested on x86_64-linux and i686-linux, ok for
> trunk?
> 
> 2016-09-06  Jakub Jelinek  <jakub@redhat.com>
> 
> 	PR sanitizer/68260
> 	* tsan.c: Include target.h and tree-eh.h.
> 	(enum tsan_atomic_action): Add bool_clear and bool_test_and_set.
> 	(BOOL_CLEAR, BOOL_TEST_AND_SET): Define.
> 	(tsan_atomic_table): Add BUILT_IN_ATOMIC_CLEAR and
> 	BUILT_IN_ATOMIC_TEST_AND_SET entries.
> 	(instrument_builtin_call): Remove EH edges of replaced call stmts.
> 	Handle bool_clear and bool_test_and_set.
> 	(instrument_memory_accesses): Add RET argument, purge dead eh edges
> 	at the end of bbs and set *RET in that case to TODO_cleanup_cfg.
> 	(tsan_pass): Adjust instrument_memory_accesses caller, return ret.
> 
> 	* c-c++-common/tsan/pr68260.c: New test.
> 	* g++.dg/tsan/pr68260.C: New test.
> 
> --- gcc/tsan.c.jj	2016-07-11 22:18:08.000000000 +0200
> +++ gcc/tsan.c	2016-09-06 14:06:21.193642590 +0200
> @@ -40,6 +40,8 @@ along with GCC; see the file COPYING3.
>  #include "tsan.h"
>  #include "asan.h"
>  #include "builtins.h"
> +#include "target.h"
> +#include "tree-eh.h"
>  
>  /* Number of instrumented memory accesses in the current function.  */
>  
> @@ -240,7 +242,8 @@ instrument_expr (gimple_stmt_iterator gs
>  enum tsan_atomic_action
>  {
>    check_last, add_seq_cst, add_acquire, weak_cas, strong_cas,
> -  bool_cas, val_cas, lock_release, fetch_op, fetch_op_seq_cst
> +  bool_cas, val_cas, lock_release, fetch_op, fetch_op_seq_cst,
> +  bool_clear, bool_test_and_set
>  };
>  
>  /* Table how to map sync/atomic builtins to their corresponding
> @@ -274,6 +277,10 @@ static const struct tsan_map_atomic
>    TRANSFORM (fcode, tsan_fcode, fetch_op, code)
>  #define FETCH_OPS(fcode, tsan_fcode, code) \
>    TRANSFORM (fcode, tsan_fcode, fetch_op_seq_cst, code)
> +#define BOOL_CLEAR(fcode, tsan_fcode) \
> +  TRANSFORM (fcode, tsan_fcode, bool_clear, ERROR_MARK)
> +#define BOOL_TEST_AND_SET(fcode, tsan_fcode) \
> +  TRANSFORM (fcode, tsan_fcode, bool_test_and_set, ERROR_MARK)
>  
>    CHECK_LAST (ATOMIC_LOAD_1, TSAN_ATOMIC8_LOAD),
>    CHECK_LAST (ATOMIC_LOAD_2, TSAN_ATOMIC16_LOAD),
> @@ -463,7 +470,11 @@ static const struct tsan_map_atomic
>    LOCK_RELEASE (SYNC_LOCK_RELEASE_2, TSAN_ATOMIC16_STORE),
>    LOCK_RELEASE (SYNC_LOCK_RELEASE_4, TSAN_ATOMIC32_STORE),
>    LOCK_RELEASE (SYNC_LOCK_RELEASE_8, TSAN_ATOMIC64_STORE),
> -  LOCK_RELEASE (SYNC_LOCK_RELEASE_16, TSAN_ATOMIC128_STORE)
> +  LOCK_RELEASE (SYNC_LOCK_RELEASE_16, TSAN_ATOMIC128_STORE),
> +
> +  BOOL_CLEAR (ATOMIC_CLEAR, TSAN_ATOMIC8_STORE),
> +
> +  BOOL_TEST_AND_SET (ATOMIC_TEST_AND_SET, TSAN_ATOMIC8_EXCHANGE)
>  };
>  
>  /* Instrument an atomic builtin.  */
> @@ -493,6 +504,8 @@ instrument_builtin_call (gimple_stmt_ite
>  	    if (!tree_fits_uhwi_p (last_arg)
>  		|| memmodel_base (tree_to_uhwi (last_arg)) >= MEMMODEL_LAST)
>  	      return;
> +	    if (lookup_stmt_eh_lp (stmt))
> +	      remove_stmt_from_eh_lp (stmt);

These changes look bogus to me -- how can the tsan instrumentation
function _not_ throw when the original function we replace can?

It seems you are just avoiding the ICEs for now by turning them into
"wrong-code".  (and how does this relate to -fnon-call-exceptions as both are calls?)

The instrument_expr case seems to leave the original stmt in-place
(and thus the EH).

Richard.

>  	    gimple_call_set_fndecl (stmt, decl);
>  	    update_stmt (stmt);
>  	    if (tsan_atomic_table[i].action == fetch_op)
> @@ -514,6 +527,8 @@ instrument_builtin_call (gimple_stmt_ite
>  				       != add_acquire
>  				       ? MEMMODEL_SEQ_CST
>  				       : MEMMODEL_ACQUIRE);
> +	    if (lookup_stmt_eh_lp (stmt))
> +	      remove_stmt_from_eh_lp (stmt);
>  	    update_gimple_call (gsi, decl, num + 1, args[0], args[1], args[2]);
>  	    stmt = gsi_stmt (*gsi);
>  	    if (tsan_atomic_table[i].action == fetch_op_seq_cst)
> @@ -561,6 +576,8 @@ instrument_builtin_call (gimple_stmt_ite
>  	    if (!tree_fits_uhwi_p (args[5])
>  		|| memmodel_base (tree_to_uhwi (args[5])) >= MEMMODEL_LAST)
>  	      return;
> +	    if (lookup_stmt_eh_lp (stmt))
> +	      remove_stmt_from_eh_lp (stmt);
>  	    update_gimple_call (gsi, decl, 5, args[0], args[1], args[2],
>  				args[4], args[5]);
>  	    return;
> @@ -584,6 +601,8 @@ instrument_builtin_call (gimple_stmt_ite
>  	    g = gimple_build_assign (t, args[1]);
>  	    gsi_insert_before (gsi, g, GSI_SAME_STMT);
>  	    lhs = gimple_call_lhs (stmt);
> +	    if (lookup_stmt_eh_lp (stmt))
> +	      remove_stmt_from_eh_lp (stmt);
>  	    update_gimple_call (gsi, decl, 5, args[0],
>  				build_fold_addr_expr (t), args[2],
>  				build_int_cst (NULL_TREE,
> @@ -610,11 +629,66 @@ instrument_builtin_call (gimple_stmt_ite
>  	    gcc_assert (num == 1);
>  	    t = TYPE_ARG_TYPES (TREE_TYPE (decl));
>  	    t = TREE_VALUE (TREE_CHAIN (t));
> +	    if (lookup_stmt_eh_lp (stmt))
> +	      remove_stmt_from_eh_lp (stmt);
>  	    update_gimple_call (gsi, decl, 3, gimple_call_arg (stmt, 0),
>  				build_int_cst (t, 0),
>  				build_int_cst (NULL_TREE,
>  					       MEMMODEL_RELEASE));
>  	    return;
> +	  case bool_clear:
> +	  case bool_test_and_set:
> +	    if (BOOL_TYPE_SIZE != 8)
> +	      {
> +		decl = NULL_TREE;
> +		for (j = 1; j < 5; j++)
> +		  if (BOOL_TYPE_SIZE == (8 << j))
> +		    {
> +		      enum built_in_function tsan_fcode
> +			= (enum built_in_function)
> +			  (tsan_atomic_table[i].tsan_fcode + j);
> +		      decl = builtin_decl_implicit (tsan_fcode);
> +		      break;
> +		    }
> +		if (decl == NULL_TREE)
> +		  return;
> +	      }
> +	    last_arg = gimple_call_arg (stmt, num - 1);
> +	    if (!tree_fits_uhwi_p (last_arg)
> +		|| memmodel_base (tree_to_uhwi (last_arg)) >= MEMMODEL_LAST)
> +	      return;
> +	    t = TYPE_ARG_TYPES (TREE_TYPE (decl));
> +	    t = TREE_VALUE (TREE_CHAIN (t));
> +	    if (lookup_stmt_eh_lp (stmt))
> +	      remove_stmt_from_eh_lp (stmt);
> +	    if (tsan_atomic_table[i].action == bool_clear)
> +	      {
> +		update_gimple_call (gsi, decl, 3, gimple_call_arg (stmt, 0),
> +				    build_int_cst (t, 0), last_arg);
> +		return;
> +	      }
> +	    t = build_int_cst (t, targetm.atomic_test_and_set_trueval);
> +	    update_gimple_call (gsi, decl, 3, gimple_call_arg (stmt, 0),
> +				t, last_arg);
> +	    stmt = gsi_stmt (*gsi);
> +	    lhs = gimple_call_lhs (stmt);
> +	    if (lhs == NULL_TREE)
> +	      return;
> +	    if (targetm.atomic_test_and_set_trueval != 1
> +		|| !useless_type_conversion_p (TREE_TYPE (lhs),
> +					       TREE_TYPE (t)))
> +	      {
> +		tree new_lhs = make_ssa_name (TREE_TYPE (t));
> +		gimple_call_set_lhs (stmt, new_lhs);
> +		if (targetm.atomic_test_and_set_trueval != 1)
> +		  g = gimple_build_assign (lhs, NE_EXPR, new_lhs,
> +					   build_int_cst (TREE_TYPE (t), 0));
> +		else
> +		  g = gimple_build_assign (lhs, NOP_EXPR, new_lhs);
> +		gsi_insert_after (gsi, g, GSI_NEW_STMT);
> +		update_stmt (stmt);
> +	      }
> +	    return;
>  	  default:
>  	    continue;
>  	  }
> @@ -706,7 +780,7 @@ instrument_func_exit (void)
>     Return true if func entry/exit should be instrumented.  */
>  
>  static bool
> -instrument_memory_accesses (void)
> +instrument_memory_accesses (unsigned int *ret)
>  {
>    basic_block bb;
>    gimple_stmt_iterator gsi;
> @@ -715,22 +789,26 @@ instrument_memory_accesses (void)
>    auto_vec<gimple *> tsan_func_exits;
>  
>    FOR_EACH_BB_FN (bb, cfun)
> -    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
> -      {
> -	gimple *stmt = gsi_stmt (gsi);
> -	if (is_gimple_call (stmt)
> -	    && gimple_call_internal_p (stmt)
> -	    && gimple_call_internal_fn (stmt) == IFN_TSAN_FUNC_EXIT)
> -	  {
> -	    if (fentry_exit_instrument)
> -	      replace_func_exit (stmt);
> -	    else
> -	      tsan_func_exits.safe_push (stmt);
> -	    func_exit_seen = true;
> -	  }
> -	else
> -	  fentry_exit_instrument |= instrument_gimple (&gsi);
> -      }
> +    {
> +      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
> +	{
> +	  gimple *stmt = gsi_stmt (gsi);
> +	  if (is_gimple_call (stmt)
> +	      && gimple_call_internal_p (stmt)
> +	      && gimple_call_internal_fn (stmt) == IFN_TSAN_FUNC_EXIT)
> +	    {
> +	      if (fentry_exit_instrument)
> +		replace_func_exit (stmt);
> +	      else
> +		tsan_func_exits.safe_push (stmt);
> +	      func_exit_seen = true;
> +	    }
> +	  else
> +	    fentry_exit_instrument |= instrument_gimple (&gsi);
> +	}
> +      if (gimple_purge_dead_eh_edges (bb))
> +	*ret = TODO_cleanup_cfg;
> +    }
>    unsigned int i;
>    gimple *stmt;
>    FOR_EACH_VEC_ELT (tsan_func_exits, i, stmt)
> @@ -776,10 +854,11 @@ instrument_func_entry (void)
>  static unsigned
>  tsan_pass (void)
>  {
> +  unsigned int ret = 0;
>    initialize_sanitizer_builtins ();
> -  if (instrument_memory_accesses ())
> +  if (instrument_memory_accesses (&ret))
>      instrument_func_entry ();
> -  return 0;
> +  return ret;
>  }
>  
>  /* Inserts __tsan_init () into the list of CTORs.  */
> --- gcc/testsuite/c-c++-common/tsan/pr68260.c.jj	2016-09-06 15:56:27.772209548 +0200
> +++ gcc/testsuite/c-c++-common/tsan/pr68260.c	2016-09-06 16:08:23.424203133 +0200
> @@ -0,0 +1,28 @@
> +/* PR sanitizer/68260 */
> +
> +#include <pthread.h>
> +#include <stdbool.h>
> +
> +bool lock;
> +int counter;
> +
> +void *
> +tf (void *arg)
> +{
> +  (void) arg;
> +  while (__atomic_test_and_set (&lock, __ATOMIC_ACQUIRE))
> +    ;
> +  ++counter;
> +  __atomic_clear (&lock, __ATOMIC_RELEASE);
> +  return (void *) 0;
> +}
> +
> +int
> +main ()
> +{
> +  pthread_t thr;
> +  pthread_create (&thr, 0, tf, 0);
> +  tf ((void *) 0);
> +  pthread_join (thr, 0);
> +  return 0;
> +}
> --- gcc/testsuite/g++.dg/tsan/pr68260.C.jj	2016-09-06 15:58:41.118530940 +0200
> +++ gcc/testsuite/g++.dg/tsan/pr68260.C	2016-09-06 16:03:11.988121144 +0200
> @@ -0,0 +1,19 @@
> +/* PR sanitizer/68260 */
> +/* { dg-do compile } */
> +/* { dg-additional-options "-fnon-call-exceptions" } */
> +
> +int
> +foo (bool *p, int *q)
> +{
> +  try
> +    {
> +      while (__atomic_test_and_set (p, __ATOMIC_ACQUIRE))
> +	;
> +      __atomic_clear (p, __ATOMIC_RELEASE);
> +      return __sync_fetch_and_add_4 (q, 4);
> +    }
> +  catch (...)
> +    {
> +      return -1;
> +    }
> +}
> 
> 	Jakub
> 
>
Jakub Jelinek Sept. 7, 2016, 8:40 p.m. UTC | #2
On Wed, Sep 07, 2016 at 09:07:45AM +0200, Richard Biener wrote:
> > @@ -493,6 +504,8 @@ instrument_builtin_call (gimple_stmt_ite
> >  	    if (!tree_fits_uhwi_p (last_arg)
> >  		|| memmodel_base (tree_to_uhwi (last_arg)) >= MEMMODEL_LAST)
> >  	      return;
> > +	    if (lookup_stmt_eh_lp (stmt))
> > +	      remove_stmt_from_eh_lp (stmt);
> 
> These changes look bogus to me -- how can the tsan instrumentation
> function _not_ throw when the original function we replace can?

The __tsan*atomic* functions are right now declared to be nothrow, so the
patch just matches how they are declared.
While the sync-builtins.def use
#define ATTR_NOTHROWCALL_LEAF_LIST (flag_non_call_exceptions ? \
        ATTR_LEAF_LIST : ATTR_NOTHROW_LEAF_LIST)
I guess I could use the same for the tsan atomics, but I wonder if it will work
properly when libtsan is built with exceptions disabled and without
-fnon-call-exceptions.  Perhaps it would, at least if it is built with
-fasynchronous-unwind-tables (which is the case for x86_64 and aarch64 and
tsan isn't supported elsewhere).
> 
> It seems you are just avoiding the ICEs for now "wrong-code".  (and
> how does this relate to -fnon-call-exceptions as both are calls?)
> 
> The instrument_expr case seems to leave the original stmt in-place
> (and thus the EH).

Those are different.  For loads and stores there is just added call that
logs in the load or store, but for atomics the atomics are done inside of
the library and the library tracks everything.

	Jakub
Jakub Jelinek Sept. 8, 2016, 12:34 p.m. UTC | #3
On Wed, Sep 07, 2016 at 10:40:20PM +0200, Jakub Jelinek wrote:
> On Wed, Sep 07, 2016 at 09:07:45AM +0200, Richard Biener wrote:
> > > @@ -493,6 +504,8 @@ instrument_builtin_call (gimple_stmt_ite
> > >  	    if (!tree_fits_uhwi_p (last_arg)
> > >  		|| memmodel_base (tree_to_uhwi (last_arg)) >= MEMMODEL_LAST)
> > >  	      return;
> > > +	    if (lookup_stmt_eh_lp (stmt))
> > > +	      remove_stmt_from_eh_lp (stmt);
> > 
> > These changes look bogus to me -- how can the tsan instrumentation
> > function _not_ throw when the original function we replace can?
> 
> The __tsan*atomic* functions are right now declared to be nothrow, so the
> patch just matches how they are declared.
> While the sync-builtins.def use
> #define ATTR_NOTHROWCALL_LEAF_LIST (flag_non_call_exceptions ? \
>         ATTR_LEAF_LIST : ATTR_NOTHROW_LEAF_LIST)
> I guess I could use the same for the tsan atomics, but wonder if it will work
> properly when the libtsan is built with exceptions disabled and without
> -fnon-call-exceptions.  Perhaps it would, at least if it is built with
> -fasynchronous-unwind-tables (which is the case for x86_64 and aarch64 and
> tsan isn't supported elsewhere).

Actually, looking at the libsanitizer/tsan/ implementation, I have doubts
the atomics handling code would behave reasonably if an exception is thrown
during the atomic memory access; I'm afraid some classes wouldn't be
destructed.

I can split the patch into two, one dealing just with the
__atomic_clear/__atomic_test_and_set instrumentation and another for the
preexisting -fnon-call-exceptions ICEs.  For the latter, one possibility
would be to error out on the -fsanitize=thread -fnon-call-exceptions
combination.

	Jakub
diff mbox

Patch

--- gcc/tsan.c.jj	2016-07-11 22:18:08.000000000 +0200
+++ gcc/tsan.c	2016-09-06 14:06:21.193642590 +0200
@@ -40,6 +40,8 @@  along with GCC; see the file COPYING3.
 #include "tsan.h"
 #include "asan.h"
 #include "builtins.h"
+#include "target.h"
+#include "tree-eh.h"
 
 /* Number of instrumented memory accesses in the current function.  */
 
@@ -240,7 +242,8 @@  instrument_expr (gimple_stmt_iterator gs
 enum tsan_atomic_action
 {
   check_last, add_seq_cst, add_acquire, weak_cas, strong_cas,
-  bool_cas, val_cas, lock_release, fetch_op, fetch_op_seq_cst
+  bool_cas, val_cas, lock_release, fetch_op, fetch_op_seq_cst,
+  bool_clear, bool_test_and_set
 };
 
 /* Table how to map sync/atomic builtins to their corresponding
@@ -274,6 +277,10 @@  static const struct tsan_map_atomic
   TRANSFORM (fcode, tsan_fcode, fetch_op, code)
 #define FETCH_OPS(fcode, tsan_fcode, code) \
   TRANSFORM (fcode, tsan_fcode, fetch_op_seq_cst, code)
+#define BOOL_CLEAR(fcode, tsan_fcode) \
+  TRANSFORM (fcode, tsan_fcode, bool_clear, ERROR_MARK)
+#define BOOL_TEST_AND_SET(fcode, tsan_fcode) \
+  TRANSFORM (fcode, tsan_fcode, bool_test_and_set, ERROR_MARK)
 
   CHECK_LAST (ATOMIC_LOAD_1, TSAN_ATOMIC8_LOAD),
   CHECK_LAST (ATOMIC_LOAD_2, TSAN_ATOMIC16_LOAD),
@@ -463,7 +470,11 @@  static const struct tsan_map_atomic
   LOCK_RELEASE (SYNC_LOCK_RELEASE_2, TSAN_ATOMIC16_STORE),
   LOCK_RELEASE (SYNC_LOCK_RELEASE_4, TSAN_ATOMIC32_STORE),
   LOCK_RELEASE (SYNC_LOCK_RELEASE_8, TSAN_ATOMIC64_STORE),
-  LOCK_RELEASE (SYNC_LOCK_RELEASE_16, TSAN_ATOMIC128_STORE)
+  LOCK_RELEASE (SYNC_LOCK_RELEASE_16, TSAN_ATOMIC128_STORE),
+
+  BOOL_CLEAR (ATOMIC_CLEAR, TSAN_ATOMIC8_STORE),
+
+  BOOL_TEST_AND_SET (ATOMIC_TEST_AND_SET, TSAN_ATOMIC8_EXCHANGE)
 };
 
 /* Instrument an atomic builtin.  */
@@ -493,6 +504,8 @@  instrument_builtin_call (gimple_stmt_ite
 	    if (!tree_fits_uhwi_p (last_arg)
 		|| memmodel_base (tree_to_uhwi (last_arg)) >= MEMMODEL_LAST)
 	      return;
+	    if (lookup_stmt_eh_lp (stmt))
+	      remove_stmt_from_eh_lp (stmt);
 	    gimple_call_set_fndecl (stmt, decl);
 	    update_stmt (stmt);
 	    if (tsan_atomic_table[i].action == fetch_op)
@@ -514,6 +527,8 @@  instrument_builtin_call (gimple_stmt_ite
 				       != add_acquire
 				       ? MEMMODEL_SEQ_CST
 				       : MEMMODEL_ACQUIRE);
+	    if (lookup_stmt_eh_lp (stmt))
+	      remove_stmt_from_eh_lp (stmt);
 	    update_gimple_call (gsi, decl, num + 1, args[0], args[1], args[2]);
 	    stmt = gsi_stmt (*gsi);
 	    if (tsan_atomic_table[i].action == fetch_op_seq_cst)
@@ -561,6 +576,8 @@  instrument_builtin_call (gimple_stmt_ite
 	    if (!tree_fits_uhwi_p (args[5])
 		|| memmodel_base (tree_to_uhwi (args[5])) >= MEMMODEL_LAST)
 	      return;
+	    if (lookup_stmt_eh_lp (stmt))
+	      remove_stmt_from_eh_lp (stmt);
 	    update_gimple_call (gsi, decl, 5, args[0], args[1], args[2],
 				args[4], args[5]);
 	    return;
@@ -584,6 +601,8 @@  instrument_builtin_call (gimple_stmt_ite
 	    g = gimple_build_assign (t, args[1]);
 	    gsi_insert_before (gsi, g, GSI_SAME_STMT);
 	    lhs = gimple_call_lhs (stmt);
+	    if (lookup_stmt_eh_lp (stmt))
+	      remove_stmt_from_eh_lp (stmt);
 	    update_gimple_call (gsi, decl, 5, args[0],
 				build_fold_addr_expr (t), args[2],
 				build_int_cst (NULL_TREE,
@@ -610,11 +629,66 @@  instrument_builtin_call (gimple_stmt_ite
 	    gcc_assert (num == 1);
 	    t = TYPE_ARG_TYPES (TREE_TYPE (decl));
 	    t = TREE_VALUE (TREE_CHAIN (t));
+	    if (lookup_stmt_eh_lp (stmt))
+	      remove_stmt_from_eh_lp (stmt);
 	    update_gimple_call (gsi, decl, 3, gimple_call_arg (stmt, 0),
 				build_int_cst (t, 0),
 				build_int_cst (NULL_TREE,
 					       MEMMODEL_RELEASE));
 	    return;
+	  case bool_clear:
+	  case bool_test_and_set:
+	    if (BOOL_TYPE_SIZE != 8)
+	      {
+		decl = NULL_TREE;
+		for (j = 1; j < 5; j++)
+		  if (BOOL_TYPE_SIZE == (8 << j))
+		    {
+		      enum built_in_function tsan_fcode
+			= (enum built_in_function)
+			  (tsan_atomic_table[i].tsan_fcode + j);
+		      decl = builtin_decl_implicit (tsan_fcode);
+		      break;
+		    }
+		if (decl == NULL_TREE)
+		  return;
+	      }
+	    last_arg = gimple_call_arg (stmt, num - 1);
+	    if (!tree_fits_uhwi_p (last_arg)
+		|| memmodel_base (tree_to_uhwi (last_arg)) >= MEMMODEL_LAST)
+	      return;
+	    t = TYPE_ARG_TYPES (TREE_TYPE (decl));
+	    t = TREE_VALUE (TREE_CHAIN (t));
+	    if (lookup_stmt_eh_lp (stmt))
+	      remove_stmt_from_eh_lp (stmt);
+	    if (tsan_atomic_table[i].action == bool_clear)
+	      {
+		update_gimple_call (gsi, decl, 3, gimple_call_arg (stmt, 0),
+				    build_int_cst (t, 0), last_arg);
+		return;
+	      }
+	    t = build_int_cst (t, targetm.atomic_test_and_set_trueval);
+	    update_gimple_call (gsi, decl, 3, gimple_call_arg (stmt, 0),
+				t, last_arg);
+	    stmt = gsi_stmt (*gsi);
+	    lhs = gimple_call_lhs (stmt);
+	    if (lhs == NULL_TREE)
+	      return;
+	    if (targetm.atomic_test_and_set_trueval != 1
+		|| !useless_type_conversion_p (TREE_TYPE (lhs),
+					       TREE_TYPE (t)))
+	      {
+		tree new_lhs = make_ssa_name (TREE_TYPE (t));
+		gimple_call_set_lhs (stmt, new_lhs);
+		if (targetm.atomic_test_and_set_trueval != 1)
+		  g = gimple_build_assign (lhs, NE_EXPR, new_lhs,
+					   build_int_cst (TREE_TYPE (t), 0));
+		else
+		  g = gimple_build_assign (lhs, NOP_EXPR, new_lhs);
+		gsi_insert_after (gsi, g, GSI_NEW_STMT);
+		update_stmt (stmt);
+	      }
+	    return;
 	  default:
 	    continue;
 	  }
@@ -706,7 +780,7 @@  instrument_func_exit (void)
    Return true if func entry/exit should be instrumented.  */
 
 static bool
-instrument_memory_accesses (void)
+instrument_memory_accesses (unsigned int *ret)
 {
   basic_block bb;
   gimple_stmt_iterator gsi;
@@ -715,22 +789,26 @@  instrument_memory_accesses (void)
   auto_vec<gimple *> tsan_func_exits;
 
   FOR_EACH_BB_FN (bb, cfun)
-    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
-      {
-	gimple *stmt = gsi_stmt (gsi);
-	if (is_gimple_call (stmt)
-	    && gimple_call_internal_p (stmt)
-	    && gimple_call_internal_fn (stmt) == IFN_TSAN_FUNC_EXIT)
-	  {
-	    if (fentry_exit_instrument)
-	      replace_func_exit (stmt);
-	    else
-	      tsan_func_exits.safe_push (stmt);
-	    func_exit_seen = true;
-	  }
-	else
-	  fentry_exit_instrument |= instrument_gimple (&gsi);
-      }
+    {
+      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+	{
+	  gimple *stmt = gsi_stmt (gsi);
+	  if (is_gimple_call (stmt)
+	      && gimple_call_internal_p (stmt)
+	      && gimple_call_internal_fn (stmt) == IFN_TSAN_FUNC_EXIT)
+	    {
+	      if (fentry_exit_instrument)
+		replace_func_exit (stmt);
+	      else
+		tsan_func_exits.safe_push (stmt);
+	      func_exit_seen = true;
+	    }
+	  else
+	    fentry_exit_instrument |= instrument_gimple (&gsi);
+	}
+      if (gimple_purge_dead_eh_edges (bb))
+	*ret = TODO_cleanup_cfg;
+    }
   unsigned int i;
   gimple *stmt;
   FOR_EACH_VEC_ELT (tsan_func_exits, i, stmt)
@@ -776,10 +854,11 @@  instrument_func_entry (void)
 static unsigned
 tsan_pass (void)
 {
+  unsigned int ret = 0;
   initialize_sanitizer_builtins ();
-  if (instrument_memory_accesses ())
+  if (instrument_memory_accesses (&ret))
     instrument_func_entry ();
-  return 0;
+  return ret;
 }
 
 /* Inserts __tsan_init () into the list of CTORs.  */
--- gcc/testsuite/c-c++-common/tsan/pr68260.c.jj	2016-09-06 15:56:27.772209548 +0200
+++ gcc/testsuite/c-c++-common/tsan/pr68260.c	2016-09-06 16:08:23.424203133 +0200
@@ -0,0 +1,28 @@ 
+/* PR sanitizer/68260 */
+
+#include <pthread.h>
+#include <stdbool.h>
+
+bool lock;
+int counter;
+
+void *
+tf (void *arg)
+{
+  (void) arg;
+  while (__atomic_test_and_set (&lock, __ATOMIC_ACQUIRE))
+    ;
+  ++counter;
+  __atomic_clear (&lock, __ATOMIC_RELEASE);
+  return (void *) 0;
+}
+
+int
+main ()
+{
+  pthread_t thr;
+  pthread_create (&thr, 0, tf, 0);
+  tf ((void *) 0);
+  pthread_join (thr, 0);
+  return 0;
+}
--- gcc/testsuite/g++.dg/tsan/pr68260.C.jj	2016-09-06 15:58:41.118530940 +0200
+++ gcc/testsuite/g++.dg/tsan/pr68260.C	2016-09-06 16:03:11.988121144 +0200
@@ -0,0 +1,19 @@ 
+/* PR sanitizer/68260 */
+/* { dg-do compile } */
+/* { dg-additional-options "-fnon-call-exceptions" } */
+
+int
+foo (bool *p, int *q)
+{
+  try
+    {
+      while (__atomic_test_and_set (p, __ATOMIC_ACQUIRE))
+	;
+      __atomic_clear (p, __ATOMIC_RELEASE);
+      return __sync_fetch_and_add_4 (q, 4);
+    }
+  catch (...)
+    {
+      return -1;
+    }
+}