Message ID | 20150113162223.GF1405@tucnak.redhat.com |
---|---|
State | New |
Headers | show |
On Tue, 13 Jan 2015 17:22:23, Jakub Jelinek wrote: > > > So do you mean following? I've bootstrapped/regtested on x86_64-linux and > i686-linux, but haven't tried any special testcases on it. If it works for > you, I'll commit it. > OK. Thanks Bernd. > 2015-01-13 Jakub Jelinek <jakub@redhat.com> > > * sanitizer_common/sanitizer_deadlock_detector.h: Cherry pick > upstream r224518 and r224519. > * tsan/tsan_rtl_thread.cc: Cherry pick upstream r224702 and > r224834. > > --- libsanitizer/sanitizer_common/sanitizer_deadlock_detector.h.jj 2014-05-22 10:18:14.893626024 +0200 > +++ libsanitizer/sanitizer_common/sanitizer_deadlock_detector.h 2015-01-13 14:07:40.096414924 +0100 > @@ -48,6 +48,8 @@ class DeadlockDetectorTLS { > if (epoch_ == current_epoch) return; > bv_.clear(); > epoch_ = current_epoch; > + n_recursive_locks = 0; > + n_all_locks_ = 0; > } > > uptr getEpoch() const { return epoch_; } > @@ -81,7 +83,8 @@ class DeadlockDetectorTLS { > } > } > // Printf("remLock: %zx %zx\n", lock_id, epoch_); > - CHECK(bv_.clearBit(lock_id)); > + if (!bv_.clearBit(lock_id)) > + return; // probably addLock happened before flush > if (n_all_locks_) { > for (sptr i = n_all_locks_ - 1; i >= 0; i--) { > if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id)) { > @@ -173,6 +176,7 @@ class DeadlockDetector { > recycled_nodes_.clear(); > available_nodes_.setAll(); > g_.clear(); > + n_edges_ = 0; > return getAvailableNode(data); > } > > --- libsanitizer/tsan/tsan_rtl_thread.cc.jj 2014-09-24 11:08:03.824028080 +0200 > +++ libsanitizer/tsan/tsan_rtl_thread.cc 2015-01-13 14:08:06.167954292 +0100 > @@ -109,12 +109,13 @@ void ThreadContext::OnStarted(void *arg) > thr->dd_pt = ctx->dd->CreatePhysicalThread(); > thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id); > } > + thr->fast_state.SetHistorySize(flags()->history_size); > + // Commit switch to the new part of the trace. > + // TraceAddEvent will reset stack0/mset0 in the new part for us.
> + TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0); > + > thr->fast_synch_epoch = epoch0; > AcquireImpl(thr, 0, &sync); > - thr->fast_state.SetHistorySize(flags()->history_size); > - const uptr trace = (epoch0 / kTracePartSize) % TraceParts(); > - Trace *thr_trace = ThreadTrace(thr->tid); > - thr_trace->headers[trace].epoch0 = epoch0; > StatInc(thr, StatSyncAcquire); > sync.Reset(&thr->clock_cache); > DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx " > > > Jakub
--- libsanitizer/sanitizer_common/sanitizer_deadlock_detector.h.jj 2014-05-22 10:18:14.893626024 +0200 +++ libsanitizer/sanitizer_common/sanitizer_deadlock_detector.h 2015-01-13 14:07:40.096414924 +0100 @@ -48,6 +48,8 @@ class DeadlockDetectorTLS { if (epoch_ == current_epoch) return; bv_.clear(); epoch_ = current_epoch; + n_recursive_locks = 0; + n_all_locks_ = 0; } uptr getEpoch() const { return epoch_; } @@ -81,7 +83,8 @@ class DeadlockDetectorTLS { } } // Printf("remLock: %zx %zx\n", lock_id, epoch_); - CHECK(bv_.clearBit(lock_id)); + if (!bv_.clearBit(lock_id)) + return; // probably addLock happened before flush if (n_all_locks_) { for (sptr i = n_all_locks_ - 1; i >= 0; i--) { if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id)) { @@ -173,6 +176,7 @@ class DeadlockDetector { recycled_nodes_.clear(); available_nodes_.setAll(); g_.clear(); + n_edges_ = 0; return getAvailableNode(data); } --- libsanitizer/tsan/tsan_rtl_thread.cc.jj 2014-09-24 11:08:03.824028080 +0200 +++ libsanitizer/tsan/tsan_rtl_thread.cc 2015-01-13 14:08:06.167954292 +0100 @@ -109,12 +109,13 @@ void ThreadContext::OnStarted(void *arg) thr->dd_pt = ctx->dd->CreatePhysicalThread(); thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id); } + thr->fast_state.SetHistorySize(flags()->history_size); + // Commit switch to the new part of the trace. + // TraceAddEvent will reset stack0/mset0 in the new part for us. + TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0); + thr->fast_synch_epoch = epoch0; AcquireImpl(thr, 0, &sync); - thr->fast_state.SetHistorySize(flags()->history_size); - const uptr trace = (epoch0 / kTracePartSize) % TraceParts(); - Trace *thr_trace = ThreadTrace(thr->tid); - thr_trace->headers[trace].epoch0 = epoch0; StatInc(thr, StatSyncAcquire); sync.Reset(&thr->clock_cache); DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "