//===-- tsan_rtl_report.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_defs.h"
#include "tsan_fd.h"
#include "tsan_flags.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_report.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_sync.h"

namespace __tsan {

using namespace __sanitizer;

static ReportStack *SymbolizeStack(StackTrace trace);

// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif
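
// For example, a test or embedding application can provide its own strong
// definition of OnReport to observe or veto reports (hypothetical sketch;
// returning true marks the report as suppressed, so OutputReport below will
// not print it):
//
//   namespace __tsan {
//   bool OnReport(const ReportDesc *rep, bool suppressed) {
//     LogReportSomewhere(rep);  // hypothetical helper
//     return suppressed;        // keep the default suppression decision
//   }
//   }  // namespace __tsan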

SANITIZER_WEAK_DEFAULT_IMPL
void __tsan_on_report(const ReportDesc *rep) {
  (void)rep;
}

static void StackStripMain(SymbolizedStack *frames) {
  SymbolizedStack *last_frame = nullptr;
  SymbolizedStack *last_frame2 = nullptr;
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    last_frame2 = last_frame;
    last_frame = cur;
  }

  if (last_frame2 == 0)
    return;
#if !SANITIZER_GO
  const char *last = last_frame->info.function;
  const char *last2 = last_frame2->info.function;
  // Strip frame above 'main'
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
    // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
    // Strip global ctors init, .preinit_array and main caller.
  } else if (last && (0 == internal_strcmp(last, "__do_global_ctors_aux") ||
                      0 == internal_strcmp(last, "__libc_csu_init") ||
                      0 == internal_strcmp(last, "__libc_start_main"))) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
    // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered the stack completely. A trimmed stack
    // can actually happen if we do not instrument some code,
    // so it's only a debug print. However, we must try hard not to miss it
    // due to our own fault.
    DPrintf("Bottom stack frame is missed\n");
  }
#else
  // The last frame always points into the runtime (gosched0, goexit0,
  // runtime.main).
  last_frame->ClearAll();
  last_frame2->next = nullptr;
#endif
}

ReportStack *SymbolizeStackId(u32 stack_id) {
  if (stack_id == 0)
    return 0;
  StackTrace stack = StackDepotGet(stack_id);
  if (stack.trace == nullptr)
    return nullptr;
  return SymbolizeStack(stack);
}

static ReportStack *SymbolizeStack(StackTrace trace) {
  if (trace.size == 0)
    return 0;
  SymbolizedStack *top = nullptr;
  for (uptr si = 0; si < trace.size; si++) {
    const uptr pc = trace.trace[si];
    uptr pc1 = pc;
    // We obtain the return address, but we're interested in the previous
    // instruction.
    if ((pc & kExternalPCBit) == 0)
      pc1 = StackTrace::GetPreviousInstructionPc(pc);
    SymbolizedStack *ent = SymbolizeCode(pc1, si == trace.size - 1);
#if SANITIZER_GO
    if (ent == nullptr) {
      // Go might have 0 frames for this PC (wrapper frames aren't reported).
      continue;
    }
#endif
    CHECK_NE(ent, 0);
    SymbolizedStack *last = ent;
    while (last->next) {
      last->info.address = pc;  // restore original pc for report
      last = last->next;
    }
    last->info.address = pc;  // restore original pc for report
    last->next = top;
    top = ent;
  }
  StackStripMain(top);

  auto *stack = New<ReportStack>();
  stack->frames = top;
  return stack;
}

bool ShouldReport(ThreadState *thr, ReportType typ) {
  // We set thr->suppress_reports in the fork context.
  // Taking any locking in the fork context can lead to deadlocks.
  // If any locks are already taken, it's too late to do this check.
  CheckedMutex::CheckNoLocks();
  // For the same reason check we didn't lock thread_registry yet.
  if (SANITIZER_DEBUG)
    ThreadRegistryLock l(&ctx->thread_registry);
  if (!flags()->report_bugs || thr->suppress_reports)
    return false;
  switch (typ) {
    case ReportTypeSignalUnsafe:
      return flags()->report_signal_unsafe;
    case ReportTypeThreadLeak:
#if !SANITIZER_GO
      // It's impossible to join phantom threads
      // in the child after fork.
      if (ctx->after_multithreaded_fork)
        return false;
#endif
      return flags()->report_thread_leaks;
    case ReportTypeMutexDestroyLocked:
      return flags()->report_destroy_locked;
    default:
      return true;
  }
}

ScopedReportBase::ScopedReportBase(ReportType typ, uptr tag) {
  ctx->thread_registry.CheckLocked();
  rep_ = New<ReportDesc>();
  rep_->typ = typ;
  rep_->tag = tag;
  ctx->report_mtx.Lock();
}

ScopedReportBase::~ScopedReportBase() {
  ctx->report_mtx.Unlock();
  DestroyAndFree(rep_);
}

void ScopedReportBase::AddStack(StackTrace stack, bool suppressable) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(stack);
  (*rs)->suppressable = suppressable;
}

void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
                                       Tid tid, StackTrace stack,
                                       const MutexSet *mset) {
  uptr addr0, size;
  AccessType typ;
  s.GetAccess(&addr0, &size, &typ);
  auto *mop = New<ReportMop>();
  rep_->mops.PushBack(mop);
  mop->tid = tid;
  mop->addr = addr + addr0;
  mop->size = size;
  mop->write = !(typ & kAccessRead);
  mop->atomic = typ & kAccessAtomic;
  mop->external_tag = external_tag;
  mop->stack_trace = stack;
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    int id = this->AddMutex(d.addr, d.stack_id);
    ReportMopMutex mtx = {id, d.write};
    mop->mset.PushBack(mtx);
  }
}

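// Symbolizes everything collected so far: memory accesses, locations, threads
// and mutexes. Called from OutputReport once report collection is complete,
// so no symbolizer work happens while the report is still being assembled.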
void ScopedReportBase::SymbolizeStackElems() {
  // symbolize memory ops
  for (usize i = 0, size = rep_->mops.Size(); i < size; i++) {
    ReportMop *mop = rep_->mops[i];
    mop->stack = SymbolizeStack(mop->stack_trace);
    if (mop->stack)
      mop->stack->suppressable = true;
  }

  // symbolize locations
  for (usize i = 0, size = rep_->locs.Size(); i < size; i++) {
    // added locations have a NULL placeholder - don't dereference them
    if (ReportLocation *loc = rep_->locs[i])
      loc->stack = SymbolizeStackId(loc->stack_id);
  }

  // symbolize any added locations
  for (usize i = 0, size = rep_->added_location_addrs.Size(); i < size; i++) {
    AddedLocationAddr *added_loc = &rep_->added_location_addrs[i];
    if (ReportLocation *loc = SymbolizeData(added_loc->addr)) {
      loc->suppressable = true;
      rep_->locs[added_loc->locs_idx] = loc;
    }
  }

  // Filter out any added location placeholders that could not be symbolized
  usize j = 0;
  for (usize i = 0, size = rep_->locs.Size(); i < size; i++) {
    if (rep_->locs[i] != nullptr) {
      rep_->locs[j] = rep_->locs[i];
      j++;
    }
  }
  rep_->locs.Resize(j);

  // symbolize threads
  for (usize i = 0, size = rep_->threads.Size(); i < size; i++) {
    ReportThread *rt = rep_->threads[i];
    rt->stack = SymbolizeStackId(rt->stack_id);
    if (rt->stack)
      rt->stack->suppressable = rt->suppressable;
  }

  // symbolize mutexes
  for (usize i = 0, size = rep_->mutexes.Size(); i < size; i++) {
    ReportMutex *rm = rep_->mutexes[i];
    rm->stack = SymbolizeStackId(rm->stack_id);
  }
}

void ScopedReportBase::AddUniqueTid(Tid unique_tid) {
  rep_->unique_tids.PushBack(unique_tid);
}

void ScopedReportBase::AddThread(const ThreadContext *tctx, bool suppressable) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if ((u32)rep_->threads[i]->id == tctx->tid)
      return;
  }
  auto *rt = New<ReportThread>();
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->os_id = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = internal_strdup(tctx->name);
  rt->parent_tid = tctx->parent_tid;
  rt->thread_type = tctx->thread_type;
  rt->stack_id = tctx->creation_stack_id;
  rt->suppressable = suppressable;
}

#if !SANITIZER_GO
static ThreadContext *FindThreadByTidLocked(Tid tid) {
  ctx->thread_registry.CheckLocked();
  return static_cast<ThreadContext *>(
      ctx->thread_registry.GetThreadLocked(tid));
}

static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
  uptr addr = (uptr)arg;
  ThreadContext *tctx = static_cast<ThreadContext *>(tctx_base);
  if (tctx->status != ThreadStatusRunning)
    return false;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
          (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
}

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  ctx->thread_registry.CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext *>(
      ctx->thread_registry.FindThreadContextLocked(IsInStackOrTls,
                                                   (void *)addr));
  if (!tctx)
    return 0;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
  return tctx;
}
#endif

void ScopedReportBase::AddThread(Tid tid, bool suppressable) {
#if !SANITIZER_GO
  if (const ThreadContext *tctx = FindThreadByTidLocked(tid))
    AddThread(tctx, suppressable);
#endif
}

int ScopedReportBase::AddMutex(uptr addr, StackID creation_stack_id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->addr == addr)
      return rep_->mutexes[i]->id;
  }
  auto *rm = New<ReportMutex>();
  rep_->mutexes.PushBack(rm);
  rm->id = rep_->mutexes.Size() - 1;
  rm->addr = addr;
  rm->stack_id = creation_stack_id;
  return rm->id;
}

void ScopedReportBase::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#if !SANITIZER_GO
  int fd = -1;
  Tid creat_tid = kInvalidTid;
  StackID creat_stack = 0;
  bool closed = false;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack, &closed)) {
    auto *loc = New<ReportLocation>();
    loc->type = ReportLocationFD;
    loc->fd_closed = closed;
    loc->fd = fd;
    loc->tid = creat_tid;
    loc->stack_id = creat_stack;
    rep_->locs.PushBack(loc);
    AddThread(creat_tid);
    return;
  }
  MBlock *b = 0;
  uptr block_begin = 0;
  Allocator *a = allocator();
  if (a->PointerIsMine((void *)addr)) {
    block_begin = (uptr)a->GetBlockBegin((void *)addr);
    if (block_begin)
      b = ctx->metamap.GetBlock(block_begin);
  }
  if (!b)
    b = JavaHeapBlock(addr, &block_begin);
  if (b != 0) {
    auto *loc = New<ReportLocation>();
    loc->type = ReportLocationHeap;
    loc->heap_chunk_start = block_begin;
    loc->heap_chunk_size = b->siz;
    loc->external_tag = b->tag;
    loc->tid = b->tid;
    loc->stack_id = b->stk;
    rep_->locs.PushBack(loc);
    AddThread(b->tid);
    return;
  }
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    auto *loc = New<ReportLocation>();
    loc->type = is_stack ? ReportLocationStack : ReportLocationTLS;
    loc->tid = tctx->tid;
    rep_->locs.PushBack(loc);
    AddThread(tctx);
  }
#endif
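  // Defer data symbolization of the address: remember it and reserve a
  // placeholder slot in locs, which SymbolizeStackElems later fills in via
  // SymbolizeData (or drops if symbolization fails).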
  rep_->added_location_addrs.PushBack({addr, rep_->locs.Size()});
  rep_->locs.PushBack(nullptr);
}

#if !SANITIZER_GO
void ScopedReportBase::AddSleep(StackID stack_id) {
  rep_->sleep = SymbolizeStackId(stack_id);
}
#endif

void ScopedReportBase::SetCount(int count) { rep_->count = count; }

void ScopedReportBase::SetSigNum(int sig) { rep_->signum = sig; }

const ReportDesc *ScopedReportBase::GetReport() const { return rep_; }

ScopedReport::ScopedReport(ReportType typ, uptr tag)
    : ScopedReportBase(typ, tag) {}

ScopedReport::~ScopedReport() {}

// Replays the trace up to last_pos position in the last part
// or up to the provided epoch/sid (whichever is earlier)
// and calls the provided function f for each event.
template <typename Func>
void TraceReplay(Trace *trace, TracePart *last, Event *last_pos, Sid sid,
                 Epoch epoch, Func f) {
  TracePart *part = trace->parts.Front();
  Sid ev_sid = kFreeSid;
  Epoch ev_epoch = kEpochOver;
  for (;;) {
    DCHECK_EQ(part->trace, trace);
    // Note: an event can't start in the last element.
    // Since an event can take up to 2 elements,
    // we ensure we have at least 2 before adding an event.
    Event *end = &part->events[TracePart::kSize - 1];
    if (part == last)
      end = last_pos;
    f(kFreeSid, kEpochOver, nullptr);  // notify about part start
    for (Event *evp = &part->events[0]; evp < end; evp++) {
      Event *evp0 = evp;
      if (!evp->is_access && !evp->is_func) {
        switch (evp->type) {
          case EventType::kTime: {
            auto *ev = reinterpret_cast<EventTime *>(evp);
            ev_sid = static_cast<Sid>(ev->sid);
            ev_epoch = static_cast<Epoch>(ev->epoch);
            if (ev_sid == sid && ev_epoch > epoch)
              return;
            break;
          }
          case EventType::kAccessExt:
            FALLTHROUGH;
          case EventType::kAccessRange:
            FALLTHROUGH;
          case EventType::kLock:
            FALLTHROUGH;
          case EventType::kRLock:
            // These take 2 Event elements.
            evp++;
            break;
          case EventType::kUnlock:
            // This takes 1 Event element.
            break;
        }
      }
      CHECK_NE(ev_sid, kFreeSid);
      CHECK_NE(ev_epoch, kEpochOver);
      f(ev_sid, ev_epoch, evp0);
    }
    if (part == last)
      return;
    part = trace->parts.Next(part);
    CHECK(part);
  }
  CHECK(0);
}

static void RestoreStackMatch(VarSizeStackTrace *pstk, MutexSet *pmset,
                              Vector<uptr> *stack, MutexSet *mset, uptr pc,
                              bool *found) {
  DPrintf2(" MATCHED\n");
  *pmset = *mset;
  stack->PushBack(pc);
  pstk->Init(&(*stack)[0], stack->Size());
  stack->PopBack();
  *found = true;
}

// Checks if addr1|size1 is fully contained in addr2|size2.
// We check for fully contained instead of just overlapping
// because a memory access is always traced once, but can be
// split into multiple accesses in the shadow.
static constexpr bool IsWithinAccess(uptr addr1, uptr size1, uptr addr2,
                                     uptr size2) {
  return addr1 >= addr2 && addr1 + size1 <= addr2 + size2;
}

// Replays the trace of slot sid up to the target event identified
// by epoch/addr/size/typ and restores and returns tid, stack, mutex set
// and tag for that event. If there are multiple such events, it returns
// the last one. Returns false if the event is not present in the trace.
bool RestoreStack(EventType type, Sid sid, Epoch epoch, uptr addr, uptr size,
                  AccessType typ, Tid *ptid, VarSizeStackTrace *pstk,
                  MutexSet *pmset, uptr *ptag) {
  // This function restores stack trace and mutex set for the thread/epoch.
  // It does so by getting stack trace and mutex set at the beginning of
  // trace part, and then replaying the trace till the given epoch.
  DPrintf2("RestoreStack: sid=%u@%u addr=0x%zx/%zu typ=%x\n",
           static_cast<int>(sid), static_cast<int>(epoch), addr, size,
           static_cast<int>(typ));
  ctx->slot_mtx.CheckLocked();  // needed to prevent trace part recycling
  ctx->thread_registry.CheckLocked();
  TidSlot *slot = &ctx->slots[static_cast<uptr>(sid)];
  Tid tid = kInvalidTid;
  // Need to lock the slot mutex as it protects slot->journal.
  slot->mtx.CheckLocked();
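  // The journal records which thread owned this slot at which epoch;
  // walk it to find the thread that owned the slot at the requested epoch.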
  for (uptr i = 0; i < slot->journal.Size(); i++) {
    DPrintf2(" journal: epoch=%d tid=%d\n",
             static_cast<int>(slot->journal[i].epoch), slot->journal[i].tid);
    if (i == slot->journal.Size() - 1 || slot->journal[i + 1].epoch > epoch) {
      tid = slot->journal[i].tid;
      break;
    }
  }
  if (tid == kInvalidTid)
    return false;
  *ptid = tid;
  ThreadContext *tctx =
      static_cast<ThreadContext *>(ctx->thread_registry.GetThreadLocked(tid));
  Trace *trace = &tctx->trace;
  // Snapshot first/last parts and the current position in the last part.
  TracePart *first_part;
  TracePart *last_part;
  Event *last_pos;
  {
    Lock lock(&trace->mtx);
    first_part = trace->parts.Front();
    if (!first_part) {
      DPrintf2("RestoreStack: tid=%d trace=%p no trace parts\n", tid, trace);
      return false;
    }
    last_part = trace->parts.Back();
    last_pos = trace->final_pos;
    if (tctx->thr)
      last_pos = (Event *)atomic_load_relaxed(&tctx->thr->trace_pos);
  }
  DynamicMutexSet mset;
  Vector<uptr> stack;
  uptr prev_pc = 0;
  bool found = false;
  bool is_read = typ & kAccessRead;
  bool is_atomic = typ & kAccessAtomic;
  bool is_free = typ & kAccessFree;
  DPrintf2("RestoreStack: tid=%d parts=[%p-%p] last_pos=%p\n", tid,
           trace->parts.Front(), last_part, last_pos);
  TraceReplay(
      trace, last_part, last_pos, sid, epoch,
      [&](Sid ev_sid, Epoch ev_epoch, Event *evp) {
        if (evp == nullptr) {
          // Each trace part is self-consistent, so we reset state.
          stack.Resize(0);
          mset->Reset();
          prev_pc = 0;
          return;
        }
        bool match = ev_sid == sid && ev_epoch == epoch;
        if (evp->is_access) {
          if (evp->is_func == 0 && evp->type == EventType::kAccessExt &&
              evp->_ == 0)  // NopEvent
            return;
          auto *ev = reinterpret_cast<EventAccess *>(evp);
          uptr ev_addr = RestoreAddr(ev->addr);
          uptr ev_size = 1 << ev->size_log;
          uptr ev_pc =
              prev_pc + ev->pc_delta - (1 << (EventAccess::kPCBits - 1));
          prev_pc = ev_pc;
          DPrintf2(" Access: pc=0x%zx addr=0x%zx/%zu type=%u/%u\n", ev_pc,
                   ev_addr, ev_size, ev->is_read, ev->is_atomic);
          if (match && type == EventType::kAccessExt &&
              IsWithinAccess(addr, size, ev_addr, ev_size) &&
              is_read == ev->is_read && is_atomic == ev->is_atomic && !is_free)
            RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
          return;
        }
        if (evp->is_func) {
          auto *ev = reinterpret_cast<EventFunc *>(evp);
          if (ev->pc) {
            DPrintf2(" FuncEnter: pc=0x%llx\n", ev->pc);
            stack.PushBack(ev->pc);
          } else {
            DPrintf2(" FuncExit\n");
            // We don't log pathologically large stacks in each part;
            // if the stack was truncated, we can have more func exits than
            // entries.
            if (stack.Size())
              stack.PopBack();
          }
          return;
        }
        switch (evp->type) {
          case EventType::kAccessExt: {
            auto *ev = reinterpret_cast<EventAccessExt *>(evp);
            uptr ev_addr = RestoreAddr(ev->addr);
            uptr ev_size = 1 << ev->size_log;
            prev_pc = ev->pc;
            DPrintf2(" AccessExt: pc=0x%llx addr=0x%zx/%zu type=%u/%u\n",
                     ev->pc, ev_addr, ev_size, ev->is_read, ev->is_atomic);
            if (match && type == EventType::kAccessExt &&
                IsWithinAccess(addr, size, ev_addr, ev_size) &&
                is_read == ev->is_read && is_atomic == ev->is_atomic &&
                !is_free)
              RestoreStackMatch(pstk, pmset, &stack, mset, ev->pc, &found);
            break;
          }
          case EventType::kAccessRange: {
            auto *ev = reinterpret_cast<EventAccessRange *>(evp);
            uptr ev_addr = RestoreAddr(ev->addr);
            uptr ev_size =
                (ev->size_hi << EventAccessRange::kSizeLoBits) + ev->size_lo;
            uptr ev_pc = RestoreAddr(ev->pc);
            prev_pc = ev_pc;
            DPrintf2(" Range: pc=0x%zx addr=0x%zx/%zu type=%u/%u\n", ev_pc,
                     ev_addr, ev_size, ev->is_read, ev->is_free);
            if (match && type == EventType::kAccessExt &&
                IsWithinAccess(addr, size, ev_addr, ev_size) &&
                is_read == ev->is_read && !is_atomic && is_free == ev->is_free)
              RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
            break;
          }
          case EventType::kLock:
            FALLTHROUGH;
          case EventType::kRLock: {
            auto *ev = reinterpret_cast<EventLock *>(evp);
            bool is_write = ev->type == EventType::kLock;
            uptr ev_addr = RestoreAddr(ev->addr);
            uptr ev_pc = RestoreAddr(ev->pc);
            StackID stack_id =
                (ev->stack_hi << EventLock::kStackIDLoBits) + ev->stack_lo;
            DPrintf2(" Lock: pc=0x%zx addr=0x%zx stack=%u write=%d\n", ev_pc,
                     ev_addr, stack_id, is_write);
            mset->AddAddr(ev_addr, stack_id, is_write);
            // Events with ev_pc == 0 are written to the beginning of a trace
            // part as the initial mutex set (they are not real).
            if (match && type == EventType::kLock && addr == ev_addr && ev_pc)
              RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
            break;
          }
          case EventType::kUnlock: {
            auto *ev = reinterpret_cast<EventUnlock *>(evp);
            uptr ev_addr = RestoreAddr(ev->addr);
            DPrintf2(" Unlock: addr=0x%zx\n", ev_addr);
            mset->DelAddr(ev_addr);
            break;
          }
          case EventType::kTime:
            // TraceReplay already extracted sid/epoch from it,
            // nothing else to do here.
            break;
        }
      });
  ExtractTagFromStack(pstk, ptag);
  return found;
}

bool RacyStacks::operator==(const RacyStacks &other) const {
  if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
    return true;
  if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
    return true;
  return false;
}

static bool FindRacyStacks(const RacyStacks &hash) {
  for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
    if (hash == ctx->racy_stacks[i]) {
      VPrintf(2, "ThreadSanitizer: suppressing report as doubled (stack)\n");
      return true;
    }
  }
  return false;
}

static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2]) {
  if (!flags()->suppress_equal_stacks)
    return false;
  RacyStacks hash;
  hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
  hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
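  // Double-checked lookup: first search under the read lock, then re-check
  // under the write lock before recording the new pair of stacks.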
  {
    ReadLock lock(&ctx->racy_mtx);
    if (FindRacyStacks(hash))
      return true;
  }
  Lock lock(&ctx->racy_mtx);
  if (FindRacyStacks(hash))
    return true;
  ctx->racy_stacks.PushBack(hash);
  return false;
}

bool OutputReport(ThreadState *thr, ScopedReport &srep) {
  // These should have been checked in ShouldReport.
  // It's too late to check them here, we have already taken locks.
  CHECK(flags()->report_bugs);
  CHECK(!thr->suppress_reports);
  srep.SymbolizeStackElems();
  atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
  const ReportDesc *rep = srep.GetReport();
  CHECK_EQ(thr->current_report, nullptr);
  thr->current_report = rep;
  Suppression *supp = 0;
  uptr pc_or_addr = 0;
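  // Match the report's memory accesses, stacks, threads and locations against
  // user-provided suppressions; the first match wins.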
  for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp);
  if (pc_or_addr != 0) {
    Lock lock(&ctx->fired_suppressions_mtx);
    FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp};
    ctx->fired_suppressions.push_back(s);
  }
  {
    bool suppressed = OnReport(rep, pc_or_addr != 0);
    if (suppressed) {
      thr->current_report = nullptr;
      return false;
    }
  }
  PrintReport(rep);
  __tsan_on_report(rep);
  ctx->nreported++;
  if (flags()->halt_on_error)
    Die();
  thr->current_report = nullptr;
  return true;
}

bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    for (uptr j = 0; j < trace.size; j++) {
      FiredSuppression *s = &ctx->fired_suppressions[k];
      if (trace.trace[j] == s->pc_or_addr) {
        if (s->supp)
          atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
        return true;
      }
    }
  }
  return false;
}

static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    FiredSuppression *s = &ctx->fired_suppressions[k];
    if (addr == s->pc_or_addr) {
      if (s->supp)
        atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
      return true;
    }
  }
  return false;
}

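// Returns true if this is the same race for which we last failed to restore
// the second stack (recorded via last_spurious_race in ReportRace below);
// such races are not reported again.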
static bool SpuriousRace(Shadow old) {
  Shadow last(LoadShadow(&ctx->last_spurious_race));
  return last.sid() == old.sid() && last.epoch() == old.epoch();
}

void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
                AccessType typ0) {
  CheckedMutex::CheckNoLocks();

  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore;

  uptr addr = ShadowToMem(shadow_mem);
  DPrintf("#%d: ReportRace %p\n", thr->tid, (void *)addr);
  if (!ShouldReport(thr, ReportTypeRace))
    return;
  uptr addr_off0, size0;
  cur.GetAccess(&addr_off0, &size0, nullptr);
  uptr addr_off1, size1, typ1;
  old.GetAccess(&addr_off1, &size1, &typ1);
  if (!flags()->report_atomic_races &&
      ((typ0 & kAccessAtomic) || (typ1 & kAccessAtomic)) &&
      !(typ0 & kAccessFree) && !(typ1 & kAccessFree))
    return;
  if (SpuriousRace(old))
    return;

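  // The report describes two memory operations: the current access (cur) and
  // the previous access recorded in the shadow (old).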
  const uptr kMop = 2;
  Shadow s[kMop] = {cur, old};
  uptr addr0 = addr + addr_off0;
  uptr addr1 = addr + addr_off1;
  uptr end0 = addr0 + size0;
  uptr end1 = addr1 + size1;
  uptr addr_min = min(addr0, addr1);
  uptr addr_max = max(end0, end1);
  if (IsExpectedReport(addr_min, addr_max - addr_min))
    return;

  ReportType rep_typ = ReportTypeRace;
  if ((typ0 & kAccessVptr) && (typ1 & kAccessFree))
    rep_typ = ReportTypeVptrUseAfterFree;
  else if (typ0 & kAccessVptr)
    rep_typ = ReportTypeVptrRace;
  else if (typ1 & kAccessFree)
    rep_typ = ReportTypeUseAfterFree;

  if (IsFiredSuppression(ctx, rep_typ, addr))
    return;

  VarSizeStackTrace traces[kMop];
  Tid tids[kMop] = {thr->tid, kInvalidTid};
  uptr tags[kMop] = {kExternalTagNone, kExternalTagNone};

  ObtainCurrentStack(thr, thr->trace_prev_pc, &traces[0], &tags[0]);
  if (IsFiredSuppression(ctx, rep_typ, traces[0]))
    return;

  DynamicMutexSet mset1;
  MutexSet *mset[kMop] = {&thr->mset, mset1};

  // Use alloca, because malloc during signal handling deadlocks.
  ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
  // Take a new scope, as Apple platforms require the locks below to be
  // released before symbolizing in order to avoid a deadlock.
  {
    // We need to lock the slot during RestoreStack because it protects
    // the slot journal.
    Lock slot_lock(&ctx->slots[static_cast<uptr>(s[1].sid())].mtx);
    ThreadRegistryLock l0(&ctx->thread_registry);
    Lock slots_lock(&ctx->slot_mtx);
    if (SpuriousRace(old))
      return;
    if (!RestoreStack(EventType::kAccessExt, s[1].sid(), s[1].epoch(), addr1,
                      size1, typ1, &tids[1], &traces[1], mset[1], &tags[1])) {
      StoreShadow(&ctx->last_spurious_race, old.raw());
      return;
    }

    if (IsFiredSuppression(ctx, rep_typ, traces[1]))
      return;

    if (HandleRacyStacks(thr, traces))
      return;

    // If any of the accesses has a tag, treat this as an "external" race.
    uptr tag = kExternalTagNone;
    for (uptr i = 0; i < kMop; i++) {
      if (tags[i] != kExternalTagNone) {
        rep_typ = ReportTypeExternalRace;
        tag = tags[i];
        break;
      }
    }

    new (rep) ScopedReport(rep_typ, tag);
    for (uptr i = 0; i < kMop; i++)
      rep->AddMemoryAccess(addr, tags[i], s[i], tids[i], traces[i], mset[i]);

    for (uptr i = 0; i < kMop; i++) {
      ThreadContext *tctx = static_cast<ThreadContext *>(
          ctx->thread_registry.GetThreadLocked(tids[i]));
      rep->AddThread(tctx);
    }

    rep->AddLocation(addr_min, addr_max - addr_min);

    if (flags()->print_full_thread_history) {
      const ReportDesc *rep_desc = rep->GetReport();
      for (uptr i = 0; i < rep_desc->threads.Size(); i++) {
        Tid parent_tid = rep_desc->threads[i]->parent_tid;
        if (parent_tid == kMainTid || parent_tid == kInvalidTid)
          continue;
        ThreadContext *parent_tctx = static_cast<ThreadContext *>(
            ctx->thread_registry.GetThreadLocked(parent_tid));
        rep->AddThread(parent_tctx);
      }
    }

#if !SANITIZER_GO
    if (!((typ0 | typ1) & kAccessFree) &&
        s[1].epoch() <= thr->last_sleep_clock.Get(s[1].sid()))
      rep->AddSleep(thr->last_sleep_stack_id);
#endif

#if SANITIZER_APPLE
  }  // Close this scope to release the locks
#endif
  OutputReport(thr, *rep);

  // Need to manually destroy this because we used placement new to allocate it.
  rep->~ScopedReport();
#if !SANITIZER_APPLE
  }
#endif
}

void PrintCurrentStack(ThreadState *thr, uptr pc) {
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  PrintStack(SymbolizeStack(trace));
}

// Always inlining PrintCurrentStack, because LocatePcInTrace assumes
// __sanitizer_print_stack_trace exists in the actual unwound stack, but
// a tail-call to PrintCurrentStack breaks this assumption because
// __sanitizer_print_stack_trace disappears after the tail-call.
// However, this solution is not reliable enough, please see dvyukov's comment
// http://reviews.llvm.org/D19148#406208
// Also see PR27280 comment 2 and 3 for breaking examples and analysis.
ALWAYS_INLINE USED void PrintCurrentStack(uptr pc, bool fast) {
#if !SANITIZER_GO
  uptr bp = GET_CURRENT_FRAME();
  auto *ptrace = New<BufferedStackTrace>();
  ptrace->Unwind(pc, bp, nullptr, fast);

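  // Unwind() returns the innermost frame first, while SymbolizeStack expects
  // TSan trace order (innermost frame last), so reverse the buffer in place.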
  for (uptr i = 0; i < ptrace->size / 2; i++) {
    uptr tmp = ptrace->trace_buffer[i];
    ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
    ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
  }

  if (ready_to_symbolize) {
    PrintStack(SymbolizeStack(*ptrace));
  } else {
    Printf(
        "WARNING: PrintCurrentStack() has been called too early, before "
        "symbolization is possible. Printing unsymbolized stack trace:\n");
    for (unsigned int i = 0; i < ptrace->size; i++)
      Printf(" #%u: 0x%zx\n", i, ptrace->trace[i]);
  }
#endif
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {
  PrintCurrentStack(StackTrace::GetCurrentPc(), false);
}
}  // extern "C"