1//===-- sanitizer_posix_libcdep.cpp ---------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is shared between AddressSanitizer and ThreadSanitizer
10// run-time libraries and implements libc-dependent POSIX-specific functions
11// from sanitizer_libc.h.
12//===----------------------------------------------------------------------===//
13
14#include "sanitizer_platform.h"
15
16#if SANITIZER_POSIX
17
18#include "sanitizer_common.h"
19#include "sanitizer_flags.h"
20#include "sanitizer_platform_limits_netbsd.h"
21#include "sanitizer_platform_limits_posix.h"
22#include "sanitizer_platform_limits_solaris.h"
23#include "sanitizer_posix.h"
24#include "sanitizer_procmaps.h"
25
26#include <errno.h>
27#include <fcntl.h>
28#include <pthread.h>
29#include <signal.h>
30#include <stdlib.h>
31#include <sys/mman.h>
32#include <sys/resource.h>
33#include <sys/stat.h>
34#include <sys/time.h>
35#include <sys/types.h>
36#include <sys/wait.h>
37#include <unistd.h>
38
39#if SANITIZER_FREEBSD
40// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before
41// that, it was never implemented. So just define it to zero.
42#undef MAP_NORESERVE
43#define MAP_NORESERVE 0
44#endif
45
46typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
47
48namespace __sanitizer {
49
// Per-signal flag: nonzero when the handler installed for that signal number
// was installed by the sanitizer runtime (see Set/IsSignalHandlerFromSanitizer
// below). Writes are bounds-checked in SetSignalHandlerFromSanitizer; the
// array covers signal numbers [0, 64).
[[maybe_unused]] static atomic_uint8_t signal_handler_is_from_sanitizer[64];
51
52u32 GetUid() {
53 return getuid();
54}
55
56uptr GetThreadSelf() {
57 return (uptr)pthread_self();
58}
59
60void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
61 uptr page_size = GetPageSizeCached();
62 uptr beg_aligned = RoundUpTo(size: beg, boundary: page_size);
63 uptr end_aligned = RoundDownTo(x: end, boundary: page_size);
64 if (beg_aligned < end_aligned)
65 internal_madvise(addr: beg_aligned, length: end_aligned - beg_aligned,
66 SANITIZER_MADVISE_DONTNEED);
67}
68
69void SetShadowRegionHugePageMode(uptr addr, uptr size) {
70#ifdef MADV_NOHUGEPAGE // May not be defined on old systems.
71 if (common_flags()->no_huge_pages_for_shadow)
72 internal_madvise(addr, length: size, MADV_NOHUGEPAGE);
73 else
74 internal_madvise(addr, length: size, MADV_HUGEPAGE);
75#endif // MADV_NOHUGEPAGE
76}
77
// Excludes [addr, addr+length) from core dumps where the platform supports it
// (MADV_DONTDUMP on Linux, MADV_NOCORE on FreeBSD). Returns true on success,
// and trivially true when neither advice flag exists.
bool DontDumpShadowMemory(uptr addr, uptr length) {
#if defined(MADV_DONTDUMP)
  return internal_madvise(addr, length, MADV_DONTDUMP) == 0;
#elif defined(MADV_NOCORE)
  return internal_madvise(addr, length, MADV_NOCORE) == 0;
#else
  return true;
#endif // MADV_DONTDUMP
}
87
88static rlim_t getlim(int res) {
89 rlimit rlim;
90 CHECK_EQ(0, getrlimit(res, &rlim));
91 return rlim.rlim_cur;
92}
93
94static void setlim(int res, rlim_t lim) {
95 struct rlimit rlim;
96 if (getrlimit(resource: res, rlimits: &rlim)) {
97 Report(format: "ERROR: %s getrlimit() failed %d\n", SanitizerToolName, errno);
98 Die();
99 }
100 rlim.rlim_cur = lim;
101 if (setrlimit(resource: res, rlimits: &rlim)) {
102 Report(format: "ERROR: %s setrlimit() failed %d\n", SanitizerToolName, errno);
103 Die();
104 }
105}
106
// Shrinks RLIMIT_CORE to (effectively) disable core dumps when the
// disable_coredump flag is set; sanitizer processes can have enormous
// (shadow-memory) address spaces that make core dumps impractical.
void DisableCoreDumperIfNecessary() {
  if (common_flags()->disable_coredump) {
    rlimit rlim;
    CHECK_EQ(0, getrlimit(RLIMIT_CORE, &rlim));
    // On Linux, if the kernel.core_pattern sysctl starts with a '|' (i.e. it
    // is being piped to a coredump handler such as systemd-coredumpd), the
    // kernel ignores RLIMIT_CORE (since we aren't creating a file in the file
    // system) except for the magic value of 1, which disables coredumps when
    // piping. 1 byte is too small for any kind of valid core dump, so it
    // also disables coredumps if kernel.core_pattern creates files directly.
    // While most piped coredump handlers do respect the crashing processes'
    // RLIMIT_CORE, this is notably not the case for Debian's systemd-coredump
    // due to a local patch that changes sysctl.d/50-coredump.conf to ignore
    // the specified limit and instead use RLIM_INFINITY.
    //
    // The alternative to using RLIMIT_CORE=1 would be to use prctl() with the
    // PR_SET_DUMPABLE flag, however that also prevents ptrace(), so makes it
    // impossible to attach a debugger.
    //
    // Note: we use rlim_max in the Min() call here since that is the upper
    // limit for what can be set without getting an EINVAL error.
    rlim.rlim_cur = Min<rlim_t>(SANITIZER_LINUX ? 1 : 0, b: rlim.rlim_max);
    CHECK_EQ(0, setrlimit(RLIMIT_CORE, &rlim));
  }
}
132
133bool StackSizeIsUnlimited() {
134 rlim_t stack_size = getlim(RLIMIT_STACK);
135 return (stack_size == RLIM_INFINITY);
136}
137
138void SetStackSizeLimitInBytes(uptr limit) {
139 setlim(RLIMIT_STACK, lim: (rlim_t)limit);
140 CHECK(!StackSizeIsUnlimited());
141}
142
143bool AddressSpaceIsUnlimited() {
144 rlim_t as_size = getlim(RLIMIT_AS);
145 return (as_size == RLIM_INFINITY);
146}
147
148void SetAddressSpaceUnlimited() {
149 setlim(RLIMIT_AS, RLIM_INFINITY);
150 CHECK(AddressSpaceIsUnlimited());
151}
152
153void Abort() {
154#if !SANITIZER_GO
155 // If we are handling SIGABRT, unhandle it first.
156 // TODO(vitalybuka): Check if handler belongs to sanitizer.
157 if (GetHandleSignalMode(SIGABRT) != kHandleSignalNo) {
158 struct sigaction sigact;
159 internal_memset(s: &sigact, c: 0, n: sizeof(sigact));
160 sigact.sa_handler = SIG_DFL;
161 internal_sigaction(SIGABRT, act: &sigact, oldact: nullptr);
162 }
163#endif
164
165 abort();
166}
167
// Registers `function` to run at normal process exit. The Go runtime has no
// libc atexit, so this is a successful no-op there.
int Atexit(void (*function)(void)) {
#if SANITIZER_GO
  return 0;
#else
  return atexit(function);
#endif
}
175
// Creates directory `pathname` with permissions 0755; true on success.
bool CreateDir(const char *pathname) {
  return mkdir(pathname, 0755) == 0;
}
177
178bool SupportsColoredOutput(fd_t fd) {
179 return isatty(fd: fd) != 0;
180}
181
182#if !SANITIZER_GO
// TODO(glider): different tools may require different altstack size.
// Returns the size to use for alternate signal stacks: four times the
// system's recommended minimum.
static uptr GetAltStackSize() {
  // Note: since GLIBC_2.31, SIGSTKSZ may be a function call, so this may be
  // more costly than you think. However GetAltStackSize is only called 2-3
  // times per thread, so don't cache the evaluation.
  return SIGSTKSZ * 4;
}
190
191void SetAlternateSignalStack() {
192 stack_t altstack, oldstack;
193 CHECK_EQ(0, sigaltstack(nullptr, &oldstack));
194 // If the alternate stack is already in place, do nothing.
195 // Android always sets an alternate stack, but it's too small for us.
196 if (!SANITIZER_ANDROID && !(oldstack.ss_flags & SS_DISABLE)) return;
197 // TODO(glider): the mapped stack should have the MAP_STACK flag in the
198 // future. It is not required by man 2 sigaltstack now (they're using
199 // malloc()).
200 altstack.ss_size = GetAltStackSize();
201 altstack.ss_sp = (char *)MmapOrDie(size: altstack.ss_size, mem_type: __func__);
202 altstack.ss_flags = 0;
203 CHECK_EQ(0, sigaltstack(&altstack, nullptr));
204}
205
206void UnsetAlternateSignalStack() {
207 stack_t altstack, oldstack;
208 altstack.ss_sp = nullptr;
209 altstack.ss_flags = SS_DISABLE;
210 altstack.ss_size = GetAltStackSize(); // Some sane value required on Darwin.
211 CHECK_EQ(0, sigaltstack(&altstack, &oldstack));
212 UnmapOrDie(addr: oldstack.ss_sp, size: oldstack.ss_size);
213}
214
215bool IsSignalHandlerFromSanitizer(int signum) {
216 return atomic_load(a: &signal_handler_is_from_sanitizer[signum],
217 mo: memory_order_relaxed);
218}
219
220bool SetSignalHandlerFromSanitizer(int signum, bool new_state) {
221 if (signum < 0 || static_cast<unsigned>(signum) >=
222 ARRAY_SIZE(signal_handler_is_from_sanitizer))
223 return false;
224
225 return atomic_exchange(a: &signal_handler_is_from_sanitizer[signum], v: new_state,
226 mo: memory_order_relaxed);
227}
228
229static void MaybeInstallSigaction(int signum,
230 SignalHandlerType handler) {
231 if (GetHandleSignalMode(signum) == kHandleSignalNo) return;
232
233 struct sigaction sigact;
234 internal_memset(s: &sigact, c: 0, n: sizeof(sigact));
235 sigact.sa_sigaction = (sa_sigaction_t)handler;
236 // Do not block the signal from being received in that signal's handler.
237 // Clients are responsible for handling this correctly.
238 sigact.sa_flags = SA_SIGINFO | SA_NODEFER;
239 if (common_flags()->use_sigaltstack) sigact.sa_flags |= SA_ONSTACK;
240 CHECK_EQ(0, internal_sigaction(signum, &sigact, nullptr));
241 VReport(1, "Installed the sigaction for signal %d\n", signum);
242
243 if (common_flags()->cloak_sanitizer_signal_handlers)
244 SetSignalHandlerFromSanitizer(signum, new_state: true);
245}
246
247void InstallDeadlySignalHandlers(SignalHandlerType handler) {
248 // Set the alternate signal stack for the main thread.
249 // This will cause SetAlternateSignalStack to be called twice, but the stack
250 // will be actually set only once.
251 if (common_flags()->use_sigaltstack) SetAlternateSignalStack();
252 MaybeInstallSigaction(SIGSEGV, handler);
253 MaybeInstallSigaction(SIGBUS, handler);
254 MaybeInstallSigaction(SIGABRT, handler);
255 MaybeInstallSigaction(SIGFPE, handler);
256 MaybeInstallSigaction(SIGILL, handler);
257 MaybeInstallSigaction(SIGTRAP, handler);
258}
259
// Heuristically decides whether this deadly signal was a stack overflow,
// based on the fault address's proximity to the stack pointer and on
// si_code. Arch-specific quirks (s390 page-granular fault addresses,
// PowerPC store-with-update large frames) are handled below.
bool SignalContext::IsStackOverflow() const {
  // Access at a reasonable offset above SP, or slightly below it (to account
  // for x86_64 or PowerPC redzone, ARM push of multiple registers, etc) is
  // probably a stack overflow.
#ifdef __s390__
  // On s390, the fault address in siginfo points to start of the page, not
  // to the precise word that was accessed. Mask off the low bits of sp to
  // take it into account.
  bool IsStackAccess = addr >= (sp & ~0xFFF) && addr < sp + 0xFFFF;
#else
  // Let's accept up to a page size away from top of stack. Things like stack
  // probing can trigger accesses with such large offsets.
  bool IsStackAccess = addr + GetPageSizeCached() > sp && addr < sp + 0xFFFF;
#endif

#if __powerpc__
  // Large stack frames can be allocated with e.g.
  //   lis r0,-10000
  //   stdux r1,r1,r0 # store sp to [sp-10000] and update sp by -10000
  // If the store faults then sp will not have been updated, so test above
  // will not work, because the fault address will be more than just "slightly"
  // below sp.
  if (!IsStackAccess && IsAccessibleMemoryRange(pc, 4)) {
    u32 inst = *(unsigned *)pc;
    u32 ra = (inst >> 16) & 0x1F;
    u32 opcd = inst >> 26;
    u32 xo = (inst >> 1) & 0x3FF;
    // Check for store-with-update to sp. The instructions we accept are:
    //   stbu rs,d(ra)   stbux rs,ra,rb
    //   sthu rs,d(ra)   sthux rs,ra,rb
    //   stwu rs,d(ra)   stwux rs,ra,rb
    //   stdu rs,ds(ra)  stdux rs,ra,rb
    // where ra is r1 (the stack pointer).
    if (ra == 1 &&
        (opcd == 39 || opcd == 45 || opcd == 37 || opcd == 62 ||
         (opcd == 31 && (xo == 247 || xo == 439 || xo == 183 || xo == 181))))
      IsStackAccess = true;
  }
#endif // __powerpc__

  // We also check si_code to filter out SEGV caused by something other
  // than hitting the guard page or unmapped memory, like, for example,
  // unaligned memory access.
  auto si = static_cast<const siginfo_t *>(siginfo);
  return IsStackAccess &&
         (si->si_code == si_SEGV_MAPERR || si->si_code == si_SEGV_ACCERR);
}
307
308#endif // SANITIZER_GO
309
310static void SetNonBlock(int fd) {
311 int res = fcntl(fd: fd, F_GETFL, 0);
312 CHECK(!internal_iserror(res, nullptr));
313
314 res |= O_NONBLOCK;
315 res = fcntl(fd: fd, F_SETFL, res);
316 CHECK(!internal_iserror(res, nullptr));
317}
318
// Probes whether all of [beg, beg+size) is readable without faulting in our
// own code: the range is write()n into a pipe, so the kernel performs the
// read and an unmapped/unreadable page surfaces as EFAULT rather than a
// signal. Returns false iff some part of the range is not readable.
bool IsAccessibleMemoryRange(uptr beg, uptr size) {
  while (size) {
    // `read` from `fds[0]` into a dummy buffer to free up the pipe buffer for
    // more `write` is slower than just recreating a pipe.
    int fds[2];
    CHECK_EQ(0, pipe(fds));

    auto cleanup = at_scope_exit(fn: [&]() {
      internal_close(fd: fds[0]);
      internal_close(fd: fds[1]);
    });

    // Non-blocking write end: a full pipe gives a short write, not a hang.
    SetNonBlock(fds[1]);

    int write_errno;
    uptr w = internal_write(fd: fds[1], buf: reinterpret_cast<char *>(beg), count: size);
    if (internal_iserror(retval: w, rverrno: &write_errno)) {
      if (write_errno == EINTR)
        continue;  // Interrupted before anything was written; retry.
      // EFAULT is the only other expected failure: the range is inaccessible.
      CHECK_EQ(EFAULT, write_errno);
      return false;
    }
    // Short write: the first w bytes are proven readable; probe the rest
    // with a fresh pipe on the next iteration.
    size -= w;
    beg += w;
  }

  return true;
}
347
// Copies `n` bytes from `src` to `dest` by bouncing them through a pipe, so
// that an unreadable `src` surfaces as EFAULT from write() instead of a
// fault in our code. Returns false iff part of `src` is unreadable; a fault
// on `dest` during the drain phase trips a CHECK instead.
bool TryMemCpy(void *dest, const void *src, uptr n) {
  if (!n)
    return true;
  int fds[2];
  CHECK_EQ(0, pipe(fds));

  auto cleanup = at_scope_exit(fn: [&]() {
    internal_close(fd: fds[0]);
    internal_close(fd: fds[1]);
  });

  // Both ends are non-blocking so a full pipe yields a short write and can
  // never deadlock against the read loop below.
  SetNonBlock(fds[0]);
  SetNonBlock(fds[1]);

  char *d = static_cast<char *>(dest);
  const char *s = static_cast<const char *>(src);

  while (n) {
    int e;
    // Push a chunk of the source into the pipe; the kernel validates `s`.
    uptr w = internal_write(fd: fds[1], buf: s, count: n);
    if (internal_iserror(retval: w, rverrno: &e)) {
      if (e == EINTR)
        continue;
      CHECK_EQ(EFAULT, e);
      return false;
    }
    s += w;
    n -= w;

    // Drain exactly the bytes just written into the destination.
    while (w) {
      uptr r = internal_read(fd: fds[0], buf: d, count: w);
      if (internal_iserror(retval: r, rverrno: &e)) {
        // The data is known to be in the pipe, so only EINTR is acceptable.
        CHECK_EQ(EINTR, e);
        continue;
      }

      d += r;
      w -= r;
    }
  }

  return true;
}
391
// Called before the process enters a sandbox: snapshot /proc/self/maps while
// it is still readable.
void PlatformPrepareForSandboxing(void *args) {
  // Some kinds of sandboxes may forbid filesystem access, so we won't be able
  // to read the file mappings from /proc/self/maps. Luckily, the sandboxed
  // process won't be able to load additional libraries either, so it's fine
  // to use the cached mappings.
  MemoryMappingLayout::CacheMemoryMappings();
}
399
400static bool MmapFixed(uptr fixed_addr, uptr size, int additional_flags,
401 const char *name) {
402 size = RoundUpTo(size, boundary: GetPageSizeCached());
403 fixed_addr = RoundDownTo(x: fixed_addr, boundary: GetPageSizeCached());
404 uptr p =
405 MmapNamed(addr: (void *)fixed_addr, length: size, PROT_READ | PROT_WRITE,
406 MAP_PRIVATE | MAP_FIXED | additional_flags | MAP_ANON, name);
407 int reserrno;
408 if (internal_iserror(retval: p, rverrno: &reserrno)) {
409 Report(
410 format: "ERROR: %s failed to "
411 "allocate 0x%zx (%zd) bytes at address %p (errno: %d)\n",
412 SanitizerToolName, size, size, (void *)fixed_addr, reserrno);
413 return false;
414 }
415 IncreaseTotalMmap(size);
416 return true;
417}
418
419bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
420 return MmapFixed(fixed_addr, size, MAP_NORESERVE, name);
421}
422
423bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size, const char *name) {
424#if SANITIZER_FREEBSD
425 if (common_flags()->no_huge_pages_for_shadow)
426 return MmapFixedNoReserve(fixed_addr, size, name);
427 // MAP_NORESERVE is implicit with FreeBSD
428 return MmapFixed(fixed_addr, size, MAP_ALIGNED_SUPER, name);
429#else
430 bool r = MmapFixedNoReserve(fixed_addr, size, name);
431 if (r)
432 SetShadowRegionHugePageMode(addr: fixed_addr, size);
433 return r;
434#endif
435}
436
437uptr ReservedAddressRange::Init(uptr size, const char *name, uptr fixed_addr) {
438 base_ = fixed_addr ? MmapFixedNoAccess(fixed_addr, size, name)
439 : MmapNoAccess(size);
440 size_ = size;
441 name_ = name;
442 (void)os_handle_; // unsupported
443 return reinterpret_cast<uptr>(base_);
444}
445
446// Uses fixed_addr for now.
447// Will use offset instead once we've implemented this function for real.
448uptr ReservedAddressRange::Map(uptr fixed_addr, uptr size, const char *name) {
449 return reinterpret_cast<uptr>(
450 MmapFixedOrDieOnFatalError(fixed_addr, size, name));
451}
452
453uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr size,
454 const char *name) {
455 return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, size, name));
456}
457
// Unmaps `size` bytes at `addr` from the reserved range. Only unmapping from
// the front (addr == base_) or the exact tail (addr + size == base_ + size_)
// is supported; anything else trips the CHECK below.
void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  CHECK_LE(size, size_);
  if (addr == reinterpret_cast<uptr>(base_))
    // If we unmap the whole range, just null out the base.
    base_ = (size == size_) ? nullptr : reinterpret_cast<void*>(addr + size);
  else
    CHECK_EQ(addr + size, reinterpret_cast<uptr>(base_) + size_);
  size_ -= size;
  UnmapOrDie(addr: reinterpret_cast<void*>(addr), size);
}
468
469void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
470 return (void *)MmapNamed(addr: (void *)fixed_addr, length: size, PROT_NONE,
471 MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE | MAP_ANON,
472 name);
473}
474
475void *MmapNoAccess(uptr size) {
476 unsigned flags = MAP_PRIVATE | MAP_ANON | MAP_NORESERVE;
477 return (void *)internal_mmap(addr: nullptr, length: size, PROT_NONE, flags, fd: -1, offset: 0);
478}
479
480// This function is defined elsewhere if we intercepted pthread_attr_getstack.
481extern "C" {
482SANITIZER_WEAK_ATTRIBUTE int
483real_pthread_attr_getstack(void *attr, void **addr, size_t *size);
484} // extern "C"
485
486int internal_pthread_attr_getstack(void *attr, void **addr, uptr *size) {
487#if !SANITIZER_GO && !SANITIZER_APPLE
488 if (&real_pthread_attr_getstack)
489 return real_pthread_attr_getstack(attr: (pthread_attr_t *)attr, addr,
490 size: (size_t *)size);
491#endif
492 return pthread_attr_getstack(attr: (pthread_attr_t *)attr, stackaddr: addr, stacksize: (size_t *)size);
493}
494
495#if !SANITIZER_GO
496void AdjustStackSize(void *attr_) {
497 pthread_attr_t *attr = (pthread_attr_t *)attr_;
498 uptr stackaddr = 0;
499 uptr stacksize = 0;
500 internal_pthread_attr_getstack(attr, addr: (void **)&stackaddr, size: &stacksize);
501 // GLibC will return (0 - stacksize) as the stack address in the case when
502 // stacksize is set, but stackaddr is not.
503 bool stack_set = (stackaddr != 0) && (stackaddr + stacksize != 0);
504 // We place a lot of tool data into TLS, account for that.
505 const uptr minstacksize = GetTlsSize() + 128*1024;
506 if (stacksize < minstacksize) {
507 if (!stack_set) {
508 if (stacksize != 0) {
509 VPrintf(1, "Sanitizer: increasing stacksize %zu->%zu\n", stacksize,
510 minstacksize);
511 pthread_attr_setstacksize(attr: attr, stacksize: minstacksize);
512 }
513 } else {
514 Printf(format: "Sanitizer: pre-allocated stack size is insufficient: "
515 "%zu < %zu\n", stacksize, minstacksize);
516 Printf(format: "Sanitizer: pthread_create is likely to fail.\n");
517 }
518 }
519}
520#endif // !SANITIZER_GO
521
// fork()s and exec()s `program` with the given argv/envp, wiring the provided
// descriptors (when not kInvalidFd) to the child's stdin/stdout/stderr. The
// passed-in descriptors are always closed in the parent before returning.
// Returns the child pid, or a negative value if the fork failed.
pid_t StartSubprocess(const char *program, const char *const argv[],
                      const char *const envp[], fd_t stdin_fd, fd_t stdout_fd,
                      fd_t stderr_fd) {
  // The parent keeps no use for the child's descriptor endpoints.
  auto file_closer = at_scope_exit(fn: [&] {
    if (stdin_fd != kInvalidFd) {
      internal_close(fd: stdin_fd);
    }
    if (stdout_fd != kInvalidFd) {
      internal_close(fd: stdout_fd);
    }
    if (stderr_fd != kInvalidFd) {
      internal_close(fd: stderr_fd);
    }
  });

  int pid = internal_fork();

  if (pid < 0) {
    int rverrno;
    if (internal_iserror(retval: pid, rverrno: &rverrno)) {
      Report(format: "WARNING: failed to fork (errno %d)\n", rverrno);
    }
    return pid;
  }

  if (pid == 0) {
    // Child subprocess: redirect the standard streams to the supplied
    // descriptors, then drop every other inherited descriptor before exec.
    if (stdin_fd != kInvalidFd) {
      internal_close(STDIN_FILENO);
      internal_dup2(oldfd: stdin_fd, STDIN_FILENO);
      internal_close(fd: stdin_fd);
    }
    if (stdout_fd != kInvalidFd) {
      internal_close(STDOUT_FILENO);
      internal_dup2(oldfd: stdout_fd, STDOUT_FILENO);
      internal_close(fd: stdout_fd);
    }
    if (stderr_fd != kInvalidFd) {
      internal_close(STDERR_FILENO);
      internal_dup2(oldfd: stderr_fd, STDERR_FILENO);
      internal_close(fd: stderr_fd);
    }

# if SANITIZER_FREEBSD
    internal_close_range(3, ~static_cast<fd_t>(0), 0);
# else
    // Close everything above the standard streams; closing a non-open fd is
    // a harmless EBADF.
    for (int fd = sysconf(_SC_OPEN_MAX); fd > 2; fd--) internal_close(fd);
# endif

    internal_execve(filename: program, argv: const_cast<char **>(&argv[0]),
                    envp: const_cast<char *const *>(envp));
    // Only reached if execve failed.
    internal__exit(exitcode: 1);
  }

  return pid;
}
578
579bool IsProcessRunning(pid_t pid) {
580 int process_status;
581 uptr waitpid_status = internal_waitpid(pid, status: &process_status, WNOHANG);
582 int local_errno;
583 if (internal_iserror(retval: waitpid_status, rverrno: &local_errno)) {
584 VReport(1, "Waiting on the process failed (errno %d).\n", local_errno);
585 return false;
586 }
587 return waitpid_status == 0;
588}
589
590int WaitForProcess(pid_t pid) {
591 int process_status;
592 uptr waitpid_status = internal_waitpid(pid, status: &process_status, options: 0);
593 int local_errno;
594 if (internal_iserror(retval: waitpid_status, rverrno: &local_errno)) {
595 VReport(1, "Waiting on the process failed (errno %d).\n", local_errno);
596 return -1;
597 }
598 return process_status;
599}
600
// True iff the pthread detach-state value denotes a detached thread.
bool IsStateDetached(int state) { return state == PTHREAD_CREATE_DETACHED; }
604
605} // namespace __sanitizer
606
607#endif // SANITIZER_POSIX
608