1//===-- sanitizer_stoptheworld_linux_libcdep.cpp --------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// See sanitizer_stoptheworld.h for details.
10// This implementation was inspired by Markus Gutschke's linuxthreads.cc.
11//
12//===----------------------------------------------------------------------===//
13
14#include "sanitizer_platform.h"
15
16#if SANITIZER_LINUX && \
17 (defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) || \
18 defined(__powerpc64__) || defined(__s390__) || defined(__i386__) || \
19 defined(__arm__) || defined(__hexagon__) || SANITIZER_RISCV64 || \
20 SANITIZER_LOONGARCH64)
21
22#include "sanitizer_stoptheworld.h"
23
24#include "sanitizer_platform_limits_posix.h"
25#include "sanitizer_atomic.h"
26
27#include <errno.h>
28#include <sched.h> // for CLONE_* definitions
29#include <stddef.h>
30#include <sys/prctl.h> // for PR_* definitions
31#include <sys/ptrace.h> // for PTRACE_* definitions
32#include <sys/types.h> // for pid_t
33#include <sys/uio.h> // for iovec
34#include <elf.h> // for NT_PRSTATUS
35#if (defined(__aarch64__) || defined(__powerpc64__) || \
36 defined(__hexagon__) || SANITIZER_RISCV64 || \
37 SANITIZER_LOONGARCH64) && !SANITIZER_ANDROID
38// GLIBC 2.20+ sys/user does not include asm/ptrace.h
39# include <asm/ptrace.h>
40#endif
41#include <sys/user.h> // for user_regs_struct
42# if SANITIZER_MIPS
43// clang-format off
44# include <asm/sgidefs.h> // <asm/sgidefs.h> must be included before <asm/reg.h>
45# include <asm/reg.h> // for mips SP register
46// clang-format on
47# endif
48# include <sys/wait.h> // for signal-related stuff
49
50# ifdef sa_handler
51# undef sa_handler
52# endif
53
54# ifdef sa_sigaction
55# undef sa_sigaction
56# endif
57
58# include "sanitizer_common.h"
59# include "sanitizer_flags.h"
60# include "sanitizer_libc.h"
61# include "sanitizer_linux.h"
62# include "sanitizer_mutex.h"
63# include "sanitizer_placement_new.h"
64
65// Sufficiently old kernel headers don't provide this value, but we can still
66// call prctl with it. If the runtime kernel is new enough, the prctl call will
67// have the desired effect; if the kernel is too old, the call will error and we
68// can ignore said error.
69#ifndef PR_SET_PTRACER
70#define PR_SET_PTRACER 0x59616d61
71#endif
72
73// This module works by spawning a Linux task which then attaches to every
74// thread in the caller process with ptrace. This suspends the threads, and
75// PTRACE_GETREGS can then be used to obtain their register state. The callback
76// supplied to StopTheWorld() is run in the tracer task while the threads are
77// suspended.
78// The tracer task must be placed in a different thread group for ptrace to
79// work, so it cannot be spawned as a pthread. Instead, we use the low-level
80// clone() interface (we want to share the address space with the caller
81// process, so we prefer clone() over fork()).
82//
83// We don't use any libc functions, relying instead on direct syscalls. There
84// are two reasons for this:
85// 1. calling a library function while threads are suspended could cause a
//    deadlock, if one of the threads happens to be holding a libc lock;
87// 2. it's generally not safe to call libc functions from the tracer task,
88// because clone() does not set up a thread-local storage for it. Any
89// thread-local variables used by libc will be shared between the tracer task
90// and the thread which spawned it.
91
92namespace __sanitizer {
93
94class SuspendedThreadsListLinux final : public SuspendedThreadsList {
95 public:
96 SuspendedThreadsListLinux() { thread_ids_.reserve(new_size: 1024); }
97
98 ThreadID GetThreadID(uptr index) const override;
99 uptr ThreadCount() const override;
100 bool ContainsTid(ThreadID thread_id) const;
101 void Append(ThreadID tid);
102
103 PtraceRegistersStatus GetRegistersAndSP(uptr index,
104 InternalMmapVector<uptr> *buffer,
105 uptr *sp) const override;
106
107 private:
108 InternalMmapVector<ThreadID> thread_ids_;
109};
110
// Structure for passing arguments into the tracer thread.
struct TracerThreadArgument {
  // Callback to run while the world is stopped, and its opaque argument.
  StopTheWorldCallback callback;
  void *callback_argument;
  // The tracer thread waits on this mutex while the parent finishes its
  // preparations.
  Mutex mutex;
  // Tracer thread signals its completion by setting done.
  atomic_uintptr_t done;
  // PID of the process that spawned the tracer; used to detect that the
  // parent died before the tracer started.
  uptr parent_pid;
};
122
// This class handles thread suspending/unsuspending in the tracer thread.
class ThreadSuspender {
 public:
  // |pid| identifies the process whose threads will be suspended; |arg| is
  // shared with the parent so completion can be signalled via arg->done.
  explicit ThreadSuspender(pid_t pid, TracerThreadArgument *arg)
    : arg(arg)
    , pid_(pid) {
      CHECK_GE(pid, 0);
    }
  bool SuspendAllThreads();
  void ResumeAllThreads();
  void KillAllThreads();
  SuspendedThreadsListLinux &suspended_threads_list() {
    return suspended_threads_list_;
  }
  // Public so the sync-signal handler can reach arg->done.
  TracerThreadArgument *arg;
 private:
  SuspendedThreadsListLinux suspended_threads_list_;
  pid_t pid_;
  // Attaches to a single thread; returns false if attach/wait failed.
  bool SuspendThread(ThreadID thread_id);
};
143
144bool ThreadSuspender::SuspendThread(ThreadID tid) {
145 int pterrno;
146 if (internal_iserror(retval: internal_ptrace(request: PTRACE_ATTACH, pid: tid, addr: nullptr, data: nullptr),
147 rverrno: &pterrno)) {
148 // Either the thread is dead, or something prevented us from attaching.
149 // Log this event and move on.
150 VReport(1, "Could not attach to thread %zu (errno %d).\n", (uptr)tid,
151 pterrno);
152 return false;
153 } else {
154 VReport(2, "Attached to thread %zu.\n", (uptr)tid);
155 // The thread is not guaranteed to stop before ptrace returns, so we must
156 // wait on it. Note: if the thread receives a signal concurrently,
157 // we can get notification about the signal before notification about stop.
158 // In such case we need to forward the signal to the thread, otherwise
159 // the signal will be missed (as we do PTRACE_DETACH with arg=0) and
160 // any logic relying on signals will break. After forwarding we need to
161 // continue to wait for stopping, because the thread is not stopped yet.
162 // We do ignore delivery of SIGSTOP, because we want to make stop-the-world
163 // as invisible as possible.
164 for (;;) {
165 int status;
166 uptr waitpid_status;
167 HANDLE_EINTR(waitpid_status, internal_waitpid(tid, &status, __WALL));
168 int wperrno;
169 if (internal_iserror(retval: waitpid_status, rverrno: &wperrno)) {
170 // Got a ECHILD error. I don't think this situation is possible, but it
171 // doesn't hurt to report it.
172 VReport(1, "Waiting on thread %zu failed, detaching (errno %d).\n",
173 (uptr)tid, wperrno);
174 internal_ptrace(request: PTRACE_DETACH, pid: tid, addr: nullptr, data: nullptr);
175 return false;
176 }
177 if (WIFSTOPPED(status) && WSTOPSIG(status) != SIGSTOP) {
178 internal_ptrace(request: PTRACE_CONT, pid: tid, addr: nullptr,
179 data: (void*)(uptr)WSTOPSIG(status));
180 continue;
181 }
182 break;
183 }
184 suspended_threads_list_.Append(tid);
185 return true;
186 }
187}
188
189void ThreadSuspender::ResumeAllThreads() {
190 for (uptr i = 0; i < suspended_threads_list_.ThreadCount(); i++) {
191 pid_t tid = suspended_threads_list_.GetThreadID(index: i);
192 int pterrno;
193 if (!internal_iserror(retval: internal_ptrace(request: PTRACE_DETACH, pid: tid, addr: nullptr, data: nullptr),
194 rverrno: &pterrno)) {
195 VReport(2, "Detached from thread %d.\n", tid);
196 } else {
197 // Either the thread is dead, or we are already detached.
198 // The latter case is possible, for instance, if this function was called
199 // from a signal handler.
200 VReport(1, "Could not detach from thread %d (errno %d).\n", tid, pterrno);
201 }
202 }
203}
204
205void ThreadSuspender::KillAllThreads() {
206 for (uptr i = 0; i < suspended_threads_list_.ThreadCount(); i++)
207 internal_ptrace(request: PTRACE_KILL, pid: suspended_threads_list_.GetThreadID(index: i),
208 addr: nullptr, data: nullptr);
209}
210
211bool ThreadSuspender::SuspendAllThreads() {
212 ThreadLister thread_lister(pid_);
213 bool retry = true;
214 InternalMmapVector<ThreadID> threads;
215 threads.reserve(new_size: 128);
216 for (int i = 0; i < 30 && retry; ++i) {
217 retry = false;
218 switch (thread_lister.ListThreads(threads: &threads)) {
219 case ThreadLister::Error:
220 ResumeAllThreads();
221 VReport(1, "Failed to list threads\n");
222 return false;
223 case ThreadLister::Incomplete:
224 VReport(1, "Incomplete list\n");
225 retry = true;
226 break;
227 case ThreadLister::Ok:
228 break;
229 }
230 for (ThreadID tid : threads) {
231 // Are we already attached to this thread?
232 // Currently this check takes linear time, however the number of threads
233 // is usually small.
234 if (suspended_threads_list_.ContainsTid(thread_id: tid))
235 continue;
236 if (SuspendThread(tid))
237 retry = true;
238 else
239 VReport(2, "%llu/status: %s\n", tid, thread_lister.LoadStatus(tid));
240 }
241 if (retry)
242 VReport(1, "SuspendAllThreads retry: %d\n", i);
243 }
244 return suspended_threads_list_.ThreadCount();
245}
246
// Pointer to the ThreadSuspender instance for use in signal handler.
// Set/cleared by TracerThread; also cleared by the die callback and handler.
static ThreadSuspender *thread_suspender_instance = nullptr;

// Synchronous signals that should not be blocked.
static const int kSyncSignals[] = { SIGABRT, SIGILL, SIGFPE, SIGSEGV, SIGBUS,
                                    SIGXCPU, SIGXFSZ };
253
254static void TracerThreadDieCallback() {
255 // Generally a call to Die() in the tracer thread should be fatal to the
256 // parent process as well, because they share the address space.
257 // This really only works correctly if all the threads are suspended at this
258 // point. So we correctly handle calls to Die() from within the callback, but
259 // not those that happen before or after the callback. Hopefully there aren't
260 // a lot of opportunities for that to happen...
261 ThreadSuspender *inst = thread_suspender_instance;
262 if (inst && stoptheworld_tracer_pid == internal_getpid()) {
263 inst->KillAllThreads();
264 thread_suspender_instance = nullptr;
265 }
266}
267
268// Signal handler to wake up suspended threads when the tracer thread dies.
269static void TracerThreadSignalHandler(int signum, __sanitizer_siginfo *siginfo,
270 void *uctx) {
271 SignalContext ctx(siginfo, uctx);
272 Printf(format: "Tracer caught signal %d: addr=%p pc=%p sp=%p\n", signum,
273 (void *)ctx.addr, (void *)ctx.pc, (void *)ctx.sp);
274 ThreadSuspender *inst = thread_suspender_instance;
275 if (inst) {
276 if (signum == SIGABRT)
277 inst->KillAllThreads();
278 else
279 inst->ResumeAllThreads();
280 RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));
281 thread_suspender_instance = nullptr;
282 atomic_store(a: &inst->arg->done, v: 1, mo: memory_order_relaxed);
283 }
284 internal__exit(exitcode: (signum == SIGABRT) ? 1 : 2);
285}
286
// Size of alternative stack for signal handlers in the tracer thread
// (installed with sigaltstack in TracerThread).
static const int kHandlerStackSize = 8192;
289
290// This function will be run as a cloned task.
291static int TracerThread(void* argument) {
292 TracerThreadArgument *tracer_thread_argument =
293 (TracerThreadArgument *)argument;
294
295 internal_prctl(PR_SET_PDEATHSIG, SIGKILL, arg3: 0, arg4: 0, arg5: 0);
296 // Check if parent is already dead.
297 if (internal_getppid() != tracer_thread_argument->parent_pid)
298 internal__exit(exitcode: 4);
299
300 // Wait for the parent thread to finish preparations.
301 tracer_thread_argument->mutex.Lock();
302 tracer_thread_argument->mutex.Unlock();
303
304 RAW_CHECK(AddDieCallback(TracerThreadDieCallback));
305
306 ThreadSuspender thread_suspender(internal_getppid(), tracer_thread_argument);
307 // Global pointer for the signal handler.
308 thread_suspender_instance = &thread_suspender;
309
310 // Alternate stack for signal handling.
311 InternalMmapVector<char> handler_stack_memory(kHandlerStackSize);
312 stack_t handler_stack;
313 internal_memset(s: &handler_stack, c: 0, n: sizeof(handler_stack));
314 handler_stack.ss_sp = handler_stack_memory.data();
315 handler_stack.ss_size = kHandlerStackSize;
316 internal_sigaltstack(ss: &handler_stack, oss: nullptr);
317
318 // Install our handler for synchronous signals. Other signals should be
319 // blocked by the mask we inherited from the parent thread.
320 for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++) {
321 __sanitizer_sigaction act;
322 internal_memset(s: &act, c: 0, n: sizeof(act));
323 act.sigaction = TracerThreadSignalHandler;
324 act.sa_flags = SA_ONSTACK | SA_SIGINFO;
325 internal_sigaction_norestorer(signum: kSyncSignals[i], act: &act, oldact: 0);
326 }
327
328 int exit_code = 0;
329 if (!thread_suspender.SuspendAllThreads()) {
330 VReport(1, "Failed suspending threads.\n");
331 exit_code = 3;
332 } else {
333 tracer_thread_argument->callback(thread_suspender.suspended_threads_list(),
334 tracer_thread_argument->callback_argument);
335 thread_suspender.ResumeAllThreads();
336 exit_code = 0;
337 }
338 RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));
339 thread_suspender_instance = nullptr;
340 atomic_store(a: &tracer_thread_argument->done, v: 1, mo: memory_order_relaxed);
341 return exit_code;
342}
343
344class ScopedStackSpaceWithGuard {
345 public:
346 explicit ScopedStackSpaceWithGuard(uptr stack_size) {
347 stack_size_ = stack_size;
348 guard_size_ = GetPageSizeCached();
349 // FIXME: Omitting MAP_STACK here works in current kernels but might break
350 // in the future.
351 guard_start_ = (uptr)MmapOrDie(size: stack_size_ + guard_size_,
352 mem_type: "ScopedStackWithGuard");
353 CHECK(MprotectNoAccess((uptr)guard_start_, guard_size_));
354 }
355 ~ScopedStackSpaceWithGuard() {
356 UnmapOrDie(addr: (void *)guard_start_, size: stack_size_ + guard_size_);
357 }
358 void *Bottom() const {
359 return (void *)(guard_start_ + stack_size_ + guard_size_);
360 }
361
362 private:
363 uptr stack_size_;
364 uptr guard_size_;
365 uptr guard_start_;
366};
367
// We have a limitation on the stack frame size, so some stuff had to be moved
// into globals.
// Signal masks applied around internal_clone() in StopTheWorld().
static __sanitizer_sigset_t blocked_sigset;
static __sanitizer_sigset_t old_sigset;
372
373class StopTheWorldScope {
374 public:
375 StopTheWorldScope() {
376 // Make this process dumpable. Processes that are not dumpable cannot be
377 // attached to.
378 process_was_dumpable_ = internal_prctl(PR_GET_DUMPABLE, arg2: 0, arg3: 0, arg4: 0, arg5: 0);
379 if (!process_was_dumpable_)
380 internal_prctl(PR_SET_DUMPABLE, arg2: 1, arg3: 0, arg4: 0, arg5: 0);
381 }
382
383 ~StopTheWorldScope() {
384 // Restore the dumpable flag.
385 if (!process_was_dumpable_)
386 internal_prctl(PR_SET_DUMPABLE, arg2: 0, arg3: 0, arg4: 0, arg5: 0);
387 }
388
389 private:
390 int process_was_dumpable_;
391};
392
// When sanitizer output is being redirected to file (i.e. by using log_path),
// the tracer should write to the parent's log instead of trying to open a new
// file. Alert the logging code to the fact that we have a tracer.
struct ScopedSetTracerPID {
  // Publishes the tracer's pid and our own pid for the duration of the
  // stop-the-world operation; cleared again on destruction.
  explicit ScopedSetTracerPID(uptr tracer_pid) {
    stoptheworld_tracer_pid = tracer_pid;
    stoptheworld_tracer_ppid = internal_getpid();
  }
  ~ScopedSetTracerPID() {
    stoptheworld_tracer_pid = 0;
    stoptheworld_tracer_ppid = 0;
  }
};
406
407// This detects whether ptrace is blocked (e.g., by seccomp), by forking and
408// then attempting ptrace.
409// This separate check is necessary because StopTheWorld() creates a thread
410// with a shared virtual address space and shared TLS, and therefore
411// cannot use waitpid() due to the shared errno.
412static void TestPTrace() {
413# if SANITIZER_SPARC
414 // internal_fork() on SPARC actually calls __fork(). We can't safely fork,
415 // because it's possible seccomp has been configured to disallow fork() but
416 // allow clone().
417 VReport(1, "WARNING: skipping TestPTrace() because this is SPARC\n");
418 VReport(1,
419 "If seccomp blocks ptrace, LeakSanitizer may hang without further "
420 "notice\n");
421 VReport(
422 1,
423 "If seccomp does not block ptrace, you can safely ignore this warning\n");
424# else
425 // Heuristic: only check the first time this is called. This is not always
426 // correct (e.g., user manually triggers leak detection, then updates
427 // seccomp, then leak detection is triggered again).
428 static bool checked = false;
429 if (checked)
430 return;
431 checked = true;
432
433 // Hopefully internal_fork() is not too expensive, thanks to copy-on-write.
434 // Besides, this is only called the first time.
435 // Note that internal_fork() on non-SPARC Linux actually calls
436 // SYSCALL(clone); thus, it is reasonable to use it because if seccomp kills
437 // TestPTrace(), it would have killed StopTheWorld() anyway.
438 int pid = internal_fork();
439
440 if (pid < 0) {
441 int rverrno;
442 if (internal_iserror(retval: pid, rverrno: &rverrno))
443 VReport(0, "WARNING: TestPTrace() failed to fork (errno %d)\n", rverrno);
444
445 // We don't abort the sanitizer - it's still worth letting the sanitizer
446 // try.
447 return;
448 }
449
450 if (pid == 0) {
451 // Child subprocess
452
453 // TODO: consider checking return value of internal_ptrace, to handle
454 // SCMP_ACT_ERRNO. However, be careful not to consume too many
455 // resources performing a proper ptrace.
456 internal_ptrace(request: PTRACE_ATTACH, pid: 0, addr: nullptr, data: nullptr);
457 internal__exit(exitcode: 0);
458 } else {
459 int wstatus;
460 internal_waitpid(pid, status: &wstatus, options: 0);
461
462 // Handle SCMP_ACT_KILL
463 if (WIFSIGNALED(wstatus)) {
464 VReport(0,
465 "WARNING: ptrace appears to be blocked (is seccomp enabled?). "
466 "LeakSanitizer may hang.\n");
467 VReport(0, "Child exited with signal %d.\n", WTERMSIG(wstatus));
468 // We don't abort the sanitizer - it's still worth letting the sanitizer
469 // try.
470 }
471 }
472# endif
473}
474
475void StopTheWorld(StopTheWorldCallback callback, void *argument) {
476 TestPTrace();
477
478 StopTheWorldScope in_stoptheworld;
479 // Prepare the arguments for TracerThread.
480 struct TracerThreadArgument tracer_thread_argument;
481 tracer_thread_argument.callback = callback;
482 tracer_thread_argument.callback_argument = argument;
483 tracer_thread_argument.parent_pid = internal_getpid();
484 atomic_store(a: &tracer_thread_argument.done, v: 0, mo: memory_order_relaxed);
485 const uptr kTracerStackSize = 2 * 1024 * 1024;
486 ScopedStackSpaceWithGuard tracer_stack(kTracerStackSize);
487 // Block the execution of TracerThread until after we have set ptrace
488 // permissions.
489 tracer_thread_argument.mutex.Lock();
490 // Signal handling story.
491 // We don't want async signals to be delivered to the tracer thread,
492 // so we block all async signals before creating the thread. An async signal
493 // handler can temporary modify errno, which is shared with this thread.
494 // We ought to use pthread_sigmask here, because sigprocmask has undefined
495 // behavior in multithreaded programs. However, on linux sigprocmask is
496 // equivalent to pthread_sigmask with the exception that pthread_sigmask
497 // does not allow to block some signals used internally in pthread
498 // implementation. We are fine with blocking them here, we are really not
499 // going to pthread_cancel the thread.
500 // The tracer thread should not raise any synchronous signals. But in case it
501 // does, we setup a special handler for sync signals that properly kills the
502 // parent as well. Note: we don't pass CLONE_SIGHAND to clone, so handlers
503 // in the tracer thread won't interfere with user program. Double note: if a
504 // user does something along the lines of 'kill -11 pid', that can kill the
505 // process even if user setup own handler for SEGV.
506 // Thing to watch out for: this code should not change behavior of user code
507 // in any observable way. In particular it should not override user signal
508 // handlers.
509 internal_sigfillset(set: &blocked_sigset);
510 for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++)
511 internal_sigdelset(set: &blocked_sigset, signum: kSyncSignals[i]);
512 int rv = internal_sigprocmask(SIG_BLOCK, set: &blocked_sigset, oldset: &old_sigset);
513 CHECK_EQ(rv, 0);
514 uptr tracer_pid = internal_clone(
515 fn: TracerThread, child_stack: tracer_stack.Bottom(),
516 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_UNTRACED,
517 arg: &tracer_thread_argument, parent_tidptr: nullptr /* parent_tidptr */,
518 newtls: nullptr /* newtls */, child_tidptr: nullptr /* child_tidptr */);
519 internal_sigprocmask(SIG_SETMASK, set: &old_sigset, oldset: 0);
520 int local_errno = 0;
521 if (internal_iserror(retval: tracer_pid, rverrno: &local_errno)) {
522 VReport(1, "Failed spawning a tracer thread (errno %d).\n", local_errno);
523 tracer_thread_argument.mutex.Unlock();
524 } else {
525 ScopedSetTracerPID scoped_set_tracer_pid(tracer_pid);
526 // On some systems we have to explicitly declare that we want to be traced
527 // by the tracer thread.
528 internal_prctl(PR_SET_PTRACER, arg2: tracer_pid, arg3: 0, arg4: 0, arg5: 0);
529 // Allow the tracer thread to start.
530 tracer_thread_argument.mutex.Unlock();
531 // NOTE: errno is shared between this thread and the tracer thread
532 // (clone was called without CLONE_SETTLS / newtls).
533 // internal_waitpid() may call syscall() which can access/spoil errno,
534 // so we can't call it now. Instead we for the tracer thread to finish using
535 // the spin loop below. Man page for sched_yield() says "In the Linux
536 // implementation, sched_yield() always succeeds", so let's hope it does not
537 // spoil errno. Note that this spin loop runs only for brief periods before
538 // the tracer thread has suspended us and when it starts unblocking threads.
539 while (atomic_load(a: &tracer_thread_argument.done, mo: memory_order_relaxed) == 0)
540 sched_yield();
541 // Now the tracer thread is about to exit and does not touch errno,
542 // wait for it.
543 for (;;) {
544 uptr waitpid_status = internal_waitpid(pid: tracer_pid, status: nullptr, __WALL);
545 if (!internal_iserror(retval: waitpid_status, rverrno: &local_errno))
546 break;
547 if (local_errno == EINTR)
548 continue;
549 VReport(1, "Waiting on the tracer thread failed (errno %d).\n",
550 local_errno);
551 break;
552 }
553 }
554}
555
// Platform-specific methods from SuspendedThreadsList.
// For each architecture this selects regs_struct (the register-dump layout)
// and REG_SP (the member holding the stack pointer). Architectures that fetch
// registers with PTRACE_GETREGSET (rather than PTRACE_GETREGS) also define
// ARCH_IOVEC_FOR_GETREGSET and a kExtraRegs list of extra regsets to capture.
#if SANITIZER_ANDROID && defined(__arm__)
typedef pt_regs regs_struct;
#define REG_SP ARM_sp

#elif SANITIZER_LINUX && defined(__arm__)
typedef user_regs regs_struct;
#define REG_SP uregs[13]

#elif defined(__i386__) || defined(__x86_64__)
typedef user_regs_struct regs_struct;
#if defined(__i386__)
#define REG_SP esp
#else
#define REG_SP rsp
#endif
#define ARCH_IOVEC_FOR_GETREGSET
// Support ptrace extensions even when compiled without required kernel support
#ifndef NT_X86_XSTATE
#define NT_X86_XSTATE 0x202
#endif
#ifndef PTRACE_GETREGSET
#define PTRACE_GETREGSET 0x4204
#endif
// Compiler may use FP registers to store pointers.
static constexpr uptr kExtraRegs[] = {NT_X86_XSTATE, NT_FPREGSET};

#elif defined(__powerpc__) || defined(__powerpc64__)
typedef pt_regs regs_struct;
#define REG_SP gpr[PT_R1]

#elif defined(__mips__)
typedef struct user regs_struct;
# define REG_SP regs[EF_R29]

#elif defined(__aarch64__)
typedef struct user_pt_regs regs_struct;
#define REG_SP sp
static constexpr uptr kExtraRegs[] = {0};
#define ARCH_IOVEC_FOR_GETREGSET

#elif defined(__loongarch__)
typedef struct user_pt_regs regs_struct;
#define REG_SP regs[3]
static constexpr uptr kExtraRegs[] = {0};
#define ARCH_IOVEC_FOR_GETREGSET

#elif SANITIZER_RISCV64
typedef struct user_regs_struct regs_struct;
// sys/ucontext.h already defines REG_SP as 2. Undefine it first.
#undef REG_SP
#define REG_SP sp
static constexpr uptr kExtraRegs[] = {0};
#define ARCH_IOVEC_FOR_GETREGSET

#elif defined(__s390__)
typedef _user_regs_struct regs_struct;
#define REG_SP gprs[15]
static constexpr uptr kExtraRegs[] = {0};
#define ARCH_IOVEC_FOR_GETREGSET

#elif defined(__hexagon__)
#include <asm/user.h>
typedef struct user_regs_struct regs_struct;
#define REG_SP r29
static constexpr uptr kExtraRegs[] = {0};
#define ARCH_IOVEC_FOR_GETREGSET

#else
#error "Unsupported architecture"
#endif // SANITIZER_ANDROID && defined(__arm__)
627
628ThreadID SuspendedThreadsListLinux::GetThreadID(uptr index) const {
629 CHECK_LT(index, thread_ids_.size());
630 return thread_ids_[index];
631}
632
633uptr SuspendedThreadsListLinux::ThreadCount() const {
634 return thread_ids_.size();
635}
636
637bool SuspendedThreadsListLinux::ContainsTid(ThreadID thread_id) const {
638 for (uptr i = 0; i < thread_ids_.size(); i++) {
639 if (thread_ids_[i] == thread_id) return true;
640 }
641 return false;
642}
643
644void SuspendedThreadsListLinux::Append(ThreadID tid) {
645 thread_ids_.push_back(element: tid);
646}
647
648PtraceRegistersStatus SuspendedThreadsListLinux::GetRegistersAndSP(
649 uptr index, InternalMmapVector<uptr> *buffer, uptr *sp) const {
650 pid_t tid = GetThreadID(index);
651 constexpr uptr uptr_sz = sizeof(uptr);
652 int pterrno;
653#ifdef ARCH_IOVEC_FOR_GETREGSET
654 auto AppendF = [&](uptr regset) {
655 uptr size = buffer->size();
656 // NT_X86_XSTATE requires 64bit alignment.
657 uptr size_up = RoundUpTo(size, boundary: 8 / uptr_sz);
658 buffer->reserve(new_size: Max<uptr>(a: 1024, b: size_up));
659 struct iovec regset_io;
660 for (;; buffer->resize(new_size: buffer->capacity() * 2)) {
661 buffer->resize(new_size: buffer->capacity());
662 uptr available_bytes = (buffer->size() - size_up) * uptr_sz;
663 regset_io.iov_base = buffer->data() + size_up;
664 regset_io.iov_len = available_bytes;
665 bool fail =
666 internal_iserror(retval: internal_ptrace(PTRACE_GETREGSET, pid: tid,
667 addr: (void *)regset, data: (void *)&regset_io),
668 rverrno: &pterrno);
669 if (fail) {
670 VReport(1, "Could not get regset %p from thread %d (errno %d).\n",
671 (void *)regset, tid, pterrno);
672 buffer->resize(new_size: size);
673 return false;
674 }
675
676 // Far enough from the buffer size, no need to resize and repeat.
677 if (regset_io.iov_len + 64 < available_bytes)
678 break;
679 }
680 buffer->resize(new_size: size_up + RoundUpTo(size: regset_io.iov_len, boundary: uptr_sz) / uptr_sz);
681 return true;
682 };
683
684 buffer->clear();
685 bool fail = !AppendF(NT_PRSTATUS);
686 if (!fail) {
687 // Accept the first available and do not report errors.
688 for (uptr regs : kExtraRegs)
689 if (regs && AppendF(regs))
690 break;
691 }
692#else
693 buffer->resize(RoundUpTo(sizeof(regs_struct), uptr_sz) / uptr_sz);
694 bool fail = internal_iserror(
695 internal_ptrace(PTRACE_GETREGS, tid, nullptr, buffer->data()), &pterrno);
696 if (fail)
697 VReport(1, "Could not get registers from thread %d (errno %d).\n", tid,
698 pterrno);
699#endif
700 if (fail) {
701 // ESRCH means that the given thread is not suspended or already dead.
702 // Therefore it's unsafe to inspect its data (e.g. walk through stack) and
703 // we should notify caller about this.
704 return pterrno == ESRCH ? REGISTERS_UNAVAILABLE_FATAL
705 : REGISTERS_UNAVAILABLE;
706 }
707
708 *sp = reinterpret_cast<regs_struct *>(buffer->data())[0].REG_SP;
709 return REGISTERS_AVAILABLE;
710}
711
712} // namespace __sanitizer
713
#endif  // SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__)
        // || defined(__aarch64__) || defined(__powerpc64__)
        // || defined(__s390__) || defined(__i386__) || defined(__arm__)
        // || defined(__hexagon__) || SANITIZER_RISCV64
        // || SANITIZER_LOONGARCH64
718