//===-- sanitizer_fuchsia.cpp ---------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and other sanitizer
// run-time libraries and implements Fuchsia-specific functions from
// sanitizer_common.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_fuchsia.h"
#if SANITIZER_FUCHSIA

# include <pthread.h>
# include <stdlib.h>
# include <threads.h>  // thrd_current, used by GetThreadSelf below.
# include <unistd.h>
# include <zircon/errors.h>
# include <zircon/process.h>
# include <zircon/syscalls.h>
# include <zircon/utc.h>

# include "sanitizer_common.h"
# include "sanitizer_interface_internal.h"
# include "sanitizer_libc.h"
# include "sanitizer_mutex.h"

namespace __sanitizer {

void NORETURN internal__exit(int exitcode) { _zx_process_exit(exitcode); }

uptr internal_sched_yield() {
  zx_status_t status = _zx_thread_legacy_yield(0u);
  CHECK_EQ(status, ZX_OK);
  return 0;  // The uptr return mirrors sched_yield; callers ignore the value.
}

void internal_usleep(u64 useconds) {
  zx_status_t status = _zx_nanosleep(_zx_deadline_after(ZX_USEC(useconds)));
  CHECK_EQ(status, ZX_OK);
}

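// Note: NanoTime reads the process's UTC clock, so it reports wall-clock time
// and can jump; MonotonicNanoTime below is the steadily-increasing
// counterpart. The CHECKs abort if no UTC clock reference was ever installed
// for this process.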
u64 NanoTime() {
  zx_handle_t utc_clock = _zx_utc_reference_get();
  CHECK_NE(utc_clock, ZX_HANDLE_INVALID);
  zx_time_t time;
  zx_status_t status = _zx_clock_read(utc_clock, &time);
  CHECK_EQ(status, ZX_OK);
  return time;
}

u64 MonotonicNanoTime() { return _zx_clock_get_monotonic(); }

uptr internal_getpid() {
  zx_info_handle_basic_t info;
  zx_status_t status =
      _zx_object_get_info(_zx_process_self(), ZX_INFO_HANDLE_BASIC, &info,
                          sizeof(info), NULL, NULL);
  CHECK_EQ(status, ZX_OK);
  uptr pid = static_cast<uptr>(info.koid);
  CHECK_EQ(pid, info.koid);
  return pid;
}

int internal_dlinfo(void *handle, int request, void *p) { UNIMPLEMENTED(); }

uptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); }

tid_t GetTid() { return GetThreadSelf(); }

void Abort() { abort(); }

int Atexit(void (*function)(void)) { return atexit(function); }

void GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) {
  pthread_attr_t attr;
  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
  void *base;
  size_t size;
  CHECK_EQ(pthread_attr_getstack(&attr, &base, &size), 0);
  CHECK_EQ(pthread_attr_destroy(&attr), 0);

  *stack_bottom = reinterpret_cast<uptr>(base);
  *stack_top = *stack_bottom + size;
}

void InitializePlatformEarly() {}
void CheckASLR() {}
void CheckMPROTECT() {}
void PlatformPrepareForSandboxing(void *args) {}
void DisableCoreDumperIfNecessary() {}
void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
void SetAlternateSignalStack() {}
void UnsetAlternateSignalStack() {}

bool SignalContext::IsStackOverflow() const { return false; }
void SignalContext::DumpAllRegisters(void *context) { UNIMPLEMENTED(); }
const char *SignalContext::Describe() const { UNIMPLEMENTED(); }

void FutexWait(atomic_uint32_t *p, u32 cmp) {
  zx_status_t status = _zx_futex_wait(reinterpret_cast<zx_futex_t *>(p), cmp,
                                      ZX_HANDLE_INVALID, ZX_TIME_INFINITE);
  if (status != ZX_ERR_BAD_STATE)  // Normal race.
    CHECK_EQ(status, ZX_OK);
}

void FutexWake(atomic_uint32_t *p, u32 count) {
  zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(p), count);
  CHECK_EQ(status, ZX_OK);
}
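
// Illustrative only, not part of this file's interface: a minimal lock built
// on these primitives might look roughly like the following (kUnlocked and
// kLocked are hypothetical state values; the real sanitizer mutex lives in
// sanitizer_mutex.h):
//
//   while (atomic_exchange(&state, kLocked, memory_order_acquire) !=
//          kUnlocked)
//     FutexWait(&state, kLocked);  // Sleep until the word may have changed.
//   ... critical section ...
//   atomic_store(&state, kUnlocked, memory_order_release);
//   FutexWake(&state, 1);  // Wake one waiter, if any.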

uptr GetPageSize() { return _zx_system_get_page_size(); }

uptr GetMmapGranularity() { return _zx_system_get_page_size(); }

sanitizer_shadow_bounds_t ShadowBounds;

void InitShadowBounds() { ShadowBounds = __sanitizer_shadow_bounds(); }

uptr GetMaxUserVirtualAddress() {
  InitShadowBounds();
  return ShadowBounds.memory_limit - 1;
}

uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); }

bool ErrorIsOOM(error_t err) { return err == ZX_ERR_NO_MEMORY; }

// For any sanitizer internal that needs to map something that can be unmapped
// later, first attempt to map into a pre-allocated VMAR. This helps reduce
// fragmentation from many small anonymous mmap calls. A good value for this
// VMAR size is the total size of the sanitizer's typical internal objects
// allocated over an "average" process lifetime. Examples include: FakeStack,
// LowLevelAllocator mappings, TwoLevelMap, InternalMmapVector, StackStore,
// CreateAsanThread, etc.
//
// This is roughly equal to the total sum of sanitizer internal mappings for a
// large test case.
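//
// If the heap VMAR fills up (or a request cannot fit into it),
// TryVmoMapSanitizerVmar below transparently falls back to the root VMAR.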
constexpr size_t kSanitizerHeapVmarSize = 13ULL << 20;
static zx_handle_t gSanitizerHeapVmar = ZX_HANDLE_INVALID;

static zx_status_t GetSanitizerHeapVmar(zx_handle_t *vmar) {
  zx_status_t status = ZX_OK;
  if (gSanitizerHeapVmar == ZX_HANDLE_INVALID) {
    CHECK_EQ(kSanitizerHeapVmarSize % GetPageSizeCached(), 0);
    uintptr_t base;
    status = _zx_vmar_allocate(
        _zx_vmar_root_self(),
        ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
        kSanitizerHeapVmarSize, &gSanitizerHeapVmar, &base);
  }
  *vmar = gSanitizerHeapVmar;
  if (status == ZX_OK)
    CHECK_NE(gSanitizerHeapVmar, ZX_HANDLE_INVALID);
  return status;
}

static zx_status_t TryVmoMapSanitizerVmar(zx_vm_option_t options,
                                          size_t vmar_offset, zx_handle_t vmo,
                                          size_t size, uintptr_t *addr,
                                          zx_handle_t *vmar_used = nullptr) {
  zx_handle_t vmar;
  zx_status_t status = GetSanitizerHeapVmar(&vmar);
  if (status != ZX_OK)
    return status;

  status = _zx_vmar_map(vmar, options, vmar_offset, vmo, /*vmo_offset=*/0,
                        size, addr);
  if (vmar_used)
    *vmar_used = vmar;
  if (status == ZX_ERR_NO_RESOURCES || status == ZX_ERR_INVALID_ARGS) {
    // There is no space left in the heap VMAR (or the request cannot fit in
    // it), so fall back to the root VMAR.
    status = _zx_vmar_map(_zx_vmar_root_self(), options, vmar_offset, vmo,
                          /*vmo_offset=*/0, size, addr);
    if (vmar_used)
      *vmar_used = _zx_vmar_root_self();
  }

  return status;
}

static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
                                  bool raw_report, bool die_for_nomem) {
  size = RoundUpTo(size, GetPageSize());

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status,
                              raw_report);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  uintptr_t addr;
  status = TryVmoMapSanitizerVmar(ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                                  /*vmar_offset=*/0, vmo, size, &addr);
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status,
                              raw_report);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  return DoAnonymousMmapOrDie(size, mem_type, raw_report, true);
}

void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type);
}

void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  return DoAnonymousMmapOrDie(size, mem_type, false, false);
}

uptr ReservedAddressRange::Init(uptr init_size, const char *name,
                                uptr fixed_addr) {
  init_size = RoundUpTo(init_size, GetPageSize());
  DCHECK_EQ(os_handle_, ZX_HANDLE_INVALID);
  uintptr_t base;
  zx_handle_t vmar;
  zx_status_t status = _zx_vmar_allocate(
      _zx_vmar_root_self(),
      ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
      init_size, &vmar, &base);
  if (status != ZX_OK)
    ReportMmapFailureAndDie(init_size, name, "zx_vmar_allocate", status);
  base_ = reinterpret_cast<void *>(base);
  size_ = init_size;
  name_ = name;
  os_handle_ = vmar;

  return reinterpret_cast<uptr>(base_);
}

static uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size,
                             void *base, uptr range_size, const char *name,
                             bool die_for_nomem) {
  uptr offset = fixed_addr - reinterpret_cast<uptr>(base);
  map_size = RoundUpTo(map_size, GetPageSize());
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(map_size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(map_size, name, "zx_vmo_create", status);
    return 0;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, name, internal_strlen(name));
  // The new mapping must lie entirely within the reserved range.
  DCHECK_GE(range_size, map_size + offset);
  uintptr_t addr;

  status =
      _zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                   offset, vmo, 0, map_size, &addr);
  _zx_handle_close(vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem) {
      ReportMmapFailureAndDie(map_size, name, "zx_vmar_map", status);
    }
    return 0;
  }
  IncreaseTotalMmap(map_size);
  return addr;
}

uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size,
                               const char *name) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, size_,
                          name ? name : name_, false);
}

uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr map_size,
                                    const char *name) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_, size_,
                          name ? name : name_, true);
}

void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar,
                    bool raw_report) {
  if (!addr || !size)
    return;
  size = RoundUpTo(size, GetPageSize());

  zx_status_t status =
      _zx_vmar_unmap(target_vmar, reinterpret_cast<uintptr_t>(addr), size);
  if (status == ZX_ERR_INVALID_ARGS && target_vmar == gSanitizerHeapVmar) {
    // The mapping may have fallen back to the root VMAR if there was no space
    // left in the heap VMAR, so retry the unmap there.
    status = _zx_vmar_unmap(_zx_vmar_root_self(),
                            reinterpret_cast<uintptr_t>(addr), size);
  }
  if (status != ZX_OK)
    ReportMunmapFailureAndDie(addr, size, status, raw_report);

  DecreaseTotalMmap(size);
}

void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  CHECK_LE(size, size_);
  const zx_handle_t vmar = static_cast<zx_handle_t>(os_handle_);
  if (addr == reinterpret_cast<uptr>(base_)) {
    if (size == size_) {
      // Destroying the vmar effectively unmaps the whole mapping.
      _zx_vmar_destroy(vmar);
      _zx_handle_close(vmar);
      os_handle_ = static_cast<uptr>(ZX_HANDLE_INVALID);
      DecreaseTotalMmap(size);
      return;
    }
  } else {
    CHECK_EQ(addr + size, reinterpret_cast<uptr>(base_) + size_);
  }
  // Partial unmapping does not affect the fact that the initial range is still
  // reserved, and the resulting unmapped memory can't be reused.
  UnmapOrDieVmar(reinterpret_cast<void *>(addr), size, vmar,
                 /*raw_report=*/false);
}
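
// Illustrative only: a typical ReservedAddressRange lifecycle, with
// hypothetical sizes and names. Init reserves address space without
// committing memory; Map/MapOrDie commit pages inside the reservation;
// Unmap releases them again.
//
//   ReservedAddressRange range;
//   uptr base = range.Init(/*init_size=*/1 << 20, "example");
//   uptr page = range.MapOrDie(base, GetPageSize(), "example page");
//   range.Unmap(page, GetPageSize());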

// This should never be called.
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  UNIMPLEMENTED();
}

bool MprotectNoAccess(uptr addr, uptr size) {
  return _zx_vmar_protect(_zx_vmar_root_self(), 0, addr, size) == ZX_OK;
}

bool MprotectReadOnly(uptr addr, uptr size) {
  return _zx_vmar_protect(_zx_vmar_root_self(), ZX_VM_PERM_READ, addr, size) ==
         ZX_OK;
}

bool MprotectReadWrite(uptr addr, uptr size) {
  return _zx_vmar_protect(_zx_vmar_root_self(),
                          ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, addr,
                          size) == ZX_OK;
}

void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type) {
  CHECK_GE(size, GetPageSize());
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status, false);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // Map a larger size to get a chunk of address space big enough that
  // it surely contains an aligned region of the requested size. Then
  // overwrite the aligned middle portion with a mapping from the
  // beginning of the VMO, and unmap the excess before and after.
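  //
  // For example (hypothetical addresses): with size = 0x10000 and
  // alignment = 0x10000, map_size is 0x20000. If the kernel places the
  // mapping at 0x20005000, the aligned region is [0x20010000, 0x20020000);
  // the VMO is re-mapped there with ZX_VM_SPECIFIC_OVERWRITE, and then
  // [0x20005000, 0x20010000) and [0x20020000, 0x20025000) are unmapped.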
  size_t map_size = size + alignment;
  uintptr_t addr;
  zx_handle_t vmar_used;
  status = TryVmoMapSanitizerVmar(ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                                  /*vmar_offset=*/0, vmo, map_size, &addr,
                                  &vmar_used);
  if (status == ZX_OK) {
    uintptr_t map_addr = addr;
    uintptr_t map_end = map_addr + map_size;
    addr = RoundUpTo(map_addr, alignment);
    uintptr_t end = addr + size;
    if (addr != map_addr) {
      zx_info_vmar_t info;
      status = _zx_object_get_info(vmar_used, ZX_INFO_VMAR, &info,
                                   sizeof(info), NULL, NULL);
      if (status == ZX_OK) {
        uintptr_t new_addr;
        status = _zx_vmar_map(
            vmar_used,
            ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC_OVERWRITE,
            addr - info.base, vmo, 0, size, &new_addr);
        if (status == ZX_OK)
          CHECK_EQ(new_addr, addr);
      }
    }
    if (status == ZX_OK && addr != map_addr)
      status = _zx_vmar_unmap(vmar_used, map_addr, addr - map_addr);
    if (status == ZX_OK && end != map_end)
      status = _zx_vmar_unmap(vmar_used, end, map_end - end);
  }
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status, false);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void UnmapOrDie(void *addr, uptr size, bool raw_report) {
  UnmapOrDieVmar(addr, size, gSanitizerHeapVmar, raw_report);
}

void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
  uptr beg_aligned = RoundUpTo(beg, GetPageSize());
  uptr end_aligned = RoundDownTo(end, GetPageSize());
  if (beg_aligned < end_aligned) {
    zx_handle_t root_vmar = _zx_vmar_root_self();
    CHECK_NE(root_vmar, ZX_HANDLE_INVALID);
    zx_status_t status =
        _zx_vmar_op_range(root_vmar, ZX_VMAR_OP_DECOMMIT, beg_aligned,
                          end_aligned - beg_aligned, nullptr, 0);
    CHECK_EQ(status, ZX_OK);
  }
}

void DumpProcessMap() {
  // TODO(mcgrathr): write it
  return;
}

bool IsAccessibleMemoryRange(uptr beg, uptr size) {
  // TODO(mcgrathr): Figure out a better way.
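  // zx_vmo_write copies from [beg, beg + size) into the VMO; if any part of
  // that source range is unmapped or unreadable, the kernel fails the syscall
  // with an error instead of faulting this process, which makes it a usable
  // (read-only) accessibility probe.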
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status == ZX_OK) {
    status = _zx_vmo_write(vmo, reinterpret_cast<const void *>(beg), 0, size);
    _zx_handle_close(vmo);
  }
  return status == ZX_OK;
}

bool TryMemCpy(void *dest, const void *src, uptr n) {
  // TODO: implement.
  return false;
}

// FIXME: implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats) {}

bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len, error_t *errno_p) {
  *errno_p = ZX_ERR_NOT_SUPPORTED;
  return false;
}

void RawWrite(const char *buffer) {
  constexpr size_t size = 128;
  static _Thread_local char line[size];
  static _Thread_local size_t lastLineEnd = 0;
  static _Thread_local size_t cur = 0;

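  // Accumulate characters and flush only whole lines, so that each
  // __sanitizer_log_write call carries complete lines. If the buffer fills up
  // with no newline seen, flush the whole buffer anyway to make progress.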
  while (*buffer) {
    if (cur >= size) {
      if (lastLineEnd == 0)
        lastLineEnd = size;
      __sanitizer_log_write(line, lastLineEnd);
      internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
      cur = cur - lastLineEnd;
      lastLineEnd = 0;
    }
    if (*buffer == '\n')
      lastLineEnd = cur + 1;
    line[cur++] = *buffer++;
  }
  // Flush all complete lines before returning.
  if (lastLineEnd != 0) {
    __sanitizer_log_write(line, lastLineEnd);
    internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
    cur = cur - lastLineEnd;
    lastLineEnd = 0;
  }
}

void CatastrophicErrorWrite(const char *buffer, uptr length) {
  __sanitizer_log_write(buffer, length);
}

char **StoredArgv;
char **StoredEnviron;

char **GetArgv() { return StoredArgv; }
char **GetEnviron() { return StoredEnviron; }

const char *GetEnv(const char *name) {
  if (StoredEnviron) {
    uptr NameLen = internal_strlen(name);
    for (char **Env = StoredEnviron; *Env != 0; Env++) {
      if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=')
        return (*Env) + NameLen + 1;
    }
  }
  return nullptr;
}

uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len) {
  const char *argv0 = "<UNKNOWN>";
  if (StoredArgv && StoredArgv[0]) {
    argv0 = StoredArgv[0];
  }
  internal_strncpy(buf, argv0, buf_len);
  return internal_strlen(buf);
}

uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
  return ReadBinaryName(buf, buf_len);
}

uptr MainThreadStackBase, MainThreadStackSize;

bool GetRandom(void *buffer, uptr length, bool blocking) {
  _zx_cprng_draw(buffer, length);
  return true;
}

u32 GetNumberOfCPUs() { return zx_system_get_num_cpus(); }

uptr GetRSS() { UNIMPLEMENTED(); }

void *internal_start_thread(void *(*func)(void *arg), void *arg) { return 0; }
void internal_join_thread(void *th) {}

void InitializePlatformCommonFlags(CommonFlags *cf) {}

}  // namespace __sanitizer

using namespace __sanitizer;

extern "C" {
void __sanitizer_startup_hook(int argc, char **argv, char **envp,
                              void *stack_base, size_t stack_size) {
  __sanitizer::StoredArgv = argv;
  __sanitizer::StoredEnviron = envp;
  __sanitizer::MainThreadStackBase = reinterpret_cast<uintptr_t>(stack_base);
  __sanitizer::MainThreadStackSize = stack_size;

  EarlySanitizerInit();
}

void __sanitizer_set_report_path(const char *path) {
  // Handle the initialization code in each sanitizer, but no other calls.
  // This setting is never consulted on Fuchsia.
  DCHECK_EQ(path, common_flags()->log_path);
}

void __sanitizer_set_report_fd(void *fd) {
  UNREACHABLE("not available on Fuchsia");
}

const char *__sanitizer_get_report_path() {
  UNREACHABLE("not available on Fuchsia");
}
}  // extern "C"

#endif  // SANITIZER_FUCHSIA