1//===-- asan_poisoning.cpp ------------------------------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file is a part of AddressSanitizer, an address sanity checker.
10//
11// Shadow memory poisoning by ASan RTL and by user application.
12//===----------------------------------------------------------------------===//
13
14#include "asan_poisoning.h"
15
16#include "asan_report.h"
17#include "asan_stack.h"
18#include "sanitizer_common/sanitizer_atomic.h"
19#include "sanitizer_common/sanitizer_common.h"
20#include "sanitizer_common/sanitizer_flags.h"
21#include "sanitizer_common/sanitizer_interface_internal.h"
22#include "sanitizer_common/sanitizer_libc.h"
23#include "sanitizer_common/sanitizer_ring_buffer.h"
24#include "sanitizer_common/sanitizer_stackdepot.h"
25
26namespace __asan {
27
28using PoisonRecordRingBuffer = RingBuffer<PoisonRecord>;
29
30static atomic_uint8_t can_poison_memory;
31
32static Mutex poison_records_mutex;
33static PoisonRecordRingBuffer *poison_records
34 SANITIZER_GUARDED_BY(poison_records_mutex) = nullptr;
35
36void AddPoisonRecord(const PoisonRecord &new_record) {
37 if (flags()->poison_history_size <= 0)
38 return;
39
40 GenericScopedLock<Mutex> l(&poison_records_mutex);
41
42 if (poison_records == nullptr)
43 poison_records = PoisonRecordRingBuffer::New(Size: flags()->poison_history_size);
44
45 poison_records->push(t: new_record);
46}
47
48bool FindPoisonRecord(uptr addr, PoisonRecord &match) {
49 if (flags()->poison_history_size <= 0)
50 return false;
51
52 GenericScopedLock<Mutex> l(&poison_records_mutex);
53
54 if (poison_records) {
55 for (unsigned int i = 0; i < poison_records->size(); i++) {
56 PoisonRecord record = (*poison_records)[i];
57 if (record.begin <= addr && addr < record.end) {
58 internal_memcpy(dest: &match, src: &record, n: sizeof(record));
59 return true;
60 }
61 }
62 }
63
64 return false;
65}
66
// Explicit lock/unlock entry points for poison_records_mutex, annotated for
// thread-safety analysis. Intended for callers that must hold the mutex
// across several operations (e.g. while iterating records externally).
void SANITIZER_ACQUIRE(poison_records_mutex) AcquirePoisonRecords() {
  poison_records_mutex.Lock();
}

void SANITIZER_RELEASE(poison_records_mutex) ReleasePoisonRecords() {
  poison_records_mutex.Unlock();
}
74
75void SetCanPoisonMemory(bool value) {
76 atomic_store(a: &can_poison_memory, v: value, mo: memory_order_release);
77}
78
79bool CanPoisonMemory() {
80 return atomic_load(a: &can_poison_memory, mo: memory_order_acquire);
81}
82
83void PoisonShadow(uptr addr, uptr size, u8 value) {
84 if (value && !CanPoisonMemory()) return;
85 CHECK(AddrIsAlignedByGranularity(addr));
86 CHECK(AddrIsInMem(addr));
87 CHECK(AddrIsAlignedByGranularity(addr + size));
88 CHECK(AddrIsInMem(addr + size - ASAN_SHADOW_GRANULARITY));
89 CHECK(REAL(memset));
90 FastPoisonShadow(aligned_beg: addr, aligned_size: size, value);
91}
92
93void PoisonShadowPartialRightRedzone(uptr addr,
94 uptr size,
95 uptr redzone_size,
96 u8 value) {
97 if (!CanPoisonMemory()) return;
98 CHECK(AddrIsAlignedByGranularity(addr));
99 CHECK(AddrIsInMem(addr));
100 FastPoisonShadowPartialRightRedzone(aligned_addr: addr, size, redzone_size, value);
101}
102
103struct ShadowSegmentEndpoint {
104 u8 *chunk;
105 s8 offset; // in [0, ASAN_SHADOW_GRANULARITY)
106 s8 value; // = *chunk;
107
108 explicit ShadowSegmentEndpoint(uptr address) {
109 chunk = (u8*)MemToShadow(p: address);
110 offset = address & (ASAN_SHADOW_GRANULARITY - 1);
111 value = *chunk;
112 }
113};
114
115void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
116 uptr end = ptr + size;
117 if (Verbosity()) {
118 Printf(format: "__asan_%spoison_intra_object_redzone [%p,%p) %zd\n",
119 poison ? "" : "un", (void *)ptr, (void *)end, size);
120 if (Verbosity() >= 2)
121 PRINT_CURRENT_STACK();
122 }
123 CHECK(size);
124 CHECK_LE(size, 4096);
125 CHECK(IsAligned(end, ASAN_SHADOW_GRANULARITY));
126 if (!IsAligned(a: ptr, ASAN_SHADOW_GRANULARITY)) {
127 *(u8 *)MemToShadow(p: ptr) =
128 poison ? static_cast<u8>(ptr % ASAN_SHADOW_GRANULARITY) : 0;
129 ptr |= ASAN_SHADOW_GRANULARITY - 1;
130 ptr++;
131 }
132 for (; ptr < end; ptr += ASAN_SHADOW_GRANULARITY)
133 *(u8*)MemToShadow(p: ptr) = poison ? kAsanIntraObjectRedzone : 0;
134}
135
136} // namespace __asan
137
138// ---------------------- Interface ---------------- {{{1
139using namespace __asan;
140
141// Current implementation of __asan_(un)poison_memory_region doesn't check
142// that user program (un)poisons the memory it owns. It poisons memory
143// conservatively, and unpoisons progressively to make sure asan shadow
144// mapping invariant is preserved (see detailed mapping description here:
145// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm).
146//
147// * if user asks to poison region [left, right), the program poisons
148// at least [left, AlignDown(right)).
149// * if user asks to unpoison region [left, right), the program unpoisons
150// at most [AlignDown(left), right).
151void __asan_poison_memory_region(void const volatile *addr, uptr size) {
152 if (!flags()->allow_user_poisoning || size == 0) return;
153 uptr beg_addr = (uptr)addr;
154 uptr end_addr = beg_addr + size;
155 VPrintf(3, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr,
156 (void *)end_addr);
157
158 if (flags()->poison_history_size > 0) {
159 GET_STACK_TRACE(/*max_size=*/16, /*fast=*/false);
160 u32 current_tid = GetCurrentTidOrInvalid();
161
162 u32 stack_id = StackDepotPut(stack);
163
164 PoisonRecord record;
165 record.stack_id = stack_id;
166 record.thread_id = current_tid;
167 record.begin = beg_addr;
168 record.end = end_addr;
169 AddPoisonRecord(new_record: record);
170 }
171
172 ShadowSegmentEndpoint beg(beg_addr);
173 ShadowSegmentEndpoint end(end_addr);
174 if (beg.chunk == end.chunk) {
175 CHECK_LT(beg.offset, end.offset);
176 s8 value = beg.value;
177 CHECK_EQ(value, end.value);
178 // We can only poison memory if the byte in end.offset is unaddressable.
179 // No need to re-poison memory if it is poisoned already.
180 if (value > 0 && value <= end.offset) {
181 if (beg.offset > 0) {
182 *beg.chunk = Min(a: value, b: beg.offset);
183 } else {
184 *beg.chunk = kAsanUserPoisonedMemoryMagic;
185 }
186 }
187 return;
188 }
189 CHECK_LT(beg.chunk, end.chunk);
190 if (beg.offset > 0) {
191 // Mark bytes from beg.offset as unaddressable.
192 if (beg.value == 0) {
193 *beg.chunk = beg.offset;
194 } else {
195 *beg.chunk = Min(a: beg.value, b: beg.offset);
196 }
197 beg.chunk++;
198 }
199 REAL(memset)(beg.chunk, kAsanUserPoisonedMemoryMagic, end.chunk - beg.chunk);
200 // Poison if byte in end.offset is unaddressable.
201 if (end.value > 0 && end.value <= end.offset) {
202 *end.chunk = kAsanUserPoisonedMemoryMagic;
203 }
204}
205
206void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
207 if (!flags()->allow_user_poisoning || size == 0) return;
208 uptr beg_addr = (uptr)addr;
209 uptr end_addr = beg_addr + size;
210 VPrintf(3, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr,
211 (void *)end_addr);
212
213 // Note: we don't need to update the poison tracking here. Since the shadow
214 // memory will be unpoisoned, the poison tracking ring buffer entries will be
215 // ignored.
216
217 ShadowSegmentEndpoint beg(beg_addr);
218 ShadowSegmentEndpoint end(end_addr);
219 if (beg.chunk == end.chunk) {
220 CHECK_LT(beg.offset, end.offset);
221 s8 value = beg.value;
222 CHECK_EQ(value, end.value);
223 // We unpoison memory bytes up to enbytes up to end.offset if it is not
224 // unpoisoned already.
225 if (value != 0) {
226 *beg.chunk = Max(a: value, b: end.offset);
227 }
228 return;
229 }
230 CHECK_LT(beg.chunk, end.chunk);
231 REAL(memset)(beg.chunk, 0, end.chunk - beg.chunk);
232 if (end.offset > 0 && end.value != 0) {
233 *end.chunk = Max(a: end.value, b: end.offset);
234 }
235}
236
237int __asan_address_is_poisoned(void const volatile *addr) {
238 return __asan::AddressIsPoisoned(a: (uptr)addr);
239}
240
241uptr __asan_region_is_poisoned(uptr beg, uptr size) {
242 if (!size)
243 return 0;
244 uptr end = beg + size;
245 if (!AddrIsInMem(a: beg))
246 return beg;
247 if (!AddrIsInMem(a: end))
248 return end;
249 CHECK_LT(beg, end);
250 // First check the first and the last application bytes,
251 // then check the ASAN_SHADOW_GRANULARITY-aligned region by calling
252 // mem_is_zero on the corresponding shadow.
253 if (!__asan::AddressIsPoisoned(a: beg) && !__asan::AddressIsPoisoned(a: end - 1)) {
254 uptr aligned_b = RoundUpTo(size: beg, ASAN_SHADOW_GRANULARITY);
255 uptr aligned_e = RoundDownTo(x: end, ASAN_SHADOW_GRANULARITY);
256 if (aligned_e <= aligned_b)
257 return 0;
258 uptr shadow_beg = MemToShadow(p: aligned_b);
259 uptr shadow_end = MemToShadow(p: aligned_e);
260 CHECK_LE(shadow_beg, shadow_end);
261 if (__sanitizer::mem_is_zero(mem: (const char*)shadow_beg,
262 size: shadow_end - shadow_beg))
263 return 0;
264 }
265 // The fast check failed, so we have a poisoned byte somewhere.
266 // Find it slowly.
267 for (; beg < end; beg++)
268 if (__asan::AddressIsPoisoned(a: beg))
269 return beg;
270 UNREACHABLE("mem_is_zero returned false, but poisoned byte was not found");
271 return 0;
272}
273
// Reports an ASan error if any byte of the |size|-byte region at |p| is
// poisoned. Checks only the first and last bytes (sufficient for the small,
// fixed-size regions used by the unaligned load/store interceptors below);
// on failure, __asan_region_is_poisoned pinpoints the exact bad address.
#define CHECK_SMALL_REGION(p, size, isWrite)                  \
  do {                                                        \
    uptr __p = reinterpret_cast<uptr>(p);                     \
    uptr __size = size;                                       \
    if (UNLIKELY(__asan::AddressIsPoisoned(__p) ||            \
                 __asan::AddressIsPoisoned(__p + __size - 1))) { \
      GET_CURRENT_PC_BP_SP;                                   \
      uptr __bad = __asan_region_is_poisoned(__p, __size);    \
      __asan_report_error(pc, bp, sp, __bad, isWrite, __size, 0);\
    }                                                         \
  } while (false)
285
286
// Checked unaligned memory accesses. Each helper validates the accessed
// region with CHECK_SMALL_REGION (reporting an ASan error on poisoned
// bytes) and then performs the raw load or store through a uuNN pointer
// type, which permits unaligned access.
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u16 __sanitizer_unaligned_load16(const uu16 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u32 __sanitizer_unaligned_load32(const uu32 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u64 __sanitizer_unaligned_load64(const uu64 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store16(uu16 *p, u16 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store32(uu32 *p, u32 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store64(uu64 *p, u64 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}
322
// Marks the shadow of a C++ operator new[] cookie at |p| with
// kAsanArrayCookieMagic so that direct user accesses to the cookie are
// detected. Only active on 64-bit targets and when the poison_array_cookie
// flag is set.
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_poison_cxx_array_cookie(uptr p) {
  if (SANITIZER_WORDSIZE != 64) return;
  if (!flags()->poison_array_cookie) return;
  uptr s = MEM_TO_SHADOW(p);
  *reinterpret_cast<u8*>(s) = kAsanArrayCookieMagic;
}
330
331extern "C" SANITIZER_INTERFACE_ATTRIBUTE
332uptr __asan_load_cxx_array_cookie(uptr *p) {
333 if (SANITIZER_WORDSIZE != 64) return *p;
334 if (!flags()->poison_array_cookie) return *p;
335 uptr s = MEM_TO_SHADOW(reinterpret_cast<uptr>(p));
336 u8 sval = *reinterpret_cast<u8*>(s);
337 if (sval == kAsanArrayCookieMagic) return *p;
338 // If sval is not kAsanArrayCookieMagic it can only be freed memory,
339 // which means that we are going to get double-free. So, return 0 to avoid
340 // infinite loop of destructors. We don't want to report a double-free here
341 // though, so print a warning just in case.
342 // CHECK_EQ(sval, kAsanHeapFreeMagic);
343 if (sval == kAsanHeapFreeMagic) {
344 Report(format: "AddressSanitizer: loaded array cookie from free-d memory; "
345 "expect a double-free report\n");
346 return 0;
347 }
348 // The cookie may remain unpoisoned if e.g. it comes from a custom
349 // operator new defined inside a class.
350 return *p;
351}
352
353// This is a simplified version of __asan_(un)poison_memory_region, which
354// assumes that left border of region to be poisoned is properly aligned.
355static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
356 if (size == 0) return;
357 uptr aligned_size = size & ~(ASAN_SHADOW_GRANULARITY - 1);
358 PoisonShadow(addr, size: aligned_size,
359 value: do_poison ? kAsanStackUseAfterScopeMagic : 0);
360 if (size == aligned_size)
361 return;
362 s8 end_offset = (s8)(size - aligned_size);
363 s8* shadow_end = (s8*)MemToShadow(p: addr + aligned_size);
364 s8 end_value = *shadow_end;
365 if (do_poison) {
366 // If possible, mark all the bytes mapping to last shadow byte as
367 // unaddressable.
368 if (end_value > 0 && end_value <= end_offset)
369 *shadow_end = (s8)kAsanStackUseAfterScopeMagic;
370 } else {
371 // If necessary, mark few first bytes mapping to last shadow byte
372 // as addressable
373 if (end_value != 0)
374 *shadow_end = Max(a: end_value, b: end_offset);
375 }
376}
377
// __asan_set_shadow_xx(addr, size): helpers that fill [addr, addr + size)
// of *shadow* memory (addr is a shadow address, not an application address)
// with the constant encoded in the function name. Emitted by the
// instrumentation to batch shadow updates.
void __asan_set_shadow_00(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0, size);
}

void __asan_set_shadow_01(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x01, size);
}

void __asan_set_shadow_02(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x02, size);
}

void __asan_set_shadow_03(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x03, size);
}

void __asan_set_shadow_04(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x04, size);
}

void __asan_set_shadow_05(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x05, size);
}

void __asan_set_shadow_06(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x06, size);
}

void __asan_set_shadow_07(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x07, size);
}

void __asan_set_shadow_f1(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf1, size);
}

void __asan_set_shadow_f2(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf2, size);
}

void __asan_set_shadow_f3(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf3, size);
}

void __asan_set_shadow_f5(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf5, size);
}

void __asan_set_shadow_f8(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf8, size);
}
429
430void __asan_poison_stack_memory(uptr addr, uptr size) {
431 VReport(1, "poisoning: %p %zx\n", (void *)addr, size);
432 PoisonAlignedStackMemory(addr, size, do_poison: true);
433}
434
435void __asan_unpoison_stack_memory(uptr addr, uptr size) {
436 VReport(1, "unpoisoning: %p %zx\n", (void *)addr, size);
437 PoisonAlignedStackMemory(addr, size, do_poison: false);
438}
439
440static void FixUnalignedStorage(uptr storage_beg, uptr storage_end,
441 uptr &old_beg, uptr &old_end, uptr &new_beg,
442 uptr &new_end) {
443 constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
444 if (UNLIKELY(!AddrIsAlignedByGranularity(storage_end))) {
445 uptr end_down = RoundDownTo(x: storage_end, boundary: granularity);
446 // Ignore the last unaligned granule if the storage is followed by
447 // unpoisoned byte, because we can't poison the prefix anyway. Don't call
448 // AddressIsPoisoned at all if container changes does not affect the last
449 // granule at all.
450 if ((((old_end != new_end) && Max(a: old_end, b: new_end) > end_down) ||
451 ((old_beg != new_beg) && Max(a: old_beg, b: new_beg) > end_down)) &&
452 !AddressIsPoisoned(a: storage_end)) {
453 old_beg = Min(a: end_down, b: old_beg);
454 old_end = Min(a: end_down, b: old_end);
455 new_beg = Min(a: end_down, b: new_beg);
456 new_end = Min(a: end_down, b: new_end);
457 }
458 }
459
460 // Handle misaligned begin and cut it off.
461 if (UNLIKELY(!AddrIsAlignedByGranularity(storage_beg))) {
462 uptr beg_up = RoundUpTo(size: storage_beg, boundary: granularity);
463 // The first unaligned granule needs special handling only if we had bytes
464 // there before and will have none after.
465 if ((new_beg == new_end || new_beg >= beg_up) && old_beg != old_end &&
466 old_beg < beg_up) {
467 // Keep granule prefix outside of the storage unpoisoned.
468 uptr beg_down = RoundDownTo(x: storage_beg, boundary: granularity);
469 *(u8 *)MemToShadow(p: beg_down) = storage_beg - beg_down;
470 old_beg = Max(a: beg_up, b: old_beg);
471 old_end = Max(a: beg_up, b: old_end);
472 new_beg = Max(a: beg_up, b: new_beg);
473 new_end = Max(a: beg_up, b: new_end);
474 }
475 }
476}
477
478void __sanitizer_annotate_contiguous_container(const void *beg_p,
479 const void *end_p,
480 const void *old_mid_p,
481 const void *new_mid_p) {
482 if (!flags()->detect_container_overflow)
483 return;
484 VPrintf(3, "contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p,
485 new_mid_p);
486 uptr storage_beg = reinterpret_cast<uptr>(beg_p);
487 uptr storage_end = reinterpret_cast<uptr>(end_p);
488 uptr old_end = reinterpret_cast<uptr>(old_mid_p);
489 uptr new_end = reinterpret_cast<uptr>(new_mid_p);
490 uptr old_beg = storage_beg;
491 uptr new_beg = storage_beg;
492 uptr granularity = ASAN_SHADOW_GRANULARITY;
493 if (!(storage_beg <= old_end && storage_beg <= new_end &&
494 old_end <= storage_end && new_end <= storage_end)) {
495 GET_STACK_TRACE_FATAL_HERE;
496 ReportBadParamsToAnnotateContiguousContainer(beg: storage_beg, end: storage_end,
497 old_mid: old_end, new_mid: new_end, stack: &stack);
498 }
499 CHECK_LE(storage_end - storage_beg,
500 FIRST_32_SECOND_64(1UL << 30, 1ULL << 40)); // Sanity check.
501
502 if (old_end == new_end)
503 return; // Nothing to do here.
504
505 FixUnalignedStorage(storage_beg, storage_end, old_beg, old_end, new_beg,
506 new_end);
507
508 uptr a = RoundDownTo(x: Min(a: old_end, b: new_end), boundary: granularity);
509 uptr c = RoundUpTo(size: Max(a: old_end, b: new_end), boundary: granularity);
510 uptr d1 = RoundDownTo(x: old_end, boundary: granularity);
511 // uptr d2 = RoundUpTo(old_mid, granularity);
512 // Currently we should be in this state:
513 // [a, d1) is good, [d2, c) is bad, [d1, d2) is partially good.
514 // Make a quick sanity check that we are indeed in this state.
515 //
516 // FIXME: Two of these three checks are disabled until we fix
517 // https://github.com/google/sanitizers/issues/258.
518 // if (d1 != d2)
519 // DCHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
520 //
521 // NOTE: curly brackets for the "if" below to silence a MSVC warning.
522 if (a + granularity <= d1) {
523 DCHECK_EQ(*(u8 *)MemToShadow(a), 0);
524 }
525 // if (d2 + granularity <= c && c <= end)
526 // DCHECK_EQ(*(u8 *)MemToShadow(c - granularity),
527 // kAsanContiguousContainerOOBMagic);
528
529 uptr b1 = RoundDownTo(x: new_end, boundary: granularity);
530 uptr b2 = RoundUpTo(size: new_end, boundary: granularity);
531 // New state:
532 // [a, b1) is good, [b2, c) is bad, [b1, b2) is partially good.
533 if (b1 > a)
534 PoisonShadow(addr: a, size: b1 - a, value: 0);
535 else if (c > b2)
536 PoisonShadow(addr: b2, size: c - b2, value: kAsanContiguousContainerOOBMagic);
537 if (b1 != b2) {
538 CHECK_EQ(b2 - b1, granularity);
539 *(u8 *)MemToShadow(p: b1) = static_cast<u8>(new_end - b1);
540 }
541}
542
543// Annotates a double ended contiguous memory area like std::deque's chunk.
544// It allows detecting buggy accesses to allocated but not used begining
545// or end items of such a container.
546void __sanitizer_annotate_double_ended_contiguous_container(
547 const void *storage_beg_p, const void *storage_end_p,
548 const void *old_container_beg_p, const void *old_container_end_p,
549 const void *new_container_beg_p, const void *new_container_end_p) {
550 if (!flags()->detect_container_overflow)
551 return;
552
553 VPrintf(3, "contiguous_container: %p %p %p %p %p %p\n", storage_beg_p,
554 storage_end_p, old_container_beg_p, old_container_end_p,
555 new_container_beg_p, new_container_end_p);
556
557 uptr storage_beg = reinterpret_cast<uptr>(storage_beg_p);
558 uptr storage_end = reinterpret_cast<uptr>(storage_end_p);
559 uptr old_beg = reinterpret_cast<uptr>(old_container_beg_p);
560 uptr old_end = reinterpret_cast<uptr>(old_container_end_p);
561 uptr new_beg = reinterpret_cast<uptr>(new_container_beg_p);
562 uptr new_end = reinterpret_cast<uptr>(new_container_end_p);
563
564 constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
565
566 if (!(old_beg <= old_end && new_beg <= new_end) ||
567 !(storage_beg <= new_beg && new_end <= storage_end) ||
568 !(storage_beg <= old_beg && old_end <= storage_end)) {
569 GET_STACK_TRACE_FATAL_HERE;
570 ReportBadParamsToAnnotateDoubleEndedContiguousContainer(
571 storage_beg, storage_end, old_container_beg: old_beg, old_container_end: old_end, new_container_beg: new_beg, new_container_end: new_end, stack: &stack);
572 }
573 CHECK_LE(storage_end - storage_beg,
574 FIRST_32_SECOND_64(1UL << 30, 1ULL << 40)); // Sanity check.
575
576 if ((old_beg == old_end && new_beg == new_end) ||
577 (old_beg == new_beg && old_end == new_end))
578 return; // Nothing to do here.
579
580 FixUnalignedStorage(storage_beg, storage_end, old_beg, old_end, new_beg,
581 new_end);
582
583 // Handle non-intersecting new/old containers separately have simpler
584 // intersecting case.
585 if (old_beg == old_end || new_beg == new_end || new_end <= old_beg ||
586 old_end <= new_beg) {
587 if (old_beg != old_end) {
588 // Poisoning the old container.
589 uptr a = RoundDownTo(x: old_beg, boundary: granularity);
590 uptr b = RoundUpTo(size: old_end, boundary: granularity);
591 PoisonShadow(addr: a, size: b - a, value: kAsanContiguousContainerOOBMagic);
592 }
593
594 if (new_beg != new_end) {
595 // Unpoisoning the new container.
596 uptr a = RoundDownTo(x: new_beg, boundary: granularity);
597 uptr b = RoundDownTo(x: new_end, boundary: granularity);
598 PoisonShadow(addr: a, size: b - a, value: 0);
599 if (!AddrIsAlignedByGranularity(a: new_end))
600 *(u8 *)MemToShadow(p: b) = static_cast<u8>(new_end - b);
601 }
602
603 return;
604 }
605
606 // Intersection of old and new containers is not empty.
607 CHECK_LT(new_beg, old_end);
608 CHECK_GT(new_end, old_beg);
609
610 if (new_beg < old_beg) {
611 // Round down because we can't poison prefixes.
612 uptr a = RoundDownTo(x: new_beg, boundary: granularity);
613 // Round down and ignore the [c, old_beg) as its state defined by unchanged
614 // [old_beg, old_end).
615 uptr c = RoundDownTo(x: old_beg, boundary: granularity);
616 PoisonShadow(addr: a, size: c - a, value: 0);
617 } else if (new_beg > old_beg) {
618 // Round down and poison [a, old_beg) because it was unpoisoned only as a
619 // prefix.
620 uptr a = RoundDownTo(x: old_beg, boundary: granularity);
621 // Round down and ignore the [c, new_beg) as its state defined by unchanged
622 // [new_beg, old_end).
623 uptr c = RoundDownTo(x: new_beg, boundary: granularity);
624
625 PoisonShadow(addr: a, size: c - a, value: kAsanContiguousContainerOOBMagic);
626 }
627
628 if (new_end > old_end) {
629 // Round down to poison the prefix.
630 uptr a = RoundDownTo(x: old_end, boundary: granularity);
631 // Round down and handle remainder below.
632 uptr c = RoundDownTo(x: new_end, boundary: granularity);
633 PoisonShadow(addr: a, size: c - a, value: 0);
634 if (!AddrIsAlignedByGranularity(a: new_end))
635 *(u8 *)MemToShadow(p: c) = static_cast<u8>(new_end - c);
636 } else if (new_end < old_end) {
637 // Round up and handle remained below.
638 uptr a2 = RoundUpTo(size: new_end, boundary: granularity);
639 // Round up to poison entire granule as we had nothing in [old_end, c2).
640 uptr c2 = RoundUpTo(size: old_end, boundary: granularity);
641 PoisonShadow(addr: a2, size: c2 - a2, value: kAsanContiguousContainerOOBMagic);
642
643 if (!AddrIsAlignedByGranularity(a: new_end)) {
644 uptr a = RoundDownTo(x: new_end, boundary: granularity);
645 *(u8 *)MemToShadow(p: a) = static_cast<u8>(new_end - a);
646 }
647 }
648}
649
650// Marks the specified number of bytes in a granule as accessible or
651// poisones the whole granule with kAsanContiguousContainerOOBMagic value.
652static void SetContainerGranule(uptr ptr, u8 n) {
653 constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
654 u8 s = (n == granularity) ? 0 : (n ? n : kAsanContiguousContainerOOBMagic);
655 *(u8 *)MemToShadow(p: ptr) = s;
656}
657
658// Performs a byte-by-byte copy of ASan annotations (shadow memory values).
659// Result may be different due to ASan limitations, but result cannot lead
660// to false positives (more memory than requested may get unpoisoned).
661static void SlowCopyContainerAnnotations(uptr src_beg, uptr src_end,
662 uptr dst_beg, uptr dst_end) {
663 constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
664 uptr dst_end_down = RoundDownTo(x: dst_end, boundary: granularity);
665 uptr src_ptr = src_beg;
666 uptr dst_ptr = dst_beg;
667
668 while (dst_ptr < dst_end) {
669 uptr granule_beg = RoundDownTo(x: dst_ptr, boundary: granularity);
670 uptr granule_end = granule_beg + granularity;
671 uptr unpoisoned_bytes = 0;
672
673 uptr end = Min(a: granule_end, b: dst_end);
674 for (; dst_ptr != end; ++dst_ptr, ++src_ptr)
675 if (!AddressIsPoisoned(a: src_ptr))
676 unpoisoned_bytes = dst_ptr - granule_beg + 1;
677
678 if (dst_ptr == dst_end && dst_end != dst_end_down &&
679 !AddressIsPoisoned(a: dst_end))
680 continue;
681
682 if (unpoisoned_bytes != 0 || granule_beg >= dst_beg)
683 SetContainerGranule(ptr: granule_beg, n: unpoisoned_bytes);
684 else if (!AddressIsPoisoned(a: dst_beg))
685 SetContainerGranule(ptr: granule_beg, n: dst_beg - granule_beg);
686 }
687}
688
689// Performs a byte-by-byte copy of ASan annotations (shadow memory values),
690// going through bytes in reversed order, but not reversing annotations.
691// Result may be different due to ASan limitations, but result cannot lead
692// to false positives (more memory than requested may get unpoisoned).
693static void SlowReversedCopyContainerAnnotations(uptr src_beg, uptr src_end,
694 uptr dst_beg, uptr dst_end) {
695 constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
696 uptr dst_end_down = RoundDownTo(x: dst_end, boundary: granularity);
697 uptr src_ptr = src_end;
698 uptr dst_ptr = dst_end;
699
700 while (dst_ptr > dst_beg) {
701 uptr granule_beg = RoundDownTo(x: dst_ptr - 1, boundary: granularity);
702 uptr unpoisoned_bytes = 0;
703
704 uptr end = Max(a: granule_beg, b: dst_beg);
705 for (; dst_ptr != end; --dst_ptr, --src_ptr)
706 if (unpoisoned_bytes == 0 && !AddressIsPoisoned(a: src_ptr - 1))
707 unpoisoned_bytes = dst_ptr - granule_beg;
708
709 if (dst_ptr >= dst_end_down && !AddressIsPoisoned(a: dst_end))
710 continue;
711
712 if (granule_beg == dst_ptr || unpoisoned_bytes != 0)
713 SetContainerGranule(ptr: granule_beg, n: unpoisoned_bytes);
714 else if (!AddressIsPoisoned(a: dst_beg))
715 SetContainerGranule(ptr: granule_beg, n: dst_beg - granule_beg);
716 }
717}
718
719// A helper function for __sanitizer_copy_contiguous_container_annotations,
720// has assumption about begin and end of the container.
721// Should not be used stand alone.
722static void CopyContainerFirstGranuleAnnotation(uptr src_beg, uptr dst_beg) {
723 constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
724 // First granule
725 uptr src_beg_down = RoundDownTo(x: src_beg, boundary: granularity);
726 uptr dst_beg_down = RoundDownTo(x: dst_beg, boundary: granularity);
727 if (dst_beg_down == dst_beg)
728 return;
729 if (!AddressIsPoisoned(a: src_beg))
730 *(u8 *)MemToShadow(p: dst_beg_down) = *(u8 *)MemToShadow(p: src_beg_down);
731 else if (!AddressIsPoisoned(a: dst_beg))
732 SetContainerGranule(ptr: dst_beg_down, n: dst_beg - dst_beg_down);
733}
734
735// A helper function for __sanitizer_copy_contiguous_container_annotations,
736// has assumption about begin and end of the container.
737// Should not be used stand alone.
738static void CopyContainerLastGranuleAnnotation(uptr src_end, uptr dst_end) {
739 constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
740 // Last granule
741 uptr src_end_down = RoundDownTo(x: src_end, boundary: granularity);
742 uptr dst_end_down = RoundDownTo(x: dst_end, boundary: granularity);
743 if (dst_end_down == dst_end || !AddressIsPoisoned(a: dst_end))
744 return;
745 if (AddressIsPoisoned(a: src_end))
746 *(u8 *)MemToShadow(p: dst_end_down) = *(u8 *)MemToShadow(p: src_end_down);
747 else
748 SetContainerGranule(ptr: dst_end_down, n: src_end - src_end_down);
749}
750
751// This function copies ASan memory annotations (poisoned/unpoisoned states)
752// from one buffer to another.
753// It's main purpose is to help with relocating trivially relocatable objects,
754// which memory may be poisoned, without calling copy constructor.
755// However, it does not move memory content itself, only annotations.
756// If the buffers aren't aligned (the distance between buffers isn't
757// granule-aligned)
758// // src_beg % granularity != dst_beg % granularity
759// the function handles this by going byte by byte, slowing down performance.
760// The old buffer annotations are not removed. If necessary,
761// user can unpoison old buffer with __asan_unpoison_memory_region.
762void __sanitizer_copy_contiguous_container_annotations(const void *src_beg_p,
763 const void *src_end_p,
764 const void *dst_beg_p,
765 const void *dst_end_p) {
766 if (!flags()->detect_container_overflow)
767 return;
768
769 VPrintf(3, "contiguous_container_src: %p %p\n", src_beg_p, src_end_p);
770 VPrintf(3, "contiguous_container_dst: %p %p\n", dst_beg_p, dst_end_p);
771
772 uptr src_beg = reinterpret_cast<uptr>(src_beg_p);
773 uptr src_end = reinterpret_cast<uptr>(src_end_p);
774 uptr dst_beg = reinterpret_cast<uptr>(dst_beg_p);
775 uptr dst_end = reinterpret_cast<uptr>(dst_end_p);
776
777 constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
778
779 if (src_beg > src_end || (dst_end - dst_beg) != (src_end - src_beg)) {
780 GET_STACK_TRACE_FATAL_HERE;
781 ReportBadParamsToCopyContiguousContainerAnnotations(
782 old_storage_beg: src_beg, old_storage_end: src_end, new_storage_beg: dst_beg, new_storage_end: dst_end, stack: &stack);
783 }
784
785 if (src_beg == src_end || src_beg == dst_beg)
786 return;
787 // Due to support for overlapping buffers, we may have to copy elements
788 // in reversed order, when destination buffer starts in the middle of
789 // the source buffer (or shares first granule with it).
790 //
791 // When buffers are not granule-aligned (or distance between them,
792 // to be specific), annotatios have to be copied byte by byte.
793 //
794 // The only remaining edge cases involve edge granules,
795 // when the container starts or ends within a granule.
796 uptr src_beg_up = RoundUpTo(size: src_beg, boundary: granularity);
797 uptr src_end_up = RoundUpTo(size: src_end, boundary: granularity);
798 bool copy_in_reversed_order = src_beg < dst_beg && dst_beg <= src_end_up;
799 if (src_beg % granularity != dst_beg % granularity ||
800 RoundDownTo(x: dst_end - 1, boundary: granularity) <= dst_beg) {
801 if (copy_in_reversed_order)
802 SlowReversedCopyContainerAnnotations(src_beg, src_end, dst_beg, dst_end);
803 else
804 SlowCopyContainerAnnotations(src_beg, src_end, dst_beg, dst_end);
805 return;
806 }
807
808 // As buffers are granule-aligned, we can just copy annotations of granules
809 // from the middle.
810 uptr dst_beg_up = RoundUpTo(size: dst_beg, boundary: granularity);
811 uptr dst_end_down = RoundDownTo(x: dst_end, boundary: granularity);
812 if (copy_in_reversed_order)
813 CopyContainerLastGranuleAnnotation(src_end, dst_end);
814 else
815 CopyContainerFirstGranuleAnnotation(src_beg, dst_beg);
816
817 if (dst_beg_up < dst_end_down) {
818 internal_memmove(dest: (u8 *)MemToShadow(p: dst_beg_up),
819 src: (u8 *)MemToShadow(p: src_beg_up),
820 n: (dst_end_down - dst_beg_up) / granularity);
821 }
822
823 if (copy_in_reversed_order)
824 CopyContainerFirstGranuleAnnotation(src_beg, dst_beg);
825 else
826 CopyContainerLastGranuleAnnotation(src_end, dst_end);
827}
828
829static const void *FindBadAddress(uptr begin, uptr end, bool poisoned) {
830 CHECK_LE(begin, end);
831 constexpr uptr kMaxRangeToCheck = 32;
832 if (end - begin > kMaxRangeToCheck * 2) {
833 if (auto *bad = FindBadAddress(begin, end: begin + kMaxRangeToCheck, poisoned))
834 return bad;
835 if (auto *bad = FindBadAddress(begin: end - kMaxRangeToCheck, end, poisoned))
836 return bad;
837 }
838
839 for (uptr i = begin; i < end; ++i)
840 if (AddressIsPoisoned(a: i) != poisoned)
841 return reinterpret_cast<const void *>(i);
842 return nullptr;
843}
844
845const void *__sanitizer_contiguous_container_find_bad_address(
846 const void *beg_p, const void *mid_p, const void *end_p) {
847 if (!flags()->detect_container_overflow)
848 return nullptr;
849 uptr granularity = ASAN_SHADOW_GRANULARITY;
850 uptr beg = reinterpret_cast<uptr>(beg_p);
851 uptr end = reinterpret_cast<uptr>(end_p);
852 uptr mid = reinterpret_cast<uptr>(mid_p);
853 CHECK_LE(beg, mid);
854 CHECK_LE(mid, end);
855 // If the byte after the storage is unpoisoned, everything in the granule
856 // before must stay unpoisoned.
857 uptr annotations_end =
858 (!AddrIsAlignedByGranularity(a: end) && !AddressIsPoisoned(a: end))
859 ? RoundDownTo(x: end, boundary: granularity)
860 : end;
861 beg = Min(a: beg, b: annotations_end);
862 mid = Min(a: mid, b: annotations_end);
863 if (auto *bad = FindBadAddress(begin: beg, end: mid, poisoned: false))
864 return bad;
865 if (auto *bad = FindBadAddress(begin: mid, end: annotations_end, poisoned: true))
866 return bad;
867 return FindBadAddress(begin: annotations_end, end, poisoned: false);
868}
869
870int __sanitizer_verify_contiguous_container(const void *beg_p,
871 const void *mid_p,
872 const void *end_p) {
873 return __sanitizer_contiguous_container_find_bad_address(beg_p, mid_p,
874 end_p) == nullptr;
875}
876
877const void *__sanitizer_double_ended_contiguous_container_find_bad_address(
878 const void *storage_beg_p, const void *container_beg_p,
879 const void *container_end_p, const void *storage_end_p) {
880 if (!flags()->detect_container_overflow)
881 return nullptr;
882 uptr granularity = ASAN_SHADOW_GRANULARITY;
883 uptr storage_beg = reinterpret_cast<uptr>(storage_beg_p);
884 uptr storage_end = reinterpret_cast<uptr>(storage_end_p);
885 uptr beg = reinterpret_cast<uptr>(container_beg_p);
886 uptr end = reinterpret_cast<uptr>(container_end_p);
887
888 // The prefix of the firs granule of the container is unpoisoned.
889 if (beg != end)
890 beg = Max(a: storage_beg, b: RoundDownTo(x: beg, boundary: granularity));
891
892 // If the byte after the storage is unpoisoned, the prefix of the last granule
893 // is unpoisoned.
894 uptr annotations_end = (!AddrIsAlignedByGranularity(a: storage_end) &&
895 !AddressIsPoisoned(a: storage_end))
896 ? RoundDownTo(x: storage_end, boundary: granularity)
897 : storage_end;
898 storage_beg = Min(a: storage_beg, b: annotations_end);
899 beg = Min(a: beg, b: annotations_end);
900 end = Min(a: end, b: annotations_end);
901
902 if (auto *bad = FindBadAddress(begin: storage_beg, end: beg, poisoned: true))
903 return bad;
904 if (auto *bad = FindBadAddress(begin: beg, end, poisoned: false))
905 return bad;
906 if (auto *bad = FindBadAddress(begin: end, end: annotations_end, poisoned: true))
907 return bad;
908 return FindBadAddress(begin: annotations_end, end: storage_end, poisoned: false);
909}
910
911int __sanitizer_verify_double_ended_contiguous_container(
912 const void *storage_beg_p, const void *container_beg_p,
913 const void *container_end_p, const void *storage_end_p) {
914 return __sanitizer_double_ended_contiguous_container_find_bad_address(
915 storage_beg_p, container_beg_p, container_end_p, storage_end_p) ==
916 nullptr;
917}
918
919extern "C" SANITIZER_INTERFACE_ATTRIBUTE
920void __asan_poison_intra_object_redzone(uptr ptr, uptr size) {
921 AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, poison: true);
922}
923
924extern "C" SANITIZER_INTERFACE_ATTRIBUTE
925void __asan_unpoison_intra_object_redzone(uptr ptr, uptr size) {
926 AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, poison: false);
927}
928
929// --- Implementation of LSan-specific functions --- {{{1
930namespace __lsan {
931bool WordIsPoisoned(uptr addr) {
932 return (__asan_region_is_poisoned(beg: addr, size: sizeof(uptr)) != 0);
933}
934}
935