//===-- asan_poisoning.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Shadow memory poisoning by ASan RTL and by user application.
//===----------------------------------------------------------------------===//

#include "asan_poisoning.h"

#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_ring_buffer.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

namespace __asan {

using PoisonRecordRingBuffer = RingBuffer<PoisonRecord>;

static atomic_uint8_t can_poison_memory;

static Mutex poison_records_mutex;
static PoisonRecordRingBuffer *poison_records
    SANITIZER_GUARDED_BY(poison_records_mutex) = nullptr;

void AddPoisonRecord(const PoisonRecord &new_record) {
  if (flags()->poison_history_size <= 0)
    return;

  GenericScopedLock<Mutex> l(&poison_records_mutex);

  if (poison_records == nullptr)
    poison_records = PoisonRecordRingBuffer::New(flags()->poison_history_size);

  poison_records->push(new_record);
}

bool FindPoisonRecord(uptr addr, PoisonRecord &match) {
  if (flags()->poison_history_size <= 0)
    return false;

  GenericScopedLock<Mutex> l(&poison_records_mutex);

  if (poison_records) {
    for (unsigned int i = 0; i < poison_records->size(); i++) {
      PoisonRecord record = (*poison_records)[i];
      if (record.begin <= addr && addr < record.end) {
        internal_memcpy(&match, &record, sizeof(record));
        return true;
      }
    }
  }

  return false;
}

void SANITIZER_ACQUIRE(poison_records_mutex) AcquirePoisonRecords() {
  poison_records_mutex.Lock();
}

void SANITIZER_RELEASE(poison_records_mutex) ReleasePoisonRecords() {
  poison_records_mutex.Unlock();
}

void SetCanPoisonMemory(bool value) {
  atomic_store(&can_poison_memory, value, memory_order_release);
}

bool CanPoisonMemory() {
  return atomic_load(&can_poison_memory, memory_order_acquire);
}

void PoisonShadow(uptr addr, uptr size, u8 value) {
  if (value && !CanPoisonMemory()) return;
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  CHECK(AddrIsAlignedByGranularity(addr + size));
  CHECK(AddrIsInMem(addr + size - ASAN_SHADOW_GRANULARITY));
  CHECK(REAL(memset));
  FastPoisonShadow(addr, size, value);
}

void PoisonShadowPartialRightRedzone(uptr addr,
                                     uptr size,
                                     uptr redzone_size,
                                     u8 value) {
  if (!CanPoisonMemory()) return;
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value);
}

struct ShadowSegmentEndpoint {
  u8 *chunk;
  s8 offset;  // in [0, ASAN_SHADOW_GRANULARITY)
  s8 value;   // = *chunk;

  explicit ShadowSegmentEndpoint(uptr address) {
    chunk = (u8*)MemToShadow(address);
    offset = address & (ASAN_SHADOW_GRANULARITY - 1);
    value = *chunk;
  }
};

void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
  uptr end = ptr + size;
  if (Verbosity()) {
    Printf("__asan_%spoison_intra_object_redzone [%p,%p) %zd\n",
           poison ? "" : "un", (void *)ptr, (void *)end, size);
    if (Verbosity() >= 2)
      PRINT_CURRENT_STACK();
  }
  CHECK(size);
  CHECK_LE(size, 4096);
  CHECK(IsAligned(end, ASAN_SHADOW_GRANULARITY));
  if (!IsAligned(ptr, ASAN_SHADOW_GRANULARITY)) {
    *(u8 *)MemToShadow(ptr) =
        poison ? static_cast<u8>(ptr % ASAN_SHADOW_GRANULARITY) : 0;
    ptr |= ASAN_SHADOW_GRANULARITY - 1;
    ptr++;
  }
  for (; ptr < end; ptr += ASAN_SHADOW_GRANULARITY)
    *(u8*)MemToShadow(ptr) = poison ? kAsanIntraObjectRedzone : 0;
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;

// Current implementation of __asan_(un)poison_memory_region doesn't check
// that user program (un)poisons the memory it owns. It poisons memory
// conservatively, and unpoisons progressively to make sure asan shadow
// mapping invariant is preserved (see detailed mapping description here:
// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm).
//
// * if user asks to poison region [left, right), the program poisons
//   at least [left, AlignDown(right)).
// * if user asks to unpoison region [left, right), the program unpoisons
//   at most [AlignDown(left), right).
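//
// A minimal usage sketch (illustrative only; `my_pool`, `kPoolSize` and `n`
// are hypothetical): user code typically reaches these entry points through
// the convenience macros declared in <sanitizer/asan_interface.h>, which
// compile away when ASan is disabled:
//
//   char my_pool[kPoolSize];
//   ASAN_POISON_MEMORY_REGION(my_pool, kPoolSize);  // __asan_poison_memory_region
//   // ... later, before handing out n bytes from the pool:
//   ASAN_UNPOISON_MEMORY_REGION(my_pool, n);        // __asan_unpoison_memory_region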
void __asan_poison_memory_region(void const volatile *addr, uptr size) {
  if (!flags()->allow_user_poisoning || size == 0) return;
  uptr beg_addr = (uptr)addr;
  uptr end_addr = beg_addr + size;
  VPrintf(3, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr,
          (void *)end_addr);

  if (flags()->poison_history_size > 0) {
    GET_STACK_TRACE(/*max_size=*/16, /*fast=*/false);
    u32 current_tid = GetCurrentTidOrInvalid();

    u32 stack_id = StackDepotPut(stack);

    PoisonRecord record;
    record.stack_id = stack_id;
    record.thread_id = current_tid;
    record.begin = beg_addr;
    record.end = end_addr;
    AddPoisonRecord(record);
  }

  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);
  if (beg.chunk == end.chunk) {
    CHECK_LT(beg.offset, end.offset);
    s8 value = beg.value;
    CHECK_EQ(value, end.value);
    // We can only poison memory if the byte in end.offset is unaddressable.
    // No need to re-poison memory if it is poisoned already.
    if (value > 0 && value <= end.offset) {
      if (beg.offset > 0) {
        *beg.chunk = Min(value, beg.offset);
      } else {
        *beg.chunk = kAsanUserPoisonedMemoryMagic;
      }
    }
    return;
  }
  CHECK_LT(beg.chunk, end.chunk);
  if (beg.offset > 0) {
    // Mark bytes from beg.offset as unaddressable.
    if (beg.value == 0) {
      *beg.chunk = beg.offset;
    } else {
      *beg.chunk = Min(beg.value, beg.offset);
    }
    beg.chunk++;
  }
  REAL(memset)(beg.chunk, kAsanUserPoisonedMemoryMagic, end.chunk - beg.chunk);
  // Poison if byte in end.offset is unaddressable.
  if (end.value > 0 && end.value <= end.offset) {
    *end.chunk = kAsanUserPoisonedMemoryMagic;
  }
}

void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
  if (!flags()->allow_user_poisoning || size == 0) return;
  uptr beg_addr = (uptr)addr;
  uptr end_addr = beg_addr + size;
  VPrintf(3, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr,
          (void *)end_addr);

  // Note: we don't need to update the poison tracking here. Since the shadow
  // memory will be unpoisoned, the poison tracking ring buffer entries will be
  // ignored.

  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);
  if (beg.chunk == end.chunk) {
    CHECK_LT(beg.offset, end.offset);
    s8 value = beg.value;
    CHECK_EQ(value, end.value);
    // We unpoison memory bytes up to end.offset if they are not unpoisoned
    // already.
    if (value != 0) {
      *beg.chunk = Max(value, end.offset);
    }
    return;
  }
  CHECK_LT(beg.chunk, end.chunk);
  REAL(memset)(beg.chunk, 0, end.chunk - beg.chunk);
  if (end.offset > 0 && end.value != 0) {
    *end.chunk = Max(end.value, end.offset);
  }
}

int __asan_address_is_poisoned(void const volatile *addr) {
  return __asan::AddressIsPoisoned((uptr)addr);
}

uptr __asan_region_is_poisoned(uptr beg, uptr size) {
  if (!size)
    return 0;
  uptr last = beg + size - 1;
  if (!AddrIsInMem(beg))
    return beg;
  if (!AddrIsInMem(last))
    return last;
  CHECK_LE(beg, last);
  // First check the last application byte, i.e. last granule, then check
  // the ASAN_SHADOW_GRANULARITY-aligned region by calling mem_is_zero
  // on the corresponding shadow (first granule is fully checked).
  if (!__asan::AddressIsPoisoned(last)) {
    uptr aligned_b = RoundDownTo(beg, ASAN_SHADOW_GRANULARITY);
    uptr aligned_e = RoundDownTo(last, ASAN_SHADOW_GRANULARITY);
    if (aligned_b == aligned_e)  // one granule case => last check is enough.
      return 0;
    CHECK_LT(aligned_b, aligned_e);
    uptr shadow_beg = MemToShadow(aligned_b);
    uptr shadow_end = MemToShadow(aligned_e);
    CHECK_LT(shadow_beg, shadow_end);
    if (__sanitizer::mem_is_zero((const char*)shadow_beg,
                                 shadow_end - shadow_beg))
      return 0;
  }
  // The fast check failed, so we have a poisoned byte somewhere.
  // Find it slowly.
  for (; beg <= last; beg++)
    if (__asan::AddressIsPoisoned(beg))
      return beg;
  UNREACHABLE("mem_is_zero returned false, but poisoned byte was not found");
  return 0;
}

#define CHECK_SMALL_REGION(p, size, isWrite)                      \
  do {                                                            \
    uptr __p = reinterpret_cast<uptr>(p);                         \
    uptr __size = size;                                           \
    if (UNLIKELY(__asan::AddressIsPoisoned(__p) ||                \
                 __asan::AddressIsPoisoned(__p + __size - 1))) {  \
      GET_CURRENT_PC_BP_SP;                                       \
      uptr __bad = __asan_region_is_poisoned(__p, __size);        \
      __asan_report_error(pc, bp, sp, __bad, isWrite, __size, 0); \
    }                                                             \
  } while (false)


extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u16 __sanitizer_unaligned_load16(const uu16 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u32 __sanitizer_unaligned_load32(const uu32 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u64 __sanitizer_unaligned_load64(const uu64 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store16(uu16 *p, u16 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store32(uu32 *p, u32 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store64(uu64 *p, u64 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_poison_cxx_array_cookie(uptr p) {
  if (SANITIZER_WORDSIZE != 64) return;
  if (!flags()->poison_array_cookie) return;
  uptr s = MEM_TO_SHADOW(p);
  *reinterpret_cast<u8*>(s) = kAsanArrayCookieMagic;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __asan_load_cxx_array_cookie(uptr *p) {
  if (SANITIZER_WORDSIZE != 64) return *p;
  if (!flags()->poison_array_cookie) return *p;
  uptr s = MEM_TO_SHADOW(reinterpret_cast<uptr>(p));
  u8 sval = *reinterpret_cast<u8*>(s);
  if (sval == kAsanArrayCookieMagic) return *p;
  // If sval is not kAsanArrayCookieMagic it can only be freed memory,
  // which means that we are going to get a double-free. So, return 0 to avoid
  // an infinite loop of destructors. We don't want to report a double-free
  // here though, so print a warning just in case.
  // CHECK_EQ(sval, kAsanHeapFreeMagic);
  if (sval == kAsanHeapFreeMagic) {
    Report("AddressSanitizer: loaded array cookie from free-d memory; "
           "expect a double-free report\n");
    return 0;
  }
  // The cookie may remain unpoisoned if e.g. it comes from a custom
  // operator new defined inside a class.
  return *p;
}

// This is a simplified version of __asan_(un)poison_memory_region, which
// assumes that the left border of the region to be poisoned is properly
// aligned.
static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
  if (size == 0) return;
  uptr aligned_size = size & ~(ASAN_SHADOW_GRANULARITY - 1);
  PoisonShadow(addr, aligned_size,
               do_poison ? kAsanStackUseAfterScopeMagic : 0);
  if (size == aligned_size)
    return;
  s8 end_offset = (s8)(size - aligned_size);
  s8* shadow_end = (s8*)MemToShadow(addr + aligned_size);
  s8 end_value = *shadow_end;
  if (do_poison) {
    // If possible, mark all the bytes mapping to the last shadow byte as
    // unaddressable.
    if (end_value > 0 && end_value <= end_offset)
      *shadow_end = (s8)kAsanStackUseAfterScopeMagic;
  } else {
    // If necessary, mark the first few bytes mapping to the last shadow byte
    // as addressable.
    if (end_value != 0)
      *shadow_end = Max(end_value, end_offset);
  }
}

void __asan_set_shadow_00(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0, size);
}

void __asan_set_shadow_01(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x01, size);
}

void __asan_set_shadow_02(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x02, size);
}

void __asan_set_shadow_03(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x03, size);
}

void __asan_set_shadow_04(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x04, size);
}

void __asan_set_shadow_05(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x05, size);
}

void __asan_set_shadow_06(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x06, size);
}

void __asan_set_shadow_07(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x07, size);
}

void __asan_set_shadow_f1(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf1, size);
}

void __asan_set_shadow_f2(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf2, size);
}

void __asan_set_shadow_f3(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf3, size);
}

void __asan_set_shadow_f5(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf5, size);
}

void __asan_set_shadow_f8(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf8, size);
}

void __asan_poison_stack_memory(uptr addr, uptr size) {
  VReport(1, "poisoning: %p %zx\n", (void *)addr, size);
  PoisonAlignedStackMemory(addr, size, true);
}

void __asan_unpoison_stack_memory(uptr addr, uptr size) {
  VReport(1, "unpoisoning: %p %zx\n", (void *)addr, size);
  PoisonAlignedStackMemory(addr, size, false);
}

static void FixUnalignedStorage(uptr storage_beg, uptr storage_end,
                                uptr &old_beg, uptr &old_end, uptr &new_beg,
                                uptr &new_end) {
  constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
  if (UNLIKELY(!AddrIsAlignedByGranularity(storage_end))) {
    uptr end_down = RoundDownTo(storage_end, granularity);
    // Ignore the last unaligned granule if the storage is followed by an
    // unpoisoned byte, because we can't poison the prefix anyway. Don't call
    // AddressIsPoisoned at all if the container change does not affect the
    // last granule at all.
    if ((((old_end != new_end) && Max(old_end, new_end) > end_down) ||
         ((old_beg != new_beg) && Max(old_beg, new_beg) > end_down)) &&
        !AddressIsPoisoned(storage_end)) {
      old_beg = Min(end_down, old_beg);
      old_end = Min(end_down, old_end);
      new_beg = Min(end_down, new_beg);
      new_end = Min(end_down, new_end);
    }
  }

  // Handle misaligned begin and cut it off.
  if (UNLIKELY(!AddrIsAlignedByGranularity(storage_beg))) {
    uptr beg_up = RoundUpTo(storage_beg, granularity);
    // The first unaligned granule needs special handling only if we had bytes
    // there before and will have none after.
    if ((new_beg == new_end || new_beg >= beg_up) && old_beg != old_end &&
        old_beg < beg_up) {
      // Keep the granule prefix outside of the storage unpoisoned.
      uptr beg_down = RoundDownTo(storage_beg, granularity);
      *(u8 *)MemToShadow(beg_down) = storage_beg - beg_down;
      old_beg = Max(beg_up, old_beg);
      old_end = Max(beg_up, old_end);
      new_beg = Max(beg_up, new_beg);
      new_end = Max(beg_up, new_end);
    }
  }
}

void __sanitizer_annotate_contiguous_container(const void *beg_p,
                                               const void *end_p,
                                               const void *old_mid_p,
                                               const void *new_mid_p) {
  if (!flags()->detect_container_overflow)
    return;
  VPrintf(3, "contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p,
          new_mid_p);
  uptr storage_beg = reinterpret_cast<uptr>(beg_p);
  uptr storage_end = reinterpret_cast<uptr>(end_p);
  uptr old_end = reinterpret_cast<uptr>(old_mid_p);
  uptr new_end = reinterpret_cast<uptr>(new_mid_p);
  uptr old_beg = storage_beg;
  uptr new_beg = storage_beg;
  uptr granularity = ASAN_SHADOW_GRANULARITY;
  if (!(storage_beg <= old_end && storage_beg <= new_end &&
        old_end <= storage_end && new_end <= storage_end)) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportBadParamsToAnnotateContiguousContainer(storage_beg, storage_end,
                                                 old_end, new_end, &stack);
  }
  CHECK_LE(storage_end - storage_beg,
           FIRST_32_SECOND_64(1UL << 30, 1ULL << 40));  // Sanity check.

  if (old_end == new_end)
    return;  // Nothing to do here.

  FixUnalignedStorage(storage_beg, storage_end, old_beg, old_end, new_beg,
                      new_end);

  uptr a = RoundDownTo(Min(old_end, new_end), granularity);
  uptr c = RoundUpTo(Max(old_end, new_end), granularity);
  uptr d1 = RoundDownTo(old_end, granularity);
  // uptr d2 = RoundUpTo(old_mid, granularity);
  // Currently we should be in this state:
  // [a, d1) is good, [d2, c) is bad, [d1, d2) is partially good.
  // Make a quick sanity check that we are indeed in this state.
  //
  // FIXME: Two of these three checks are disabled until we fix
  // https://github.com/google/sanitizers/issues/258.
  // if (d1 != d2)
  //   DCHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
  //
  // NOTE: curly brackets for the "if" below to silence a MSVC warning.
  if (a + granularity <= d1) {
    DCHECK_EQ(*(u8 *)MemToShadow(a), 0);
  }
  // if (d2 + granularity <= c && c <= end)
  //   DCHECK_EQ(*(u8 *)MemToShadow(c - granularity),
  //             kAsanContiguousContainerOOBMagic);

  uptr b1 = RoundDownTo(new_end, granularity);
  uptr b2 = RoundUpTo(new_end, granularity);
  // New state:
  // [a, b1) is good, [b2, c) is bad, [b1, b2) is partially good.
  if (b1 > a)
    PoisonShadow(a, b1 - a, 0);
  else if (c > b2)
    PoisonShadow(b2, c - b2, kAsanContiguousContainerOOBMagic);
  if (b1 != b2) {
    CHECK_EQ(b2 - b1, granularity);
    *(u8 *)MemToShadow(b1) = static_cast<u8>(new_end - b1);
  }
}

// Annotates a double-ended contiguous memory area like std::deque's chunk.
// It allows detecting buggy accesses to allocated but not yet used beginning
// or end items of such a container.
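//
// Illustrative sketch (the names are hypothetical, not part of this
// interface): for a deque-like chunk `buf` of `cap` bytes that currently
// holds bytes [first, last) and then pops one byte off the front, the
// container implementation would announce the transition as:
//
//   __sanitizer_annotate_double_ended_contiguous_container(
//       buf, buf + cap,                 // storage bounds
//       buf + first, buf + last,        // old container bounds
//       buf + first + 1, buf + last);   // new container bounds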
void __sanitizer_annotate_double_ended_contiguous_container(
    const void *storage_beg_p, const void *storage_end_p,
    const void *old_container_beg_p, const void *old_container_end_p,
    const void *new_container_beg_p, const void *new_container_end_p) {
  if (!flags()->detect_container_overflow)
    return;

  VPrintf(3, "contiguous_container: %p %p %p %p %p %p\n", storage_beg_p,
          storage_end_p, old_container_beg_p, old_container_end_p,
          new_container_beg_p, new_container_end_p);

  uptr storage_beg = reinterpret_cast<uptr>(storage_beg_p);
  uptr storage_end = reinterpret_cast<uptr>(storage_end_p);
  uptr old_beg = reinterpret_cast<uptr>(old_container_beg_p);
  uptr old_end = reinterpret_cast<uptr>(old_container_end_p);
  uptr new_beg = reinterpret_cast<uptr>(new_container_beg_p);
  uptr new_end = reinterpret_cast<uptr>(new_container_end_p);

  constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;

  if (!(old_beg <= old_end && new_beg <= new_end) ||
      !(storage_beg <= new_beg && new_end <= storage_end) ||
      !(storage_beg <= old_beg && old_end <= storage_end)) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportBadParamsToAnnotateDoubleEndedContiguousContainer(
        storage_beg, storage_end, old_beg, old_end, new_beg, new_end, &stack);
  }
  CHECK_LE(storage_end - storage_beg,
           FIRST_32_SECOND_64(1UL << 30, 1ULL << 40));  // Sanity check.

  if ((old_beg == old_end && new_beg == new_end) ||
      (old_beg == new_beg && old_end == new_end))
    return;  // Nothing to do here.

  FixUnalignedStorage(storage_beg, storage_end, old_beg, old_end, new_beg,
                      new_end);

  // Handle non-intersecting new/old containers separately to keep the
  // intersecting case simpler.
  if (old_beg == old_end || new_beg == new_end || new_end <= old_beg ||
      old_end <= new_beg) {
    if (old_beg != old_end) {
      // Poisoning the old container.
      uptr a = RoundDownTo(old_beg, granularity);
      uptr b = RoundUpTo(old_end, granularity);
      PoisonShadow(a, b - a, kAsanContiguousContainerOOBMagic);
    }

    if (new_beg != new_end) {
      // Unpoisoning the new container.
      uptr a = RoundDownTo(new_beg, granularity);
      uptr b = RoundDownTo(new_end, granularity);
      PoisonShadow(a, b - a, 0);
      if (!AddrIsAlignedByGranularity(new_end))
        *(u8 *)MemToShadow(b) = static_cast<u8>(new_end - b);
    }

    return;
  }

  // Intersection of old and new containers is not empty.
  CHECK_LT(new_beg, old_end);
  CHECK_GT(new_end, old_beg);

  if (new_beg < old_beg) {
    // Round down because we can't poison prefixes.
    uptr a = RoundDownTo(new_beg, granularity);
    // Round down and ignore [c, old_beg) as its state is defined by the
    // unchanged [old_beg, old_end).
    uptr c = RoundDownTo(old_beg, granularity);
    PoisonShadow(a, c - a, 0);
  } else if (new_beg > old_beg) {
    // Round down and poison [a, old_beg) because it was unpoisoned only as a
    // prefix.
    uptr a = RoundDownTo(old_beg, granularity);
    // Round down and ignore [c, new_beg) as its state is defined by the
    // unchanged [new_beg, old_end).
    uptr c = RoundDownTo(new_beg, granularity);

    PoisonShadow(a, c - a, kAsanContiguousContainerOOBMagic);
  }

  if (new_end > old_end) {
    // Round down to poison the prefix.
    uptr a = RoundDownTo(old_end, granularity);
    // Round down and handle the remainder below.
    uptr c = RoundDownTo(new_end, granularity);
    PoisonShadow(a, c - a, 0);
    if (!AddrIsAlignedByGranularity(new_end))
      *(u8 *)MemToShadow(c) = static_cast<u8>(new_end - c);
  } else if (new_end < old_end) {
    // Round up and handle the remainder below.
    uptr a2 = RoundUpTo(new_end, granularity);
    // Round up to poison the entire granule as we had nothing in
    // [old_end, c2).
    uptr c2 = RoundUpTo(old_end, granularity);
    PoisonShadow(a2, c2 - a2, kAsanContiguousContainerOOBMagic);

    if (!AddrIsAlignedByGranularity(new_end)) {
      uptr a = RoundDownTo(new_end, granularity);
      *(u8 *)MemToShadow(a) = static_cast<u8>(new_end - a);
    }
  }
}

// Marks the specified number of bytes in a granule as accessible or
// poisons the whole granule with the kAsanContiguousContainerOOBMagic value.
static void SetContainerGranule(uptr ptr, u8 n) {
  constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
  u8 s = (n == granularity) ? 0 : (n ? n : kAsanContiguousContainerOOBMagic);
  *(u8 *)MemToShadow(ptr) = s;
}

// Performs a byte-by-byte copy of ASan annotations (shadow memory values).
// The result may differ from the source due to ASan limitations, but it
// cannot lead to false positives (more memory than requested may get
// unpoisoned).
static void SlowCopyContainerAnnotations(uptr src_beg, uptr src_end,
                                         uptr dst_beg, uptr dst_end) {
  constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
  uptr dst_end_down = RoundDownTo(dst_end, granularity);
  uptr src_ptr = src_beg;
  uptr dst_ptr = dst_beg;

  while (dst_ptr < dst_end) {
    uptr granule_beg = RoundDownTo(dst_ptr, granularity);
    uptr granule_end = granule_beg + granularity;
    uptr unpoisoned_bytes = 0;

    uptr end = Min(granule_end, dst_end);
    for (; dst_ptr != end; ++dst_ptr, ++src_ptr)
      if (!AddressIsPoisoned(src_ptr))
        unpoisoned_bytes = dst_ptr - granule_beg + 1;

    if (dst_ptr == dst_end && dst_end != dst_end_down &&
        !AddressIsPoisoned(dst_end))
      continue;

    if (unpoisoned_bytes != 0 || granule_beg >= dst_beg)
      SetContainerGranule(granule_beg, unpoisoned_bytes);
    else if (!AddressIsPoisoned(dst_beg))
      SetContainerGranule(granule_beg, dst_beg - granule_beg);
  }
}

// Performs a byte-by-byte copy of ASan annotations (shadow memory values),
// going through bytes in reversed order, but not reversing the annotations.
// The result may differ from the source due to ASan limitations, but it
// cannot lead to false positives (more memory than requested may get
// unpoisoned).
static void SlowReversedCopyContainerAnnotations(uptr src_beg, uptr src_end,
                                                 uptr dst_beg, uptr dst_end) {
  constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
  uptr dst_end_down = RoundDownTo(dst_end, granularity);
  uptr src_ptr = src_end;
  uptr dst_ptr = dst_end;

  while (dst_ptr > dst_beg) {
    uptr granule_beg = RoundDownTo(dst_ptr - 1, granularity);
    uptr unpoisoned_bytes = 0;

    uptr end = Max(granule_beg, dst_beg);
    for (; dst_ptr != end; --dst_ptr, --src_ptr)
      if (unpoisoned_bytes == 0 && !AddressIsPoisoned(src_ptr - 1))
        unpoisoned_bytes = dst_ptr - granule_beg;

    if (dst_ptr >= dst_end_down && !AddressIsPoisoned(dst_end))
      continue;

    if (granule_beg == dst_ptr || unpoisoned_bytes != 0)
      SetContainerGranule(granule_beg, unpoisoned_bytes);
    else if (!AddressIsPoisoned(dst_beg))
      SetContainerGranule(granule_beg, dst_beg - granule_beg);
  }
}

// A helper function for __sanitizer_copy_contiguous_container_annotations;
// it makes assumptions about the begin and end of the container and should
// not be used standalone.
static void CopyContainerFirstGranuleAnnotation(uptr src_beg, uptr dst_beg) {
  constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
  // First granule.
  uptr src_beg_down = RoundDownTo(src_beg, granularity);
  uptr dst_beg_down = RoundDownTo(dst_beg, granularity);
  if (dst_beg_down == dst_beg)
    return;
  if (!AddressIsPoisoned(src_beg))
    *(u8 *)MemToShadow(dst_beg_down) = *(u8 *)MemToShadow(src_beg_down);
  else if (!AddressIsPoisoned(dst_beg))
    SetContainerGranule(dst_beg_down, dst_beg - dst_beg_down);
}

// A helper function for __sanitizer_copy_contiguous_container_annotations;
// it makes assumptions about the begin and end of the container and should
// not be used standalone.
static void CopyContainerLastGranuleAnnotation(uptr src_end, uptr dst_end) {
  constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
  // Last granule.
  uptr src_end_down = RoundDownTo(src_end, granularity);
  uptr dst_end_down = RoundDownTo(dst_end, granularity);
  if (dst_end_down == dst_end || !AddressIsPoisoned(dst_end))
    return;
  if (AddressIsPoisoned(src_end))
    *(u8 *)MemToShadow(dst_end_down) = *(u8 *)MemToShadow(src_end_down);
  else
    SetContainerGranule(dst_end_down, src_end - src_end_down);
}

// This function copies ASan memory annotations (poisoned/unpoisoned states)
// from one buffer to another.
// Its main purpose is to help with relocating trivially relocatable objects,
// whose memory may be poisoned, without calling the copy constructor.
// However, it does not move the memory contents themselves, only the
// annotations.
// If the buffers aren't aligned (the distance between the buffers isn't
// granule-aligned)
//     src_beg % granularity != dst_beg % granularity
// the function handles this by going byte by byte, slowing down performance.
// The old buffer annotations are not removed. If necessary, the user can
// unpoison the old buffer with __asan_unpoison_memory_region.
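//
// Illustrative sketch (the names are hypothetical): when relocating n bytes
// of trivially relocatable objects from old_buf to new_buf, a container can
// move the bytes first and then carry the shadow annotations over:
//
//   memmove(new_buf, old_buf, n);  // or the container's own relocation step
//   __sanitizer_copy_contiguous_container_annotations(old_buf, old_buf + n,
//                                                     new_buf, new_buf + n);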
void __sanitizer_copy_contiguous_container_annotations(const void *src_beg_p,
                                                        const void *src_end_p,
                                                        const void *dst_beg_p,
                                                        const void *dst_end_p) {
  if (!flags()->detect_container_overflow)
    return;

  VPrintf(3, "contiguous_container_src: %p %p\n", src_beg_p, src_end_p);
  VPrintf(3, "contiguous_container_dst: %p %p\n", dst_beg_p, dst_end_p);

  uptr src_beg = reinterpret_cast<uptr>(src_beg_p);
  uptr src_end = reinterpret_cast<uptr>(src_end_p);
  uptr dst_beg = reinterpret_cast<uptr>(dst_beg_p);
  uptr dst_end = reinterpret_cast<uptr>(dst_end_p);

  constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;

  if (src_beg > src_end || (dst_end - dst_beg) != (src_end - src_beg)) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportBadParamsToCopyContiguousContainerAnnotations(
        src_beg, src_end, dst_beg, dst_end, &stack);
  }

  if (src_beg == src_end || src_beg == dst_beg)
    return;
  // Due to support for overlapping buffers, we may have to copy elements
  // in reversed order, when the destination buffer starts in the middle of
  // the source buffer (or shares its first granule with it).
  //
  // When the buffers are not granule-aligned (or, to be specific, the
  // distance between them is not), annotations have to be copied byte by
  // byte.
  //
  // The only remaining edge cases involve edge granules,
  // when the container starts or ends within a granule.
  uptr src_beg_up = RoundUpTo(src_beg, granularity);
  uptr src_end_up = RoundUpTo(src_end, granularity);
  bool copy_in_reversed_order = src_beg < dst_beg && dst_beg <= src_end_up;
  if (src_beg % granularity != dst_beg % granularity ||
      RoundDownTo(dst_end - 1, granularity) <= dst_beg) {
    if (copy_in_reversed_order)
      SlowReversedCopyContainerAnnotations(src_beg, src_end, dst_beg, dst_end);
    else
      SlowCopyContainerAnnotations(src_beg, src_end, dst_beg, dst_end);
    return;
  }

  // As the buffers are granule-aligned, we can just copy annotations of
  // granules from the middle.
  uptr dst_beg_up = RoundUpTo(dst_beg, granularity);
  uptr dst_end_down = RoundDownTo(dst_end, granularity);
  if (copy_in_reversed_order)
    CopyContainerLastGranuleAnnotation(src_end, dst_end);
  else
    CopyContainerFirstGranuleAnnotation(src_beg, dst_beg);

  if (dst_beg_up < dst_end_down) {
    internal_memmove((u8 *)MemToShadow(dst_beg_up),
                     (u8 *)MemToShadow(src_beg_up),
                     (dst_end_down - dst_beg_up) / granularity);
  }

  if (copy_in_reversed_order)
    CopyContainerFirstGranuleAnnotation(src_beg, dst_beg);
  else
    CopyContainerLastGranuleAnnotation(src_end, dst_end);
}

static const void *FindBadAddress(uptr begin, uptr end, bool poisoned) {
  CHECK_LE(begin, end);
  constexpr uptr kMaxRangeToCheck = 32;
  if (end - begin > kMaxRangeToCheck * 2) {
    if (auto *bad = FindBadAddress(begin, begin + kMaxRangeToCheck, poisoned))
      return bad;
    if (auto *bad = FindBadAddress(end - kMaxRangeToCheck, end, poisoned))
      return bad;
  }

  for (uptr i = begin; i < end; ++i)
    if (AddressIsPoisoned(i) != poisoned)
      return reinterpret_cast<const void *>(i);
  return nullptr;
}

const void *__sanitizer_contiguous_container_find_bad_address(
    const void *beg_p, const void *mid_p, const void *end_p) {
  if (!flags()->detect_container_overflow)
    return nullptr;
  uptr granularity = ASAN_SHADOW_GRANULARITY;
  uptr beg = reinterpret_cast<uptr>(beg_p);
  uptr end = reinterpret_cast<uptr>(end_p);
  uptr mid = reinterpret_cast<uptr>(mid_p);
  CHECK_LE(beg, mid);
  CHECK_LE(mid, end);
  // If the byte after the storage is unpoisoned, everything in the granule
  // before must stay unpoisoned.
  uptr annotations_end =
      (!AddrIsAlignedByGranularity(end) && !AddressIsPoisoned(end))
          ? RoundDownTo(end, granularity)
          : end;
  beg = Min(beg, annotations_end);
  mid = Min(mid, annotations_end);
  if (auto *bad = FindBadAddress(beg, mid, false))
    return bad;
  if (auto *bad = FindBadAddress(mid, annotations_end, true))
    return bad;
  return FindBadAddress(annotations_end, end, false);
}

int __sanitizer_verify_contiguous_container(const void *beg_p,
                                            const void *mid_p,
                                            const void *end_p) {
  return __sanitizer_contiguous_container_find_bad_address(beg_p, mid_p,
                                                           end_p) == nullptr;
}

const void *__sanitizer_double_ended_contiguous_container_find_bad_address(
    const void *storage_beg_p, const void *container_beg_p,
    const void *container_end_p, const void *storage_end_p) {
  if (!flags()->detect_container_overflow)
    return nullptr;
  uptr granularity = ASAN_SHADOW_GRANULARITY;
  uptr storage_beg = reinterpret_cast<uptr>(storage_beg_p);
  uptr storage_end = reinterpret_cast<uptr>(storage_end_p);
  uptr beg = reinterpret_cast<uptr>(container_beg_p);
  uptr end = reinterpret_cast<uptr>(container_end_p);

  // The prefix of the first granule of the container is unpoisoned.
  if (beg != end)
    beg = Max(storage_beg, RoundDownTo(beg, granularity));

  // If the byte after the storage is unpoisoned, the prefix of the last
  // granule is unpoisoned.
  uptr annotations_end = (!AddrIsAlignedByGranularity(storage_end) &&
                          !AddressIsPoisoned(storage_end))
                             ? RoundDownTo(storage_end, granularity)
                             : storage_end;
  storage_beg = Min(storage_beg, annotations_end);
  beg = Min(beg, annotations_end);
  end = Min(end, annotations_end);

  if (auto *bad = FindBadAddress(storage_beg, beg, true))
    return bad;
  if (auto *bad = FindBadAddress(beg, end, false))
    return bad;
  if (auto *bad = FindBadAddress(end, annotations_end, true))
    return bad;
  return FindBadAddress(annotations_end, storage_end, false);
}

int __sanitizer_verify_double_ended_contiguous_container(
    const void *storage_beg_p, const void *container_beg_p,
    const void *container_end_p, const void *storage_end_p) {
  return __sanitizer_double_ended_contiguous_container_find_bad_address(
             storage_beg_p, container_beg_p, container_end_p, storage_end_p) ==
         nullptr;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_poison_intra_object_redzone(uptr ptr, uptr size) {
  AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, true);
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_unpoison_intra_object_redzone(uptr ptr, uptr size) {
  AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, false);
}

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
bool WordIsPoisoned(uptr addr) {
  return (__asan_region_is_poisoned(addr, sizeof(uptr)) != 0);
}
}  // namespace __lsan