//===-- asan_poisoning.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Shadow memory poisoning by ASan RTL and by user application.
//===----------------------------------------------------------------------===//

#include "asan_poisoning.h"

#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_ring_buffer.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

namespace __asan {

using PoisonRecordRingBuffer = RingBuffer<PoisonRecord>;

static atomic_uint8_t can_poison_memory;

static Mutex poison_records_mutex;
static PoisonRecordRingBuffer *poison_records
    SANITIZER_GUARDED_BY(poison_records_mutex) = nullptr;

void AddPoisonRecord(const PoisonRecord &new_record) {
  if (flags()->poison_history_size <= 0)
    return;

  GenericScopedLock<Mutex> l(&poison_records_mutex);

  if (poison_records == nullptr)
    poison_records =
        PoisonRecordRingBuffer::New(flags()->poison_history_size);

  poison_records->push(new_record);
}

bool FindPoisonRecord(uptr addr, PoisonRecord &match) {
  if (flags()->poison_history_size <= 0)
    return false;

  GenericScopedLock<Mutex> l(&poison_records_mutex);

  if (poison_records) {
    for (unsigned int i = 0; i < poison_records->size(); i++) {
      PoisonRecord record = (*poison_records)[i];
      if (record.begin <= addr && addr < record.end) {
        internal_memcpy(&match, &record, sizeof(record));
        return true;
      }
    }
  }

  return false;
}

void SANITIZER_ACQUIRE(poison_records_mutex) AcquirePoisonRecords() {
  poison_records_mutex.Lock();
}

void SANITIZER_RELEASE(poison_records_mutex) ReleasePoisonRecords() {
  poison_records_mutex.Unlock();
}

void SetCanPoisonMemory(bool value) {
  atomic_store(&can_poison_memory, value, memory_order_release);
}

bool CanPoisonMemory() {
  return atomic_load(&can_poison_memory, memory_order_acquire);
}

void PoisonShadow(uptr addr, uptr size, u8 value) {
  if (value && !CanPoisonMemory()) return;
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  CHECK(AddrIsAlignedByGranularity(addr + size));
  CHECK(AddrIsInMem(addr + size - ASAN_SHADOW_GRANULARITY));
  CHECK(REAL(memset));
  FastPoisonShadow(addr, size, value);
}

void PoisonShadowPartialRightRedzone(uptr addr,
                                     uptr size,
                                     uptr redzone_size,
                                     u8 value) {
  if (!CanPoisonMemory()) return;
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value);
}

struct ShadowSegmentEndpoint {
  u8 *chunk;
  s8 offset;  // in [0, ASAN_SHADOW_GRANULARITY)
  s8 value;   // = *chunk;

  explicit ShadowSegmentEndpoint(uptr address) {
    chunk = (u8 *)MemToShadow(address);
    offset = address & (ASAN_SHADOW_GRANULARITY - 1);
    value = *chunk;
  }
};
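
// A worked example of the shadow encoding these endpoints use (assuming an
// 8-byte granularity; addresses are illustrative): for address 0x1005, chunk
// points at the shadow byte of granule [0x1000, 0x1008) and offset == 5.
// A shadow value v in (0, 8) means only the first v bytes of the granule are
// addressable; 0 means all 8 bytes are; a negative value (as s8) is one of
// the poison magic values.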
114
115void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
116 uptr end = ptr + size;
117 if (Verbosity()) {
118 Printf(format: "__asan_%spoison_intra_object_redzone [%p,%p) %zd\n",
119 poison ? "" : "un", (void *)ptr, (void *)end, size);
120 if (Verbosity() >= 2)
121 PRINT_CURRENT_STACK();
122 }
123 CHECK(size);
124 CHECK_LE(size, 4096);
125 CHECK(IsAligned(end, ASAN_SHADOW_GRANULARITY));
126 if (!IsAligned(a: ptr, ASAN_SHADOW_GRANULARITY)) {
127 *(u8 *)MemToShadow(p: ptr) =
128 poison ? static_cast<u8>(ptr % ASAN_SHADOW_GRANULARITY) : 0;
129 ptr |= ASAN_SHADOW_GRANULARITY - 1;
130 ptr++;
131 }
132 for (; ptr < end; ptr += ASAN_SHADOW_GRANULARITY)
133 *(u8*)MemToShadow(p: ptr) = poison ? kAsanIntraObjectRedzone : 0;
134}
135
136} // namespace __asan
137
138// ---------------------- Interface ---------------- {{{1
139using namespace __asan;
140
141// Current implementation of __asan_(un)poison_memory_region doesn't check
142// that user program (un)poisons the memory it owns. It poisons memory
143// conservatively, and unpoisons progressively to make sure asan shadow
144// mapping invariant is preserved (see detailed mapping description here:
145// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm).
146//
147// * if user asks to poison region [left, right), the program poisons
148// at least [left, AlignDown(right)).
149// * if user asks to unpoison region [left, right), the program unpoisons
150// at most [AlignDown(left), right).
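//
// A minimal worked example of these rules (assuming an 8-byte granularity;
// `buf` is illustrative and 8-byte aligned, not part of this file):
//   char buf[16];
//   __asan_poison_memory_region(buf + 1, 15);
//   // poisons [buf+1, buf+16): the partial first granule can be encoded
//   // because its prefix [buf, buf+1) stays addressable.
//   __asan_unpoison_memory_region(buf + 1, 15);
//   // unpoisons [buf, buf+16): the whole first granule is cleared, i.e.
//   // "at most [AlignDown(left), right)".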
void __asan_poison_memory_region(void const volatile *addr, uptr size) {
  if (!flags()->allow_user_poisoning || size == 0) return;
  uptr beg_addr = (uptr)addr;
  uptr end_addr = beg_addr + size;
  VPrintf(3, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr,
          (void *)end_addr);

  if (flags()->poison_history_size > 0) {
    GET_STACK_TRACE(/*max_size=*/16, /*fast=*/false);
    u32 current_tid = GetCurrentTidOrInvalid();

    u32 stack_id = StackDepotPut(stack);

    PoisonRecord record;
    record.stack_id = stack_id;
    record.thread_id = current_tid;
    record.begin = beg_addr;
    record.end = end_addr;
    AddPoisonRecord(record);
  }

  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);
  if (beg.chunk == end.chunk) {
    CHECK_LT(beg.offset, end.offset);
    s8 value = beg.value;
    CHECK_EQ(value, end.value);
    // We can only poison memory if the byte in end.offset is unaddressable.
    // No need to re-poison memory if it is poisoned already.
    if (value > 0 && value <= end.offset) {
      if (beg.offset > 0) {
        *beg.chunk = Min(value, beg.offset);
      } else {
        *beg.chunk = kAsanUserPoisonedMemoryMagic;
      }
    }
    return;
  }
  CHECK_LT(beg.chunk, end.chunk);
  if (beg.offset > 0) {
    // Mark bytes from beg.offset as unaddressable.
    if (beg.value == 0) {
      *beg.chunk = beg.offset;
    } else {
      *beg.chunk = Min(beg.value, beg.offset);
    }
    beg.chunk++;
  }
  REAL(memset)(beg.chunk, kAsanUserPoisonedMemoryMagic, end.chunk - beg.chunk);
  // Poison if byte in end.offset is unaddressable.
  if (end.value > 0 && end.value <= end.offset) {
    *end.chunk = kAsanUserPoisonedMemoryMagic;
  }
}

void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
  if (!flags()->allow_user_poisoning || size == 0) return;
  uptr beg_addr = (uptr)addr;
  uptr end_addr = beg_addr + size;
  VPrintf(3, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr,
          (void *)end_addr);

  // Note: we don't need to update the poison tracking here. Since the shadow
  // memory will be unpoisoned, the poison tracking ring buffer entries will be
  // ignored.

  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);
  if (beg.chunk == end.chunk) {
    CHECK_LT(beg.offset, end.offset);
    s8 value = beg.value;
    CHECK_EQ(value, end.value);
    // We unpoison memory bytes up to end.offset if they are not unpoisoned
    // already.
    if (value != 0) {
      *beg.chunk = Max(value, end.offset);
    }
    return;
  }
  CHECK_LT(beg.chunk, end.chunk);
  REAL(memset)(beg.chunk, 0, end.chunk - beg.chunk);
  if (end.offset > 0 && end.value != 0) {
    *end.chunk = Max(end.value, end.offset);
  }
}

int __asan_address_is_poisoned(void const volatile *addr) {
  return __asan::AddressIsPoisoned((uptr)addr);
}

uptr __asan_region_is_poisoned(uptr beg, uptr size) {
  if (!size)
    return 0;
  uptr end = beg + size;
  if (!AddrIsInMem(beg))
    return beg;
  if (!AddrIsInMem(end))
    return end;
  CHECK_LT(beg, end);
  uptr aligned_b = RoundUpTo(beg, ASAN_SHADOW_GRANULARITY);
  uptr aligned_e = RoundDownTo(end, ASAN_SHADOW_GRANULARITY);
  uptr shadow_beg = MemToShadow(aligned_b);
  uptr shadow_end = MemToShadow(aligned_e);
  // First check the first and the last application bytes,
  // then check the ASAN_SHADOW_GRANULARITY-aligned region by calling
  // mem_is_zero on the corresponding shadow.
  if (!__asan::AddressIsPoisoned(beg) && !__asan::AddressIsPoisoned(end - 1) &&
      (shadow_end <= shadow_beg ||
       __sanitizer::mem_is_zero((const char *)shadow_beg,
                                shadow_end - shadow_beg)))
    return 0;
  // The fast check failed, so we have a poisoned byte somewhere.
  // Find it slowly.
  for (; beg < end; beg++)
    if (__asan::AddressIsPoisoned(beg))
      return beg;
  UNREACHABLE("mem_is_zero returned false, but poisoned byte was not found");
  return 0;
}
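
// A caller-side sketch of the contract above (the surrounding code is
// illustrative, not part of this interface): a zero result means the whole
// region is addressable; any other value is the first poisoned byte.
//   uptr bad = __asan_region_is_poisoned((uptr)p, n);
//   if (bad)
//     HandleBadAccess(bad);  // hypothetical user-side handler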

#define CHECK_SMALL_REGION(p, size, isWrite)                      \
  do {                                                            \
    uptr __p = reinterpret_cast<uptr>(p);                         \
    uptr __size = size;                                           \
    if (UNLIKELY(__asan::AddressIsPoisoned(__p) ||                \
                 __asan::AddressIsPoisoned(__p + __size - 1))) {  \
      GET_CURRENT_PC_BP_SP;                                       \
      uptr __bad = __asan_region_is_poisoned(__p, __size);        \
      __asan_report_error(pc, bp, sp, __bad, isWrite, __size, 0); \
    }                                                             \
  } while (false)

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u16 __sanitizer_unaligned_load16(const uu16 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u32 __sanitizer_unaligned_load32(const uu32 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u64 __sanitizer_unaligned_load64(const uu64 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store16(uu16 *p, u16 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store32(uu32 *p, u32 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store64(uu64 *p, u64 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}
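
// A user-side sketch of the helpers above (`buf` is illustrative): they
// behave like plain unaligned loads/stores, but first verify that the first
// and last byte of the accessed region are addressable, reporting the exact
// bad address on failure.
//   u32 v = __sanitizer_unaligned_load32((const uu32 *)(buf + 3));
//   __sanitizer_unaligned_store16((uu16 *)(buf + 5), 0xabcd);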

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_poison_cxx_array_cookie(uptr p) {
  if (SANITIZER_WORDSIZE != 64) return;
  if (!flags()->poison_array_cookie) return;
  uptr s = MEM_TO_SHADOW(p);
  *reinterpret_cast<u8 *>(s) = kAsanArrayCookieMagic;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __asan_load_cxx_array_cookie(uptr *p) {
  if (SANITIZER_WORDSIZE != 64) return *p;
  if (!flags()->poison_array_cookie) return *p;
  uptr s = MEM_TO_SHADOW(reinterpret_cast<uptr>(p));
  u8 sval = *reinterpret_cast<u8 *>(s);
  if (sval == kAsanArrayCookieMagic) return *p;
  // If sval is not kAsanArrayCookieMagic it can only be freed memory,
  // which means that we are going to get double-free. So, return 0 to avoid
  // infinite loop of destructors. We don't want to report a double-free here
  // though, so print a warning just in case.
  // CHECK_EQ(sval, kAsanHeapFreeMagic);
  if (sval == kAsanHeapFreeMagic) {
    Report("AddressSanitizer: loaded array cookie from free-d memory; "
           "expect a double-free report\n");
    return 0;
  }
  // The cookie may remain unpoisoned if e.g. it comes from a custom
  // operator new defined inside a class.
  return *p;
}
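
// Background sketch for the two functions above (the layout follows the
// usual Itanium C++ ABI convention; treat the details as an assumption
// here): for `new T[n]` where T has a non-trivial destructor, the compiler
// stores n in a cookie word just before the array, and `delete[]` loads it
// to know how many destructors to run. Poisoning the cookie's shadow lets
// ASan flag a buggy direct user access such as:
//   T *a = new T[10];
//   uptr n = reinterpret_cast<uptr *>(a)[-1];  // reads the cookie; reported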

// This is a simplified version of __asan_(un)poison_memory_region, which
// assumes that the left border of the region to be poisoned is properly
// aligned.
static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
  if (size == 0) return;
  uptr aligned_size = size & ~(ASAN_SHADOW_GRANULARITY - 1);
  PoisonShadow(addr, aligned_size,
               do_poison ? kAsanStackUseAfterScopeMagic : 0);
  if (size == aligned_size)
    return;
  s8 end_offset = (s8)(size - aligned_size);
  s8 *shadow_end = (s8 *)MemToShadow(addr + aligned_size);
  s8 end_value = *shadow_end;
  if (do_poison) {
    // If possible, mark all the bytes mapping to the last shadow byte as
    // unaddressable.
    if (end_value > 0 && end_value <= end_offset)
      *shadow_end = (s8)kAsanStackUseAfterScopeMagic;
  } else {
    // If necessary, mark the first few bytes mapping to the last shadow byte
    // as addressable.
    if (end_value != 0)
      *shadow_end = Max(end_value, end_offset);
  }
}

void __asan_set_shadow_00(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0, size);
}

void __asan_set_shadow_01(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x01, size);
}

void __asan_set_shadow_02(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x02, size);
}

void __asan_set_shadow_03(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x03, size);
}

void __asan_set_shadow_04(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x04, size);
}

void __asan_set_shadow_05(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x05, size);
}

void __asan_set_shadow_06(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x06, size);
}

void __asan_set_shadow_07(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x07, size);
}

void __asan_set_shadow_f1(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf1, size);
}

void __asan_set_shadow_f2(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf2, size);
}

void __asan_set_shadow_f3(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf3, size);
}

void __asan_set_shadow_f5(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf5, size);
}

void __asan_set_shadow_f8(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf8, size);
}

void __asan_poison_stack_memory(uptr addr, uptr size) {
  VReport(1, "poisoning: %p %zx\n", (void *)addr, size);
  PoisonAlignedStackMemory(addr, size, true);
}

void __asan_unpoison_stack_memory(uptr addr, uptr size) {
  VReport(1, "unpoisoning: %p %zx\n", (void *)addr, size);
  PoisonAlignedStackMemory(addr, size, false);
}

static void FixUnalignedStorage(uptr storage_beg, uptr storage_end,
                                uptr &old_beg, uptr &old_end, uptr &new_beg,
                                uptr &new_end) {
  constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
  if (UNLIKELY(!AddrIsAlignedByGranularity(storage_end))) {
    uptr end_down = RoundDownTo(storage_end, granularity);
    // Ignore the last unaligned granule if the storage is followed by an
    // unpoisoned byte, because we can't poison the prefix anyway. Don't call
    // AddressIsPoisoned at all if the container changes do not affect the
    // last granule.
    if ((((old_end != new_end) && Max(old_end, new_end) > end_down) ||
         ((old_beg != new_beg) && Max(old_beg, new_beg) > end_down)) &&
        !AddressIsPoisoned(storage_end)) {
      old_beg = Min(end_down, old_beg);
      old_end = Min(end_down, old_end);
      new_beg = Min(end_down, new_beg);
      new_end = Min(end_down, new_end);
    }
  }

  // Handle misaligned begin and cut it off.
  if (UNLIKELY(!AddrIsAlignedByGranularity(storage_beg))) {
    uptr beg_up = RoundUpTo(storage_beg, granularity);
    // The first unaligned granule needs special handling only if we had bytes
    // there before and will have none after.
    if ((new_beg == new_end || new_beg >= beg_up) && old_beg != old_end &&
        old_beg < beg_up) {
      // Keep the granule prefix outside of the storage unpoisoned.
      uptr beg_down = RoundDownTo(storage_beg, granularity);
      *(u8 *)MemToShadow(beg_down) = storage_beg - beg_down;
      old_beg = Max(beg_up, old_beg);
      old_end = Max(beg_up, old_end);
      new_beg = Max(beg_up, new_beg);
      new_end = Max(beg_up, new_end);
    }
  }
}

void __sanitizer_annotate_contiguous_container(const void *beg_p,
                                               const void *end_p,
                                               const void *old_mid_p,
                                               const void *new_mid_p) {
  if (!flags()->detect_container_overflow)
    return;
  VPrintf(3, "contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p,
          new_mid_p);
  uptr storage_beg = reinterpret_cast<uptr>(beg_p);
  uptr storage_end = reinterpret_cast<uptr>(end_p);
  uptr old_end = reinterpret_cast<uptr>(old_mid_p);
  uptr new_end = reinterpret_cast<uptr>(new_mid_p);
  uptr old_beg = storage_beg;
  uptr new_beg = storage_beg;
  uptr granularity = ASAN_SHADOW_GRANULARITY;
  if (!(storage_beg <= old_end && storage_beg <= new_end &&
        old_end <= storage_end && new_end <= storage_end)) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportBadParamsToAnnotateContiguousContainer(storage_beg, storage_end,
                                                 old_end, new_end, &stack);
  }
  CHECK_LE(storage_end - storage_beg,
           FIRST_32_SECOND_64(1UL << 30, 1ULL << 40));  // Sanity check.

  if (old_end == new_end)
    return;  // Nothing to do here.

  FixUnalignedStorage(storage_beg, storage_end, old_beg, old_end, new_beg,
                      new_end);

  uptr a = RoundDownTo(Min(old_end, new_end), granularity);
  uptr c = RoundUpTo(Max(old_end, new_end), granularity);
  uptr d1 = RoundDownTo(old_end, granularity);
  // uptr d2 = RoundUpTo(old_mid, granularity);
  // Currently we should be in this state:
  // [a, d1) is good, [d2, c) is bad, [d1, d2) is partially good.
  // Make a quick sanity check that we are indeed in this state.
  //
  // FIXME: Two of these three checks are disabled until we fix
  // https://github.com/google/sanitizers/issues/258.
  // if (d1 != d2)
  //   DCHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
  //
  // NOTE: curly brackets for the "if" below to silence a MSVC warning.
  if (a + granularity <= d1) {
    DCHECK_EQ(*(u8 *)MemToShadow(a), 0);
  }
  // if (d2 + granularity <= c && c <= end)
  //   DCHECK_EQ(*(u8 *)MemToShadow(c - granularity),
  //             kAsanContiguousContainerOOBMagic);

  uptr b1 = RoundDownTo(new_end, granularity);
  uptr b2 = RoundUpTo(new_end, granularity);
  // New state:
  // [a, b1) is good, [b2, c) is bad, [b1, b2) is partially good.
  if (b1 > a)
    PoisonShadow(a, b1 - a, 0);
  else if (c > b2)
    PoisonShadow(b2, c - b2, kAsanContiguousContainerOOBMagic);
  if (b1 != b2) {
    CHECK_EQ(b2 - b1, granularity);
    *(u8 *)MemToShadow(b1) = static_cast<u8>(new_end - b1);
  }
}
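
// A minimal caller-side sketch of the annotation protocol above, in the
// style libc++'s std::vector uses it (names are illustrative): keep
// [data, data + size) addressable and [data + size, data + capacity)
// poisoned, re-annotating on every size change.
//   // after construction: nothing is in use yet
//   __sanitizer_annotate_contiguous_container(data, data + capacity,
//                                             data + capacity, data);
//   // after push_back: the in-use range grew by one element
//   __sanitizer_annotate_contiguous_container(data, data + capacity,
//                                             data + size, data + size + 1);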

// Annotates a double-ended contiguous memory area like std::deque's chunk.
// It allows detecting buggy accesses to allocated but not yet used beginning
// or end items of such a container.
void __sanitizer_annotate_double_ended_contiguous_container(
    const void *storage_beg_p, const void *storage_end_p,
    const void *old_container_beg_p, const void *old_container_end_p,
    const void *new_container_beg_p, const void *new_container_end_p) {
  if (!flags()->detect_container_overflow)
    return;

  VPrintf(3, "contiguous_container: %p %p %p %p %p %p\n", storage_beg_p,
          storage_end_p, old_container_beg_p, old_container_end_p,
          new_container_beg_p, new_container_end_p);

  uptr storage_beg = reinterpret_cast<uptr>(storage_beg_p);
  uptr storage_end = reinterpret_cast<uptr>(storage_end_p);
  uptr old_beg = reinterpret_cast<uptr>(old_container_beg_p);
  uptr old_end = reinterpret_cast<uptr>(old_container_end_p);
  uptr new_beg = reinterpret_cast<uptr>(new_container_beg_p);
  uptr new_end = reinterpret_cast<uptr>(new_container_end_p);

  constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;

  if (!(old_beg <= old_end && new_beg <= new_end) ||
      !(storage_beg <= new_beg && new_end <= storage_end) ||
      !(storage_beg <= old_beg && old_end <= storage_end)) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportBadParamsToAnnotateDoubleEndedContiguousContainer(
        storage_beg, storage_end, old_beg, old_end, new_beg, new_end, &stack);
  }
  CHECK_LE(storage_end - storage_beg,
           FIRST_32_SECOND_64(1UL << 30, 1ULL << 40));  // Sanity check.

  if ((old_beg == old_end && new_beg == new_end) ||
      (old_beg == new_beg && old_end == new_end))
    return;  // Nothing to do here.

  FixUnalignedStorage(storage_beg, storage_end, old_beg, old_end, new_beg,
                      new_end);

  // Handle non-intersecting new/old containers separately, so that the
  // intersecting case below stays simpler.
  if (old_beg == old_end || new_beg == new_end || new_end <= old_beg ||
      old_end <= new_beg) {
    if (old_beg != old_end) {
      // Poisoning the old container.
      uptr a = RoundDownTo(old_beg, granularity);
      uptr b = RoundUpTo(old_end, granularity);
      PoisonShadow(a, b - a, kAsanContiguousContainerOOBMagic);
    }

    if (new_beg != new_end) {
      // Unpoisoning the new container.
      uptr a = RoundDownTo(new_beg, granularity);
      uptr b = RoundDownTo(new_end, granularity);
      PoisonShadow(a, b - a, 0);
      if (!AddrIsAlignedByGranularity(new_end))
        *(u8 *)MemToShadow(b) = static_cast<u8>(new_end - b);
    }

    return;
  }

  // Intersection of old and new containers is not empty.
  CHECK_LT(new_beg, old_end);
  CHECK_GT(new_end, old_beg);

  if (new_beg < old_beg) {
    // Round down because we can't poison prefixes.
    uptr a = RoundDownTo(new_beg, granularity);
    // Round down and ignore [c, old_beg), as its state is defined by the
    // unchanged [old_beg, old_end).
    uptr c = RoundDownTo(old_beg, granularity);
    PoisonShadow(a, c - a, 0);
  } else if (new_beg > old_beg) {
    // Round down and poison [a, old_beg), because it was unpoisoned only as
    // a prefix.
    uptr a = RoundDownTo(old_beg, granularity);
    // Round down and ignore [c, new_beg), as its state is defined by the
    // unchanged [new_beg, old_end).
    uptr c = RoundDownTo(new_beg, granularity);

    PoisonShadow(a, c - a, kAsanContiguousContainerOOBMagic);
  }

  if (new_end > old_end) {
    // Round down to poison the prefix.
    uptr a = RoundDownTo(old_end, granularity);
    // Round down and handle the remainder below.
    uptr c = RoundDownTo(new_end, granularity);
    PoisonShadow(a, c - a, 0);
    if (!AddrIsAlignedByGranularity(new_end))
      *(u8 *)MemToShadow(c) = static_cast<u8>(new_end - c);
  } else if (new_end < old_end) {
    // Round up and handle the remainder below.
    uptr a2 = RoundUpTo(new_end, granularity);
    // Round up to poison the entire granule, as we had nothing in
    // [old_end, c2).
    uptr c2 = RoundUpTo(old_end, granularity);
    PoisonShadow(a2, c2 - a2, kAsanContiguousContainerOOBMagic);

    if (!AddrIsAlignedByGranularity(new_end)) {
      uptr a = RoundDownTo(new_end, granularity);
      *(u8 *)MemToShadow(a) = static_cast<u8>(new_end - a);
    }
  }
}
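
// A caller-side sketch for a deque-like chunk (names illustrative): elements
// currently occupy [p + first, p + last) inside storage [p, p + cap); after
// a pop_front the in-use range shrinks from the left:
//   __sanitizer_annotate_double_ended_contiguous_container(
//       p, p + cap,
//       /*old*/ p + first, p + last,
//       /*new*/ p + first + 1, p + last);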

// Marks the specified number of bytes in a granule as accessible, or poisons
// the whole granule with the kAsanContiguousContainerOOBMagic value.
static void SetContainerGranule(uptr ptr, u8 n) {
  constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
  u8 s = (n == granularity) ? 0 : (n ? n : kAsanContiguousContainerOOBMagic);
  *(u8 *)MemToShadow(ptr) = s;
}
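
// Worked example of the encoding above (assuming an 8-byte granularity):
// SetContainerGranule(p, 8) writes 0 (whole granule addressable),
// SetContainerGranule(p, 3) writes 3 (only the first 3 bytes addressable),
// and SetContainerGranule(p, 0) writes kAsanContiguousContainerOOBMagic
// (whole granule poisoned).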

// Performs a byte-by-byte copy of ASan annotations (shadow memory values).
// Result may be different due to ASan limitations, but result cannot lead
// to false positives (more memory than requested may get unpoisoned).
static void SlowCopyContainerAnnotations(uptr src_beg, uptr src_end,
                                         uptr dst_beg, uptr dst_end) {
  constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
  uptr dst_end_down = RoundDownTo(dst_end, granularity);
  uptr src_ptr = src_beg;
  uptr dst_ptr = dst_beg;

  while (dst_ptr < dst_end) {
    uptr granule_beg = RoundDownTo(dst_ptr, granularity);
    uptr granule_end = granule_beg + granularity;
    uptr unpoisoned_bytes = 0;

    uptr end = Min(granule_end, dst_end);
    for (; dst_ptr != end; ++dst_ptr, ++src_ptr)
      if (!AddressIsPoisoned(src_ptr))
        unpoisoned_bytes = dst_ptr - granule_beg + 1;

    if (dst_ptr == dst_end && dst_end != dst_end_down &&
        !AddressIsPoisoned(dst_end))
      continue;

    if (unpoisoned_bytes != 0 || granule_beg >= dst_beg)
      SetContainerGranule(granule_beg, unpoisoned_bytes);
    else if (!AddressIsPoisoned(dst_beg))
      SetContainerGranule(granule_beg, dst_beg - granule_beg);
  }
}

// Performs a byte-by-byte copy of ASan annotations (shadow memory values),
// going through bytes in reversed order, but not reversing annotations.
// Result may be different due to ASan limitations, but result cannot lead
// to false positives (more memory than requested may get unpoisoned).
static void SlowReversedCopyContainerAnnotations(uptr src_beg, uptr src_end,
                                                 uptr dst_beg, uptr dst_end) {
  constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
  uptr dst_end_down = RoundDownTo(dst_end, granularity);
  uptr src_ptr = src_end;
  uptr dst_ptr = dst_end;

  while (dst_ptr > dst_beg) {
    uptr granule_beg = RoundDownTo(dst_ptr - 1, granularity);
    uptr unpoisoned_bytes = 0;

    uptr end = Max(granule_beg, dst_beg);
    for (; dst_ptr != end; --dst_ptr, --src_ptr)
      if (unpoisoned_bytes == 0 && !AddressIsPoisoned(src_ptr - 1))
        unpoisoned_bytes = dst_ptr - granule_beg;

    if (dst_ptr >= dst_end_down && !AddressIsPoisoned(dst_end))
      continue;

    if (granule_beg == dst_ptr || unpoisoned_bytes != 0)
      SetContainerGranule(granule_beg, unpoisoned_bytes);
    else if (!AddressIsPoisoned(dst_beg))
      SetContainerGranule(granule_beg, dst_beg - granule_beg);
  }
}

// A helper function for __sanitizer_copy_contiguous_container_annotations;
// it makes assumptions about the begin and end of the container and should
// not be used standalone.
static void CopyContainerFirstGranuleAnnotation(uptr src_beg, uptr dst_beg) {
  constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
  // First granule.
  uptr src_beg_down = RoundDownTo(src_beg, granularity);
  uptr dst_beg_down = RoundDownTo(dst_beg, granularity);
  if (dst_beg_down == dst_beg)
    return;
  if (!AddressIsPoisoned(src_beg))
    *(u8 *)MemToShadow(dst_beg_down) = *(u8 *)MemToShadow(src_beg_down);
  else if (!AddressIsPoisoned(dst_beg))
    SetContainerGranule(dst_beg_down, dst_beg - dst_beg_down);
}

// A helper function for __sanitizer_copy_contiguous_container_annotations;
// it makes assumptions about the begin and end of the container and should
// not be used standalone.
static void CopyContainerLastGranuleAnnotation(uptr src_end, uptr dst_end) {
  constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
  // Last granule.
  uptr src_end_down = RoundDownTo(src_end, granularity);
  uptr dst_end_down = RoundDownTo(dst_end, granularity);
  if (dst_end_down == dst_end || !AddressIsPoisoned(dst_end))
    return;
  if (AddressIsPoisoned(src_end))
    *(u8 *)MemToShadow(dst_end_down) = *(u8 *)MemToShadow(src_end_down);
  else
    SetContainerGranule(dst_end_down, src_end - src_end_down);
}

// This function copies ASan memory annotations (poisoned/unpoisoned states)
// from one buffer to another.
// Its main purpose is to help with relocating trivially relocatable objects,
// whose memory may be poisoned, without calling the copy constructor.
// However, it does not move the memory contents themselves, only the
// annotations.
// If the buffers aren't aligned (i.e. the distance between them isn't
// granule-aligned, src_beg % granularity != dst_beg % granularity), the
// function handles this by going byte by byte, which slows it down.
// The old buffer annotations are not removed. If necessary, the user can
// unpoison the old buffer with __asan_unpoison_memory_region.
void __sanitizer_copy_contiguous_container_annotations(const void *src_beg_p,
                                                       const void *src_end_p,
                                                       const void *dst_beg_p,
                                                       const void *dst_end_p) {
  if (!flags()->detect_container_overflow)
    return;

  VPrintf(3, "contiguous_container_src: %p %p\n", src_beg_p, src_end_p);
  VPrintf(3, "contiguous_container_dst: %p %p\n", dst_beg_p, dst_end_p);

  uptr src_beg = reinterpret_cast<uptr>(src_beg_p);
  uptr src_end = reinterpret_cast<uptr>(src_end_p);
  uptr dst_beg = reinterpret_cast<uptr>(dst_beg_p);
  uptr dst_end = reinterpret_cast<uptr>(dst_end_p);

  constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;

  if (src_beg > src_end || (dst_end - dst_beg) != (src_end - src_beg)) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportBadParamsToCopyContiguousContainerAnnotations(
        src_beg, src_end, dst_beg, dst_end, &stack);
  }

  if (src_beg == src_end || src_beg == dst_beg)
    return;
  // Due to support for overlapping buffers, we may have to copy elements
  // in reversed order, when the destination buffer starts in the middle of
  // the source buffer (or shares its first granule with it).
  //
  // When the buffers are not granule-aligned (or, to be specific, when the
  // distance between them is not), annotations have to be copied byte by
  // byte.
  //
  // The only remaining edge cases involve edge granules,
  // when the container starts or ends within a granule.
  uptr src_beg_up = RoundUpTo(src_beg, granularity);
  uptr src_end_up = RoundUpTo(src_end, granularity);
  bool copy_in_reversed_order = src_beg < dst_beg && dst_beg <= src_end_up;
  if (src_beg % granularity != dst_beg % granularity ||
      RoundDownTo(dst_end - 1, granularity) <= dst_beg) {
    if (copy_in_reversed_order)
      SlowReversedCopyContainerAnnotations(src_beg, src_end, dst_beg, dst_end);
    else
      SlowCopyContainerAnnotations(src_beg, src_end, dst_beg, dst_end);
    return;
  }

  // As the buffers are granule-aligned, we can just copy annotations of
  // granules from the middle.
  uptr dst_beg_up = RoundUpTo(dst_beg, granularity);
  uptr dst_end_down = RoundDownTo(dst_end, granularity);
  if (copy_in_reversed_order)
    CopyContainerLastGranuleAnnotation(src_end, dst_end);
  else
    CopyContainerFirstGranuleAnnotation(src_beg, dst_beg);

  if (dst_beg_up < dst_end_down) {
    internal_memmove((u8 *)MemToShadow(dst_beg_up),
                     (u8 *)MemToShadow(src_beg_up),
                     (dst_end_down - dst_beg_up) / granularity);
  }

  if (copy_in_reversed_order)
    CopyContainerFirstGranuleAnnotation(src_beg, dst_beg);
  else
    CopyContainerLastGranuleAnnotation(src_end, dst_end);
}
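
// A caller-side sketch of the function above (names illustrative; assumes
// [src, src + n) is addressable at the time the bytes are copied): when
// relocating n bytes of trivially relocatable objects, move the contents
// first, then mirror the shadow annotations, and optionally unpoison the old
// storage if it is about to be reused:
//   memcpy(dst, src, n);  // user-side move of the contents
//   __sanitizer_copy_contiguous_container_annotations(src, src + n,
//                                                     dst, dst + n);
//   __asan_unpoison_memory_region(src, n);  // optional, see comment above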

static const void *FindBadAddress(uptr begin, uptr end, bool poisoned) {
  CHECK_LE(begin, end);
  constexpr uptr kMaxRangeToCheck = 32;
  if (end - begin > kMaxRangeToCheck * 2) {
    if (auto *bad = FindBadAddress(begin, begin + kMaxRangeToCheck, poisoned))
      return bad;
    if (auto *bad = FindBadAddress(end - kMaxRangeToCheck, end, poisoned))
      return bad;
  }

  for (uptr i = begin; i < end; ++i)
    if (AddressIsPoisoned(i) != poisoned)
      return reinterpret_cast<const void *>(i);
  return nullptr;
}

const void *__sanitizer_contiguous_container_find_bad_address(
    const void *beg_p, const void *mid_p, const void *end_p) {
  if (!flags()->detect_container_overflow)
    return nullptr;
  uptr granularity = ASAN_SHADOW_GRANULARITY;
  uptr beg = reinterpret_cast<uptr>(beg_p);
  uptr end = reinterpret_cast<uptr>(end_p);
  uptr mid = reinterpret_cast<uptr>(mid_p);
  CHECK_LE(beg, mid);
  CHECK_LE(mid, end);
  // If the byte after the storage is unpoisoned, everything in the granule
  // before must stay unpoisoned.
  uptr annotations_end =
      (!AddrIsAlignedByGranularity(end) && !AddressIsPoisoned(end))
          ? RoundDownTo(end, granularity)
          : end;
  beg = Min(beg, annotations_end);
  mid = Min(mid, annotations_end);
  if (auto *bad = FindBadAddress(beg, mid, false))
    return bad;
  if (auto *bad = FindBadAddress(mid, annotations_end, true))
    return bad;
  return FindBadAddress(annotations_end, end, false);
}

int __sanitizer_verify_contiguous_container(const void *beg_p,
                                            const void *mid_p,
                                            const void *end_p) {
  return __sanitizer_contiguous_container_find_bad_address(beg_p, mid_p,
                                                           end_p) == nullptr;
}
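
// A debugging sketch using the verifier above (names illustrative): after
// annotating a vector-like container, this holds iff [data, data + size) is
// addressable and [data + size, data + capacity) is poisoned (modulo the
// trailing-granule relaxation described above):
//   CHECK(__sanitizer_verify_contiguous_container(data, data + size,
//                                                 data + capacity));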

const void *__sanitizer_double_ended_contiguous_container_find_bad_address(
    const void *storage_beg_p, const void *container_beg_p,
    const void *container_end_p, const void *storage_end_p) {
  if (!flags()->detect_container_overflow)
    return nullptr;
  uptr granularity = ASAN_SHADOW_GRANULARITY;
  uptr storage_beg = reinterpret_cast<uptr>(storage_beg_p);
  uptr storage_end = reinterpret_cast<uptr>(storage_end_p);
  uptr beg = reinterpret_cast<uptr>(container_beg_p);
  uptr end = reinterpret_cast<uptr>(container_end_p);

  // The prefix of the first granule of the container is unpoisoned.
  if (beg != end)
    beg = Max(storage_beg, RoundDownTo(beg, granularity));

  // If the byte after the storage is unpoisoned, the prefix of the last
  // granule is unpoisoned.
  uptr annotations_end = (!AddrIsAlignedByGranularity(storage_end) &&
                          !AddressIsPoisoned(storage_end))
                             ? RoundDownTo(storage_end, granularity)
                             : storage_end;
  storage_beg = Min(storage_beg, annotations_end);
  beg = Min(beg, annotations_end);
  end = Min(end, annotations_end);

  if (auto *bad = FindBadAddress(storage_beg, beg, true))
    return bad;
  if (auto *bad = FindBadAddress(beg, end, false))
    return bad;
  if (auto *bad = FindBadAddress(end, annotations_end, true))
    return bad;
  return FindBadAddress(annotations_end, storage_end, false);
}

int __sanitizer_verify_double_ended_contiguous_container(
    const void *storage_beg_p, const void *container_beg_p,
    const void *container_end_p, const void *storage_end_p) {
  return __sanitizer_double_ended_contiguous_container_find_bad_address(
             storage_beg_p, container_beg_p, container_end_p, storage_end_p) ==
         nullptr;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_poison_intra_object_redzone(uptr ptr, uptr size) {
  AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, true);
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_unpoison_intra_object_redzone(uptr ptr, uptr size) {
  AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, false);
}

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
bool WordIsPoisoned(uptr addr) {
  return (__asan_region_is_poisoned(addr, sizeof(uptr)) != 0);
}
}  // namespace __lsan