//===-- asan_poisoning.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Shadow memory poisoning by ASan RTL and by user application.
//===----------------------------------------------------------------------===//

#include "asan_poisoning.h"

#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"

namespace __asan {

static atomic_uint8_t can_poison_memory;

void SetCanPoisonMemory(bool value) {
  atomic_store(&can_poison_memory, value, memory_order_release);
}

bool CanPoisonMemory() {
  return atomic_load(&can_poison_memory, memory_order_acquire);
}

void PoisonShadow(uptr addr, uptr size, u8 value) {
  if (value && !CanPoisonMemory()) return;
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  CHECK(AddrIsAlignedByGranularity(addr + size));
  CHECK(AddrIsInMem(addr + size - ASAN_SHADOW_GRANULARITY));
  CHECK(REAL(memset));
  FastPoisonShadow(addr, size, value);
}

void PoisonShadowPartialRightRedzone(uptr addr,
                                     uptr size,
                                     uptr redzone_size,
                                     u8 value) {
  if (!CanPoisonMemory()) return;
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value);
}
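
// For example, with ASAN_SHADOW_GRANULARITY == 8, a call with size == 13 and
// redzone_size == 16 produces two shadow bytes: 0x00 for the first granule
// (all 8 bytes addressable) and 0x05 for the second (only the first 5 bytes
// addressable); any further redzone granules would receive `value`.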

struct ShadowSegmentEndpoint {
  u8 *chunk;
  s8 offset;  // in [0, ASAN_SHADOW_GRANULARITY)
  s8 value;   // = *chunk;

  explicit ShadowSegmentEndpoint(uptr address) {
    chunk = (u8 *)MemToShadow(address);
    offset = address & (ASAN_SHADOW_GRANULARITY - 1);
    value = *chunk;
  }
};
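
// For example, with an 8-byte granularity, the endpoint for address 0x1003
// has chunk == MemToShadow(0x1003), offset == 3, and `value` caching the
// shadow byte that describes the granule [0x1000, 0x1008).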

void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
  uptr end = ptr + size;
  if (Verbosity()) {
    Printf("__asan_%spoison_intra_object_redzone [%p,%p) %zd\n",
           poison ? "" : "un", (void *)ptr, (void *)end, size);
    if (Verbosity() >= 2)
      PRINT_CURRENT_STACK();
  }
  CHECK(size);
  CHECK_LE(size, 4096);
  CHECK(IsAligned(end, ASAN_SHADOW_GRANULARITY));
  if (!IsAligned(ptr, ASAN_SHADOW_GRANULARITY)) {
    *(u8 *)MemToShadow(ptr) =
        poison ? static_cast<u8>(ptr % ASAN_SHADOW_GRANULARITY) : 0;
    ptr |= ASAN_SHADOW_GRANULARITY - 1;
    ptr++;
  }
  for (; ptr < end; ptr += ASAN_SHADOW_GRANULARITY)
    *(u8 *)MemToShadow(ptr) = poison ? kAsanIntraObjectRedzone : 0;
}

}  // namespace __asan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;

// The current implementation of __asan_(un)poison_memory_region doesn't check
// that the user program (un)poisons the memory it owns. It poisons memory
// conservatively, and unpoisons progressively, to make sure the ASan shadow
// mapping invariant is preserved (see the detailed mapping description here:
// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm).
//
// * if the user asks to poison the region [left, right), the program poisons
//   at least [left, AlignDown(right)).
// * if the user asks to unpoison the region [left, right), the program
//   unpoisons at most [AlignDown(left), right).
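//
// For example, with an 8-byte granularity, poisoning [0x1003, 0x1013)
// poisons at least [0x1003, 0x1010), while unpoisoning the same region may
// unpoison as much as [0x1000, 0x1013).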
void __asan_poison_memory_region(void const volatile *addr, uptr size) {
  if (!flags()->allow_user_poisoning || size == 0) return;
  uptr beg_addr = (uptr)addr;
  uptr end_addr = beg_addr + size;
  VPrintf(3, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr,
          (void *)end_addr);
  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);
  if (beg.chunk == end.chunk) {
    CHECK_LT(beg.offset, end.offset);
    s8 value = beg.value;
    CHECK_EQ(value, end.value);
    // We can only poison memory if the byte in end.offset is unaddressable.
    // No need to re-poison memory if it is poisoned already.
    if (value > 0 && value <= end.offset) {
      if (beg.offset > 0) {
        *beg.chunk = Min(value, beg.offset);
      } else {
        *beg.chunk = kAsanUserPoisonedMemoryMagic;
      }
    }
    return;
  }
  CHECK_LT(beg.chunk, end.chunk);
  if (beg.offset > 0) {
    // Mark bytes from beg.offset as unaddressable.
    if (beg.value == 0) {
      *beg.chunk = beg.offset;
    } else {
      *beg.chunk = Min(beg.value, beg.offset);
    }
    beg.chunk++;
  }
  REAL(memset)(beg.chunk, kAsanUserPoisonedMemoryMagic, end.chunk - beg.chunk);
  // Poison only if the byte in end.offset is unaddressable.
  if (end.value > 0 && end.value <= end.offset) {
    *end.chunk = kAsanUserPoisonedMemoryMagic;
  }
}

void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
  if (!flags()->allow_user_poisoning || size == 0) return;
  uptr beg_addr = (uptr)addr;
  uptr end_addr = beg_addr + size;
  VPrintf(3, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr,
          (void *)end_addr);
  ShadowSegmentEndpoint beg(beg_addr);
  ShadowSegmentEndpoint end(end_addr);
  if (beg.chunk == end.chunk) {
    CHECK_LT(beg.offset, end.offset);
    s8 value = beg.value;
    CHECK_EQ(value, end.value);
    // We unpoison memory bytes up to end.offset if the granule is not
    // unpoisoned already.
    if (value != 0) {
      *beg.chunk = Max(value, end.offset);
    }
    return;
  }
  CHECK_LT(beg.chunk, end.chunk);
  REAL(memset)(beg.chunk, 0, end.chunk - beg.chunk);
  if (end.offset > 0 && end.value != 0) {
    *end.chunk = Max(end.value, end.offset);
  }
}

int __asan_address_is_poisoned(void const volatile *addr) {
  return __asan::AddressIsPoisoned((uptr)addr);
}

uptr __asan_region_is_poisoned(uptr beg, uptr size) {
  if (!size)
    return 0;
  uptr end = beg + size;
  if (!AddrIsInMem(beg))
    return beg;
  if (!AddrIsInMem(end))
    return end;
  CHECK_LT(beg, end);
  uptr aligned_b = RoundUpTo(beg, ASAN_SHADOW_GRANULARITY);
  uptr aligned_e = RoundDownTo(end, ASAN_SHADOW_GRANULARITY);
  uptr shadow_beg = MemToShadow(aligned_b);
  uptr shadow_end = MemToShadow(aligned_e);
  // First check the first and the last application bytes,
  // then check the ASAN_SHADOW_GRANULARITY-aligned region by calling
  // mem_is_zero on the corresponding shadow.
  if (!__asan::AddressIsPoisoned(beg) && !__asan::AddressIsPoisoned(end - 1) &&
      (shadow_end <= shadow_beg ||
       __sanitizer::mem_is_zero((const char *)shadow_beg,
                                shadow_end - shadow_beg)))
    return 0;
  // The fast check failed, so we have a poisoned byte somewhere.
  // Find it slowly.
  for (; beg < end; beg++)
    if (__asan::AddressIsPoisoned(beg))
      return beg;
  UNREACHABLE("mem_is_zero returned false, but poisoned byte was not found");
  return 0;
}
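
// Usage sketch (illustrative):
//   alignas(8) char buf[16];
//   __asan_poison_memory_region(buf + 8, 8);
//   uptr bad = __asan_region_is_poisoned((uptr)buf, 16);
//   // bad == (uptr)(buf + 8), the first poisoned byte; 0 would mean the
//   // whole region is addressable. The region should be unpoisoned again
//   // before the memory is reused.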

#define CHECK_SMALL_REGION(p, size, isWrite)                      \
  do {                                                            \
    uptr __p = reinterpret_cast<uptr>(p);                         \
    uptr __size = size;                                           \
    if (UNLIKELY(__asan::AddressIsPoisoned(__p) ||                \
                 __asan::AddressIsPoisoned(__p + __size - 1))) {  \
      GET_CURRENT_PC_BP_SP;                                       \
      uptr __bad = __asan_region_is_poisoned(__p, __size);        \
      __asan_report_error(pc, bp, sp, __bad, isWrite, __size, 0); \
    }                                                             \
  } while (false)

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u16 __sanitizer_unaligned_load16(const uu16 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u32 __sanitizer_unaligned_load32(const uu32 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
u64 __sanitizer_unaligned_load64(const uu64 *p) {
  CHECK_SMALL_REGION(p, sizeof(*p), false);
  return *p;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store16(uu16 *p, u16 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store32(uu32 *p, u32 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_unaligned_store64(uu64 *p, u64 x) {
  CHECK_SMALL_REGION(p, sizeof(*p), true);
  *p = x;
}
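
// These entry points let manually instrumented code perform unaligned,
// ASan-checked accesses. Illustrative use:
//   u32 v = __sanitizer_unaligned_load32(reinterpret_cast<const uu32 *>(p));
// reports an error if any of the four bytes starting at p is poisoned.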

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_poison_cxx_array_cookie(uptr p) {
  if (SANITIZER_WORDSIZE != 64) return;
  if (!flags()->poison_array_cookie) return;
  uptr s = MEM_TO_SHADOW(p);
  *reinterpret_cast<u8 *>(s) = kAsanArrayCookieMagic;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __asan_load_cxx_array_cookie(uptr *p) {
  if (SANITIZER_WORDSIZE != 64) return *p;
  if (!flags()->poison_array_cookie) return *p;
  uptr s = MEM_TO_SHADOW(reinterpret_cast<uptr>(p));
  u8 sval = *reinterpret_cast<u8 *>(s);
  if (sval == kAsanArrayCookieMagic) return *p;
  // If sval is not kAsanArrayCookieMagic it can only be freed memory,
  // which means that we are going to get a double-free. So, return 0 to avoid
  // an infinite loop of destructors. We don't want to report a double-free
  // here though, so print a warning just in case.
  // CHECK_EQ(sval, kAsanHeapFreeMagic);
  if (sval == kAsanHeapFreeMagic) {
    Report("AddressSanitizer: loaded array cookie from free-d memory; "
           "expect a double-free report\n");
    return 0;
  }
  // The cookie may remain unpoisoned if e.g. it comes from a custom
  // operator new defined inside a class.
  return *p;
}
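
// Background: for `new T[n]` with non-trivially-destructible T, the Itanium
// C++ ABI stores n in a word-sized cookie just before the array. Poisoning
// the cookie's shadow with kAsanArrayCookieMagic catches direct user accesses
// to it, while the loader above still returns the element count to the
// compiler-generated array destruction code.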

// This is a simplified version of __asan_(un)poison_memory_region, which
// assumes that the left border of the region to be poisoned is properly
// aligned.
static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
  if (size == 0) return;
  uptr aligned_size = size & ~(ASAN_SHADOW_GRANULARITY - 1);
  PoisonShadow(addr, aligned_size,
               do_poison ? kAsanStackUseAfterScopeMagic : 0);
  if (size == aligned_size)
    return;
  s8 end_offset = (s8)(size - aligned_size);
  s8 *shadow_end = (s8 *)MemToShadow(addr + aligned_size);
  s8 end_value = *shadow_end;
  if (do_poison) {
    // If possible, mark all the bytes mapping to the last shadow byte as
    // unaddressable.
    if (end_value > 0 && end_value <= end_offset)
      *shadow_end = (s8)kAsanStackUseAfterScopeMagic;
  } else {
    // If necessary, mark the first few bytes mapping to the last shadow byte
    // as addressable.
    if (end_value != 0)
      *shadow_end = Max(end_value, end_offset);
  }
}
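
// The __asan_set_shadow_xx functions below store the given shadow value
// directly; compiler-generated code calls them to (un)poison stack frames
// when inline shadow stores would be too large. The values follow the shadow
// encoding: 0x00 means a fully addressable granule, 0x01..0x07 mean only the
// first N bytes are addressable, and 0xf1/0xf2/0xf3 mark stack left/mid/right
// redzones, 0xf5 stack memory after return, 0xf8 out-of-scope locals.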

void __asan_set_shadow_00(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0, size);
}

void __asan_set_shadow_01(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x01, size);
}

void __asan_set_shadow_02(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x02, size);
}

void __asan_set_shadow_03(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x03, size);
}

void __asan_set_shadow_04(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x04, size);
}

void __asan_set_shadow_05(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x05, size);
}

void __asan_set_shadow_06(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x06, size);
}

void __asan_set_shadow_07(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0x07, size);
}

void __asan_set_shadow_f1(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf1, size);
}

void __asan_set_shadow_f2(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf2, size);
}

void __asan_set_shadow_f3(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf3, size);
}

void __asan_set_shadow_f5(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf5, size);
}

void __asan_set_shadow_f8(uptr addr, uptr size) {
  REAL(memset)((void *)addr, 0xf8, size);
}

void __asan_poison_stack_memory(uptr addr, uptr size) {
  VReport(1, "poisoning: %p %zx\n", (void *)addr, size);
  PoisonAlignedStackMemory(addr, size, true);
}

void __asan_unpoison_stack_memory(uptr addr, uptr size) {
  VReport(1, "unpoisoning: %p %zx\n", (void *)addr, size);
  PoisonAlignedStackMemory(addr, size, false);
}

static void FixUnalignedStorage(uptr storage_beg, uptr storage_end,
                                uptr &old_beg, uptr &old_end, uptr &new_beg,
                                uptr &new_end) {
  constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;
  if (UNLIKELY(!AddrIsAlignedByGranularity(storage_end))) {
    uptr end_down = RoundDownTo(storage_end, granularity);
    // Ignore the last unaligned granule if the storage is followed by an
    // unpoisoned byte, because we can't poison the prefix anyway. Don't call
    // AddressIsPoisoned at all if the container changes do not affect the
    // last granule.
    if ((((old_end != new_end) && Max(old_end, new_end) > end_down) ||
         ((old_beg != new_beg) && Max(old_beg, new_beg) > end_down)) &&
        !AddressIsPoisoned(storage_end)) {
      old_beg = Min(end_down, old_beg);
      old_end = Min(end_down, old_end);
      new_beg = Min(end_down, new_beg);
      new_end = Min(end_down, new_end);
    }
  }

  // Handle a misaligned begin and cut it off.
  if (UNLIKELY(!AddrIsAlignedByGranularity(storage_beg))) {
    uptr beg_up = RoundUpTo(storage_beg, granularity);
    // The first unaligned granule needs special handling only if we had bytes
    // there before and will have none after.
    if ((new_beg == new_end || new_beg >= beg_up) && old_beg != old_end &&
        old_beg < beg_up) {
      // Keep the granule prefix outside of the storage unpoisoned.
      uptr beg_down = RoundDownTo(storage_beg, granularity);
      *(u8 *)MemToShadow(beg_down) = storage_beg - beg_down;
      old_beg = Max(beg_up, old_beg);
      old_end = Max(beg_up, old_end);
      new_beg = Max(beg_up, new_beg);
      new_end = Max(beg_up, new_end);
    }
  }
}
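
// For example, if the storage ends at the unaligned address 0x1006 and the
// byte at 0x1006 is unpoisoned, the granule [0x1000, 0x1008) must keep its
// suffix addressable, which the prefix-only shadow encoding cannot express;
// all four container bounds are therefore clamped down to 0x1000 and the last
// granule is left untouched.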

void __sanitizer_annotate_contiguous_container(const void *beg_p,
                                               const void *end_p,
                                               const void *old_mid_p,
                                               const void *new_mid_p) {
  if (!flags()->detect_container_overflow)
    return;
  VPrintf(2, "contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p,
          new_mid_p);
  uptr storage_beg = reinterpret_cast<uptr>(beg_p);
  uptr storage_end = reinterpret_cast<uptr>(end_p);
  uptr old_end = reinterpret_cast<uptr>(old_mid_p);
  uptr new_end = reinterpret_cast<uptr>(new_mid_p);
  uptr old_beg = storage_beg;
  uptr new_beg = storage_beg;
  uptr granularity = ASAN_SHADOW_GRANULARITY;
  if (!(storage_beg <= old_end && storage_beg <= new_end &&
        old_end <= storage_end && new_end <= storage_end)) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportBadParamsToAnnotateContiguousContainer(storage_beg, storage_end,
                                                 old_end, new_end, &stack);
  }
  CHECK_LE(storage_end - storage_beg,
           FIRST_32_SECOND_64(1UL << 30, 1ULL << 40));  // Sanity check.

  if (old_end == new_end)
    return;  // Nothing to do here.

  FixUnalignedStorage(storage_beg, storage_end, old_beg, old_end, new_beg,
                      new_end);

  uptr a = RoundDownTo(Min(old_end, new_end), granularity);
  uptr c = RoundUpTo(Max(old_end, new_end), granularity);
  uptr d1 = RoundDownTo(old_end, granularity);
  // uptr d2 = RoundUpTo(old_mid, granularity);
  // Currently we should be in this state:
  //   [a, d1) is good, [d2, c) is bad, [d1, d2) is partially good.
  // Make a quick sanity check that we are indeed in this state.
  //
  // FIXME: Two of these three checks are disabled until we fix
  // https://github.com/google/sanitizers/issues/258.
  // if (d1 != d2)
  //   DCHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
  //
  // NOTE: curly brackets for the "if" below to silence a MSVC warning.
  if (a + granularity <= d1) {
    DCHECK_EQ(*(u8 *)MemToShadow(a), 0);
  }
  // if (d2 + granularity <= c && c <= end)
  //   DCHECK_EQ(*(u8 *)MemToShadow(c - granularity),
  //             kAsanContiguousContainerOOBMagic);

  uptr b1 = RoundDownTo(new_end, granularity);
  uptr b2 = RoundUpTo(new_end, granularity);
  // New state:
  //   [a, b1) is good, [b2, c) is bad, [b1, b2) is partially good.
  if (b1 > a)
    PoisonShadow(a, b1 - a, 0);
  else if (c > b2)
    PoisonShadow(b2, c - b2, kAsanContiguousContainerOOBMagic);
  if (b1 != b2) {
    CHECK_EQ(b2 - b1, granularity);
    *(u8 *)MemToShadow(b1) = static_cast<u8>(new_end - b1);
  }
}
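
// Usage sketch for a vector-like container (illustrative names):
//   // Storage is [data, data + capacity); the size grows from n to n + 1.
//   __sanitizer_annotate_contiguous_container(data, data + capacity,
//                                             data + n, data + n + 1);
// Afterwards data[n] is accessible, and accesses past the new end of the
// container are reported as container-overflow.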

// Annotates a double-ended contiguous memory area, like std::deque's chunk.
// It allows detecting buggy accesses to allocated but not yet used beginning
// or end items of such a container.
void __sanitizer_annotate_double_ended_contiguous_container(
    const void *storage_beg_p, const void *storage_end_p,
    const void *old_container_beg_p, const void *old_container_end_p,
    const void *new_container_beg_p, const void *new_container_end_p) {
  if (!flags()->detect_container_overflow)
    return;

  VPrintf(2, "contiguous_container: %p %p %p %p %p %p\n", storage_beg_p,
          storage_end_p, old_container_beg_p, old_container_end_p,
          new_container_beg_p, new_container_end_p);

  uptr storage_beg = reinterpret_cast<uptr>(storage_beg_p);
  uptr storage_end = reinterpret_cast<uptr>(storage_end_p);
  uptr old_beg = reinterpret_cast<uptr>(old_container_beg_p);
  uptr old_end = reinterpret_cast<uptr>(old_container_end_p);
  uptr new_beg = reinterpret_cast<uptr>(new_container_beg_p);
  uptr new_end = reinterpret_cast<uptr>(new_container_end_p);

  constexpr uptr granularity = ASAN_SHADOW_GRANULARITY;

  if (!(old_beg <= old_end && new_beg <= new_end) ||
      !(storage_beg <= new_beg && new_end <= storage_end) ||
      !(storage_beg <= old_beg && old_end <= storage_end)) {
    GET_STACK_TRACE_FATAL_HERE;
    ReportBadParamsToAnnotateDoubleEndedContiguousContainer(
        storage_beg, storage_end, old_beg, old_end, new_beg, new_end, &stack);
  }
  CHECK_LE(storage_end - storage_beg,
           FIRST_32_SECOND_64(1UL << 30, 1ULL << 40));  // Sanity check.

  if ((old_beg == old_end && new_beg == new_end) ||
      (old_beg == new_beg && old_end == new_end))
    return;  // Nothing to do here.

  FixUnalignedStorage(storage_beg, storage_end, old_beg, old_end, new_beg,
                      new_end);

  // Handle non-intersecting new/old containers separately, to keep the
  // intersecting case simpler.
  if (old_beg == old_end || new_beg == new_end || new_end <= old_beg ||
      old_end <= new_beg) {
    if (old_beg != old_end) {
      // Poison the old container.
      uptr a = RoundDownTo(old_beg, granularity);
      uptr b = RoundUpTo(old_end, granularity);
      PoisonShadow(a, b - a, kAsanContiguousContainerOOBMagic);
    }

    if (new_beg != new_end) {
      // Unpoison the new container.
      uptr a = RoundDownTo(new_beg, granularity);
      uptr b = RoundDownTo(new_end, granularity);
      PoisonShadow(a, b - a, 0);
      if (!AddrIsAlignedByGranularity(new_end))
        *(u8 *)MemToShadow(b) = static_cast<u8>(new_end - b);
    }

    return;
  }

  // The intersection of the old and new containers is not empty.
  CHECK_LT(new_beg, old_end);
  CHECK_GT(new_end, old_beg);

  if (new_beg < old_beg) {
    // Round down because we can't poison prefixes.
    uptr a = RoundDownTo(new_beg, granularity);
    // Round down and ignore [c, old_beg), as its state is defined by the
    // unchanged [old_beg, old_end).
    uptr c = RoundDownTo(old_beg, granularity);
    PoisonShadow(a, c - a, 0);
  } else if (new_beg > old_beg) {
    // Round down and poison [a, old_beg) because it was unpoisoned only as a
    // prefix.
    uptr a = RoundDownTo(old_beg, granularity);
    // Round down and ignore [c, new_beg), as its state is defined by the
    // unchanged [new_beg, old_end).
    uptr c = RoundDownTo(new_beg, granularity);

    PoisonShadow(a, c - a, kAsanContiguousContainerOOBMagic);
  }

  if (new_end > old_end) {
    // Round down; the granule containing old_end becomes fully addressable.
    uptr a = RoundDownTo(old_end, granularity);
    // Round down and handle the remainder below.
    uptr c = RoundDownTo(new_end, granularity);
    PoisonShadow(a, c - a, 0);
    if (!AddrIsAlignedByGranularity(new_end))
      *(u8 *)MemToShadow(c) = static_cast<u8>(new_end - c);
  } else if (new_end < old_end) {
    // Round up and handle the remainder below.
    uptr a2 = RoundUpTo(new_end, granularity);
    // Round up to poison the entire granule, as there was nothing in
    // [old_end, c2).
    uptr c2 = RoundUpTo(old_end, granularity);
    PoisonShadow(a2, c2 - a2, kAsanContiguousContainerOOBMagic);

    if (!AddrIsAlignedByGranularity(new_end)) {
      uptr a = RoundDownTo(new_end, granularity);
      *(u8 *)MemToShadow(a) = static_cast<u8>(new_end - a);
    }
  }
}
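
// Usage sketch for a deque-like chunk (illustrative, offsets in bytes):
//   // Elements occupy [chunk + beg, chunk + end); popping the first element
//   // of byte size s moves the container's beginning forward:
//   __sanitizer_annotate_double_ended_contiguous_container(
//       chunk, chunk + capacity, chunk + beg, chunk + end,
//       chunk + beg + s, chunk + end);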

static const void *FindBadAddress(uptr begin, uptr end, bool poisoned) {
  CHECK_LE(begin, end);
  constexpr uptr kMaxRangeToCheck = 32;
  if (end - begin > kMaxRangeToCheck * 2) {
    if (auto *bad = FindBadAddress(begin, begin + kMaxRangeToCheck, poisoned))
      return bad;
    if (auto *bad = FindBadAddress(end - kMaxRangeToCheck, end, poisoned))
      return bad;
  } else {
    for (uptr i = begin; i < end; ++i)
      if (AddressIsPoisoned(i) != poisoned)
        return reinterpret_cast<const void *>(i);
  }
  return nullptr;
}
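
// Checking a large range byte-by-byte would make the verification functions
// below O(size), so FindBadAddress inspects only the first and last
// kMaxRangeToCheck bytes of long ranges; a mismatch strictly in the middle
// may go undetected.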

const void *__sanitizer_contiguous_container_find_bad_address(
    const void *beg_p, const void *mid_p, const void *end_p) {
  if (!flags()->detect_container_overflow)
    return nullptr;
  uptr granularity = ASAN_SHADOW_GRANULARITY;
  uptr beg = reinterpret_cast<uptr>(beg_p);
  uptr end = reinterpret_cast<uptr>(end_p);
  uptr mid = reinterpret_cast<uptr>(mid_p);
  CHECK_LE(beg, mid);
  CHECK_LE(mid, end);
  // If the byte after the storage is unpoisoned, everything in the granule
  // before must stay unpoisoned.
  uptr annotations_end =
      (!AddrIsAlignedByGranularity(end) && !AddressIsPoisoned(end))
          ? RoundDownTo(end, granularity)
          : end;
  beg = Min(beg, annotations_end);
  mid = Min(mid, annotations_end);
  if (auto *bad = FindBadAddress(beg, mid, false))
    return bad;
  if (auto *bad = FindBadAddress(mid, annotations_end, true))
    return bad;
  return FindBadAddress(annotations_end, end, false);
}

int __sanitizer_verify_contiguous_container(const void *beg_p,
                                            const void *mid_p,
                                            const void *end_p) {
  return __sanitizer_contiguous_container_find_bad_address(beg_p, mid_p,
                                                           end_p) == nullptr;
}

const void *__sanitizer_double_ended_contiguous_container_find_bad_address(
    const void *storage_beg_p, const void *container_beg_p,
    const void *container_end_p, const void *storage_end_p) {
  if (!flags()->detect_container_overflow)
    return nullptr;
  uptr granularity = ASAN_SHADOW_GRANULARITY;
  uptr storage_beg = reinterpret_cast<uptr>(storage_beg_p);
  uptr storage_end = reinterpret_cast<uptr>(storage_end_p);
  uptr beg = reinterpret_cast<uptr>(container_beg_p);
  uptr end = reinterpret_cast<uptr>(container_end_p);

  // The prefix of the first granule of the container is unpoisoned.
  if (beg != end)
    beg = Max(storage_beg, RoundDownTo(beg, granularity));

  // If the byte after the storage is unpoisoned, the prefix of the last
  // granule is unpoisoned.
  uptr annotations_end = (!AddrIsAlignedByGranularity(storage_end) &&
                          !AddressIsPoisoned(storage_end))
                             ? RoundDownTo(storage_end, granularity)
                             : storage_end;
  storage_beg = Min(storage_beg, annotations_end);
  beg = Min(beg, annotations_end);
  end = Min(end, annotations_end);

  if (auto *bad = FindBadAddress(storage_beg, beg, true))
    return bad;
  if (auto *bad = FindBadAddress(beg, end, false))
    return bad;
  if (auto *bad = FindBadAddress(end, annotations_end, true))
    return bad;
  return FindBadAddress(annotations_end, storage_end, false);
}

int __sanitizer_verify_double_ended_contiguous_container(
    const void *storage_beg_p, const void *container_beg_p,
    const void *container_end_p, const void *storage_end_p) {
  return __sanitizer_double_ended_contiguous_container_find_bad_address(
             storage_beg_p, container_beg_p, container_end_p, storage_end_p) ==
         nullptr;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_poison_intra_object_redzone(uptr ptr, uptr size) {
  AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, true);
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __asan_unpoison_intra_object_redzone(uptr ptr, uptr size) {
  AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, false);
}

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
bool WordIsPoisoned(uptr addr) {
  return (__asan_region_is_poisoned(addr, sizeof(uptr)) != 0);
}
}  // namespace __lsan