//===-- sanitizer_mac.cpp -------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between various sanitizers' runtime libraries and
// implements OSX-specific functions.
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"
#if SANITIZER_APPLE
# include "interception/interception.h"
# include "sanitizer_mac.h"

// Use 64-bit inodes in file operations. ASan does not support OS X 10.5, so
// the clients will most certainly use 64-bit ones as well.
# ifndef _DARWIN_USE_64_BIT_INODE
#  define _DARWIN_USE_64_BIT_INODE 1
# endif
# include <stdio.h>

# include "sanitizer_common.h"
# include "sanitizer_file.h"
# include "sanitizer_flags.h"
# include "sanitizer_interface_internal.h"
# include "sanitizer_internal_defs.h"
# include "sanitizer_libc.h"
# include "sanitizer_platform_limits_posix.h"
# include "sanitizer_procmaps.h"
# include "sanitizer_ptrauth.h"

# if !SANITIZER_IOS
#  include <crt_externs.h> // for _NSGetEnviron
# else
extern char **environ;
# endif

# if defined(__has_include) && __has_include(<os/trace.h>)
#  define SANITIZER_OS_TRACE 1
#  include <os/trace.h>
# else
#  define SANITIZER_OS_TRACE 0
# endif

// import new crash reporting api
# if defined(__has_include) && __has_include(<CrashReporterClient.h>)
#  define HAVE_CRASHREPORTERCLIENT_H 1
#  include <CrashReporterClient.h>
# else
#  define HAVE_CRASHREPORTERCLIENT_H 0
# endif

# if !SANITIZER_IOS
#  include <crt_externs.h> // for _NSGetArgv and _NSGetEnviron
# else
extern "C" {
extern char ***_NSGetArgv(void);
}
# endif

# include <asl.h>
# include <dlfcn.h> // for dladdr()
# include <errno.h>
# include <fcntl.h>
# include <libkern/OSAtomic.h>
# include <mach-o/dyld.h>
# include <mach/mach.h>
# include <mach/mach_time.h>
# include <mach/vm_statistics.h>
# include <malloc/malloc.h>
# include <os/log.h>
# include <pthread.h>
# include <pthread/introspection.h>
# include <sched.h>
# include <signal.h>
# include <spawn.h>
# include <stdlib.h>
# include <sys/ioctl.h>
# include <sys/mman.h>
# include <sys/resource.h>
# include <sys/stat.h>
# include <sys/sysctl.h>
# include <sys/types.h>
# include <sys/wait.h>
# include <unistd.h>
# include <util.h>

// From <crt_externs.h>, but we don't have that file on iOS.
extern "C" {
extern char ***_NSGetArgv(void);
extern char ***_NSGetEnviron(void);
}

// From <mach/mach_vm.h>, but we don't have that file on iOS.
extern "C" {
extern kern_return_t mach_vm_region_recurse(
    vm_map_t target_task,
    mach_vm_address_t *address,
    mach_vm_size_t *size,
    natural_t *nesting_depth,
    vm_region_recurse_info_t info,
    mach_msg_type_number_t *infoCnt);
}

namespace __sanitizer {

#include "sanitizer_syscall_generic.inc"

// Direct syscalls, don't call libmalloc hooks (but not available on 10.6).
extern "C" void *__mmap(void *addr, size_t len, int prot, int flags, int fildes,
                        off_t off) SANITIZER_WEAK_ATTRIBUTE;
extern "C" int __munmap(void *, size_t) SANITIZER_WEAK_ATTRIBUTE;

// ---------------------- sanitizer_libc.h

// From <mach/vm_statistics.h>, but not on older OSs.
#ifndef VM_MEMORY_SANITIZER
#define VM_MEMORY_SANITIZER 99
#endif

// XNU on Darwin provides a mmap flag that optimizes allocation/deallocation of
// giant memory regions (i.e. shadow memory regions).
#define kXnuFastMmapFd 0x4
static size_t kXnuFastMmapThreshold = 2 << 30; // 2 GB
static bool use_xnu_fast_mmap = false;

uptr internal_mmap(void *addr, size_t length, int prot, int flags,
                   int fd, u64 offset) {
  if (fd == -1) {
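    // For anonymous mappings, encode a VM tag in the fd argument so that the
    // sanitizer's mappings are attributed to it in tools such as vmmap.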
    fd = VM_MAKE_TAG(VM_MEMORY_SANITIZER);
    if (length >= kXnuFastMmapThreshold) {
      if (use_xnu_fast_mmap) fd |= kXnuFastMmapFd;
    }
  }
  if (&__mmap) return (uptr)__mmap(addr, length, prot, flags, fd, offset);
  return (uptr)mmap(addr, length, prot, flags, fd, offset);
}

uptr internal_munmap(void *addr, uptr length) {
  if (&__munmap) return __munmap(addr, length);
  return munmap(addr, length);
}

uptr internal_mremap(void *old_address, uptr old_size, uptr new_size, int flags,
                     void *new_address) {
  CHECK(false && "internal_mremap is unimplemented on Mac");
  return 0;
}

int internal_mprotect(void *addr, uptr length, int prot) {
  return mprotect(addr, length, prot);
}

int internal_madvise(uptr addr, uptr length, int advice) {
  return madvise((void *)addr, length, advice);
}

uptr internal_close(fd_t fd) {
  return close(fd);
}

uptr internal_open(const char *filename, int flags) {
  return open(filename, flags);
}

uptr internal_open(const char *filename, int flags, u32 mode) {
  return open(filename, flags, mode);
}

uptr internal_read(fd_t fd, void *buf, uptr count) {
  return read(fd, buf, count);
}

uptr internal_write(fd_t fd, const void *buf, uptr count) {
  return write(fd, buf, count);
}

uptr internal_stat(const char *path, void *buf) {
  return stat(path, (struct stat *)buf);
}

uptr internal_lstat(const char *path, void *buf) {
  return lstat(path, (struct stat *)buf);
}

uptr internal_fstat(fd_t fd, void *buf) {
  return fstat(fd, (struct stat *)buf);
}

uptr internal_filesize(fd_t fd) {
  struct stat st;
  if (internal_fstat(fd, &st))
    return -1;
  return (uptr)st.st_size;
}

uptr internal_dup(int oldfd) {
  return dup(oldfd);
}

uptr internal_dup2(int oldfd, int newfd) {
  return dup2(oldfd, newfd);
}

uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
  return readlink(path, buf, bufsize);
}

uptr internal_unlink(const char *path) {
  return unlink(path);
}

uptr internal_sched_yield() {
  return sched_yield();
}

void internal__exit(int exitcode) {
  _exit(exitcode);
}

void internal_usleep(u64 useconds) { usleep(useconds); }

uptr internal_getpid() {
  return getpid();
}

int internal_dlinfo(void *handle, int request, void *p) {
  UNIMPLEMENTED();
}

int internal_sigaction(int signum, const void *act, void *oldact) {
  return sigaction(signum,
                   (const struct sigaction *)act, (struct sigaction *)oldact);
}

void internal_sigfillset(__sanitizer_sigset_t *set) { sigfillset(set); }

uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
                          __sanitizer_sigset_t *oldset) {
  // Don't use sigprocmask here, because it affects all threads.
  return pthread_sigmask(how, set, oldset);
}

// Doesn't call pthread_atfork() handlers (but not available on 10.6).
extern "C" pid_t __fork(void) SANITIZER_WEAK_ATTRIBUTE;

int internal_fork() {
  if (&__fork)
    return __fork();
  return fork();
}

int internal_sysctl(const int *name, unsigned int namelen, void *oldp,
                    uptr *oldlenp, const void *newp, uptr newlen) {
  return sysctl(const_cast<int *>(name), namelen, oldp, (size_t *)oldlenp,
                const_cast<void *>(newp), (size_t)newlen);
}

int internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp,
                          const void *newp, uptr newlen) {
  return sysctlbyname(sname, oldp, (size_t *)oldlenp, const_cast<void *>(newp),
                      (size_t)newlen);
}

static fd_t internal_spawn_impl(const char *argv[], const char *envp[],
                                pid_t *pid) {
  fd_t primary_fd = kInvalidFd;
  fd_t secondary_fd = kInvalidFd;

  auto fd_closer = at_scope_exit([&] {
    internal_close(primary_fd);
    internal_close(secondary_fd);
  });

  // We need a new pseudoterminal to avoid buffering problems. The 'atos' tool
  // in particular detects when it's talking to a pipe and forgets to flush the
  // output stream after sending a response.
  primary_fd = posix_openpt(O_RDWR);
  if (primary_fd == kInvalidFd)
    return kInvalidFd;

  int res = grantpt(primary_fd) || unlockpt(primary_fd);
  if (res != 0) return kInvalidFd;

  // Use TIOCPTYGNAME instead of ptsname() to avoid threading problems.
  char secondary_pty_name[128];
  res = ioctl(primary_fd, TIOCPTYGNAME, secondary_pty_name);
  if (res == -1) return kInvalidFd;

  secondary_fd = internal_open(secondary_pty_name, O_RDWR);
  if (secondary_fd == kInvalidFd)
    return kInvalidFd;

  // File descriptor actions
  posix_spawn_file_actions_t acts;
  res = posix_spawn_file_actions_init(&acts);
  if (res != 0) return kInvalidFd;

  auto acts_cleanup = at_scope_exit([&] {
    posix_spawn_file_actions_destroy(&acts);
  });

  res = posix_spawn_file_actions_adddup2(&acts, secondary_fd, STDIN_FILENO) ||
        posix_spawn_file_actions_adddup2(&acts, secondary_fd, STDOUT_FILENO) ||
        posix_spawn_file_actions_addclose(&acts, secondary_fd);
  if (res != 0) return kInvalidFd;

  // Spawn attributes
  posix_spawnattr_t attrs;
  res = posix_spawnattr_init(&attrs);
  if (res != 0) return kInvalidFd;

  auto attrs_cleanup = at_scope_exit([&] {
    posix_spawnattr_destroy(&attrs);
  });

  // In the spawned process, close all file descriptors that are not explicitly
  // described by the file actions object. This is a Darwin-specific extension.
  res = posix_spawnattr_setflags(&attrs, POSIX_SPAWN_CLOEXEC_DEFAULT);
  if (res != 0) return kInvalidFd;

  // posix_spawn
  char **argv_casted = const_cast<char **>(argv);
  char **envp_casted = const_cast<char **>(envp);
  res = posix_spawn(pid, argv[0], &acts, &attrs, argv_casted, envp_casted);
  if (res != 0) return kInvalidFd;

  // Disable echo in the new terminal, disable CR.
  struct termios termflags;
  tcgetattr(primary_fd, &termflags);
  termflags.c_oflag &= ~ONLCR;
  termflags.c_lflag &= ~ECHO;
  tcsetattr(primary_fd, TCSANOW, &termflags);

  // On success, do not close primary_fd on scope exit.
  fd_t fd = primary_fd;
  primary_fd = kInvalidFd;

  return fd;
}

fd_t internal_spawn(const char *argv[], const char *envp[], pid_t *pid) {
  // The client program may close its stdin and/or stdout and/or stderr thus
  // allowing open/posix_openpt to reuse file descriptors 0, 1 or 2. In this
  // case the communication is broken if either the parent or the child tries to
  // close or duplicate these descriptors. We temporarily reserve these
  // descriptors here to prevent this.
  fd_t low_fds[3];
  size_t count = 0;

  for (; count < 3; count++) {
    low_fds[count] = posix_openpt(O_RDWR);
    if (low_fds[count] >= STDERR_FILENO)
      break;
  }

  fd_t fd = internal_spawn_impl(argv, envp, pid);

  for (; count > 0; count--) {
    internal_close(low_fds[count]);
  }

  return fd;
}

uptr internal_rename(const char *oldpath, const char *newpath) {
  return rename(oldpath, newpath);
}

uptr internal_ftruncate(fd_t fd, uptr size) {
  return ftruncate(fd, size);
}

uptr internal_execve(const char *filename, char *const argv[],
                     char *const envp[]) {
  return execve(filename, argv, envp);
}

uptr internal_waitpid(int pid, int *status, int options) {
  return waitpid(pid, status, options);
}

// ----------------- sanitizer_common.h
bool FileExists(const char *filename) {
  if (ShouldMockFailureToOpen(filename))
    return false;
  struct stat st;
  if (stat(filename, &st))
    return false;
  // Sanity check: filename is a regular file.
  return S_ISREG(st.st_mode);
}

bool DirExists(const char *path) {
  struct stat st;
  if (stat(path, &st))
    return false;
  return S_ISDIR(st.st_mode);
}

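// pthread_threadid_np() returns the 64-bit, system-wide unique id of the
// calling thread.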
tid_t GetTid() {
  tid_t tid;
  pthread_threadid_np(nullptr, &tid);
  return tid;
}

void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom) {
  CHECK(stack_top);
  CHECK(stack_bottom);
  uptr stacksize = pthread_get_stacksize_np(pthread_self());
  // pthread_get_stacksize_np() returns an incorrect stack size for the main
  // thread on Mavericks. See
  // https://github.com/google/sanitizers/issues/261
  if ((GetMacosAlignedVersion() >= MacosVersion(10, 9)) && at_initialization &&
      stacksize == (1 << 19)) {
    struct rlimit rl;
    CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);
    // Most often rl.rlim_cur will be the desired 8M.
    if (rl.rlim_cur < kMaxThreadStackSize) {
      stacksize = rl.rlim_cur;
    } else {
      stacksize = kMaxThreadStackSize;
    }
  }
  void *stackaddr = pthread_get_stackaddr_np(pthread_self());
  *stack_top = (uptr)stackaddr;
  *stack_bottom = *stack_top - stacksize;
}

char **GetEnviron() {
#if !SANITIZER_IOS
  char ***env_ptr = _NSGetEnviron();
  if (!env_ptr) {
    Report("_NSGetEnviron() returned NULL. Please make sure __asan_init() is "
           "called after libSystem_initializer().\n");
    CHECK(env_ptr);
  }
  char **environ = *env_ptr;
#endif
  CHECK(environ);
  return environ;
}

const char *GetEnv(const char *name) {
  char **env = GetEnviron();
  uptr name_len = internal_strlen(name);
  while (*env != 0) {
    uptr len = internal_strlen(*env);
    if (len > name_len) {
      const char *p = *env;
      if (!internal_memcmp(p, name, name_len) &&
          p[name_len] == '=') { // Match.
        return *env + name_len + 1; // String starting after =.
      }
    }
    env++;
  }
  return 0;
}

uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
  CHECK_LE(kMaxPathLength, buf_len);

  // On OS X the executable path is saved to the stack by dyld. Reading it
  // from there is much faster than calling dladdr, especially for large
  // binaries with symbols.
  InternalMmapVector<char> exe_path(kMaxPathLength);
  uint32_t size = exe_path.size();
  if (_NSGetExecutablePath(exe_path.data(), &size) == 0 &&
      realpath(exe_path.data(), buf) != 0) {
    return internal_strlen(buf);
  }
  return 0;
}

uptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) {
  return ReadBinaryName(buf, buf_len);
}

void ReExec() {
  UNIMPLEMENTED();
}

void CheckASLR() {
  // Do nothing
}

void CheckMPROTECT() {
  // Do nothing
}

uptr GetPageSize() {
  return sysconf(_SC_PAGESIZE);
}

extern "C" unsigned malloc_num_zones;
extern "C" malloc_zone_t **malloc_zones;
malloc_zone_t sanitizer_zone;

// We need to make sure that sanitizer_zone is registered as malloc_zones[0]. If
// libmalloc tries to set up a different zone as malloc_zones[0], it will call
// mprotect(malloc_zones, ..., PROT_READ). This interceptor will catch that and
// make sure we are still the first (default) zone.
void MprotectMallocZones(void *addr, int prot) {
  if (addr == malloc_zones && prot == PROT_READ) {
    if (malloc_num_zones > 1 && malloc_zones[0] != &sanitizer_zone) {
      for (unsigned i = 1; i < malloc_num_zones; i++) {
        if (malloc_zones[i] == &sanitizer_zone) {
          // Swap malloc_zones[0] and malloc_zones[i].
          malloc_zones[i] = malloc_zones[0];
          malloc_zones[0] = &sanitizer_zone;
          break;
        }
      }
    }
  }
}

void FutexWait(atomic_uint32_t *p, u32 cmp) {
  // FIXME: implement actual blocking.
  sched_yield();
}

void FutexWake(atomic_uint32_t *p, u32 count) {}

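// Note: this is backed by gettimeofday(), so the result only has microsecond
// granularity even though it is expressed in nanoseconds.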
u64 NanoTime() {
  timeval tv;
  internal_memset(&tv, 0, sizeof(tv));
  gettimeofday(&tv, 0);
  return (u64)tv.tv_sec * 1000*1000*1000 + tv.tv_usec * 1000;
}

// This needs to be called during initialization to avoid being racy.
u64 MonotonicNanoTime() {
  static mach_timebase_info_data_t timebase_info;
  if (timebase_info.denom == 0) mach_timebase_info(&timebase_info);
  return (mach_absolute_time() * timebase_info.numer) / timebase_info.denom;
}

uptr GetTlsSize() {
  return 0;
}

void InitTlsSize() {
}

uptr TlsBaseAddr() {
  uptr segbase = 0;
#if defined(__x86_64__)
  asm("movq %%gs:0,%0" : "=r"(segbase));
#elif defined(__i386__)
  asm("movl %%gs:0,%0" : "=r"(segbase));
#elif defined(__aarch64__)
  asm("mrs %x0, tpidrro_el0" : "=r"(segbase));
  segbase &= 0x07ul; // clearing lower bits, cpu id stored there
#endif
  return segbase;
}

// The size of the tls on darwin does not appear to be well documented,
// however the vm memory map suggests that it is 1024 uptrs in size,
// with a size of 0x2000 bytes on x86_64 and 0x1000 bytes on i386.
uptr TlsSize() {
#if defined(__x86_64__) || defined(__i386__)
  return 1024 * sizeof(uptr);
#else
  return 0;
#endif
}

void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size) {
#if !SANITIZER_GO
  uptr stack_top, stack_bottom;
  GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
  *stk_addr = stack_bottom;
  *stk_size = stack_top - stack_bottom;
  *tls_addr = TlsBaseAddr();
  *tls_size = TlsSize();
#else
  *stk_addr = 0;
  *stk_size = 0;
  *tls_addr = 0;
  *tls_size = 0;
#endif
}

void ListOfModules::init() {
  clearOrInit();
  MemoryMappingLayout memory_mapping(false);
  memory_mapping.DumpListOfModules(&modules_);
}

void ListOfModules::fallbackInit() { clear(); }

static HandleSignalMode GetHandleSignalModeImpl(int signum) {
  switch (signum) {
    case SIGABRT:
      return common_flags()->handle_abort;
    case SIGILL:
      return common_flags()->handle_sigill;
    case SIGTRAP:
      return common_flags()->handle_sigtrap;
    case SIGFPE:
      return common_flags()->handle_sigfpe;
    case SIGSEGV:
      return common_flags()->handle_segv;
    case SIGBUS:
      return common_flags()->handle_sigbus;
  }
  return kHandleSignalNo;
}

HandleSignalMode GetHandleSignalMode(int signum) {
  // Handling fatal signals on watchOS and tvOS devices is disallowed.
  if ((SANITIZER_WATCHOS || SANITIZER_TVOS) && !(SANITIZER_IOSSIM))
    return kHandleSignalNo;
  HandleSignalMode result = GetHandleSignalModeImpl(signum);
  if (result == kHandleSignalYes && !common_flags()->allow_user_segv_handler)
    return kHandleSignalExclusive;
  return result;
}

// Offset example:
// XNU 17 -- macOS 10.13 -- iOS 11 -- tvOS 11 -- watchOS 4
constexpr u16 GetOSMajorKernelOffset() {
  if (TARGET_OS_OSX) return 4;
  if (TARGET_OS_IOS || TARGET_OS_TV) return 6;
  if (TARGET_OS_WATCH) return 13;
}

using VersStr = char[64];

static uptr ApproximateOSVersionViaKernelVersion(VersStr vers) {
  u16 kernel_major = GetDarwinKernelVersion().major;
  u16 offset = GetOSMajorKernelOffset();
  CHECK_GE(kernel_major, offset);
  u16 os_major = kernel_major - offset;

  const char *format = "%d.0";
  if (TARGET_OS_OSX) {
    if (os_major >= 16) { // macOS 11+
      os_major -= 5;
    } else { // macOS 10.15 and below
      format = "10.%d";
    }
  }
  return internal_snprintf(vers, sizeof(VersStr), format, os_major);
}

static void GetOSVersion(VersStr vers) {
  uptr len = sizeof(VersStr);
  if (SANITIZER_IOSSIM) {
    const char *vers_env = GetEnv("SIMULATOR_RUNTIME_VERSION");
    if (!vers_env) {
      Report("ERROR: Running in simulator but SIMULATOR_RUNTIME_VERSION env "
             "var is not set.\n");
      Die();
    }
    len = internal_strlcpy(vers, vers_env, len);
  } else {
    int res =
        internal_sysctlbyname("kern.osproductversion", vers, &len, nullptr, 0);

    // XNU 17 (macOS 10.13) and below do not provide the sysctl
    // `kern.osproductversion` entry (res != 0).
    bool no_os_version = res != 0;

    // For launchd, sanitizer initialization runs before sysctl is setup
    // (res == 0 && len != strlen(vers), vers is not a valid version). However,
    // the kernel version `kern.osrelease` is available.
    bool launchd = (res == 0 && internal_strlen(vers) < 3);
    if (launchd) CHECK_EQ(internal_getpid(), 1);

    if (no_os_version || launchd) {
      len = ApproximateOSVersionViaKernelVersion(vers);
    }
  }
  CHECK_LT(len, sizeof(VersStr));
}

void ParseVersion(const char *vers, u16 *major, u16 *minor) {
  // Format: <major>.<minor>[.<patch>]\0
  CHECK_GE(internal_strlen(vers), 3);
  const char *p = vers;
  *major = internal_simple_strtoll(p, &p, /*base=*/10);
  CHECK_EQ(*p, '.');
  p += 1;
  *minor = internal_simple_strtoll(p, &p, /*base=*/10);
}

// Aligned versions example:
// macOS 10.15 -- iOS 13 -- tvOS 13 -- watchOS 6
static void MapToMacos(u16 *major, u16 *minor) {
  if (TARGET_OS_OSX)
    return;

  if (TARGET_OS_IOS || TARGET_OS_TV)
    *major += 2;
  else if (TARGET_OS_WATCH)
    *major += 9;
  else
    UNREACHABLE("unsupported platform");

  if (*major >= 16) { // macOS 11+
    *major -= 5;
  } else { // macOS 10.15 and below
    *minor = *major;
    *major = 10;
  }
}

static MacosVersion GetMacosAlignedVersionInternal() {
  VersStr vers = {};
  GetOSVersion(vers);

  u16 major, minor;
  ParseVersion(vers, &major, &minor);
  MapToMacos(&major, &minor);

  return MacosVersion(major, minor);
}

static_assert(sizeof(MacosVersion) == sizeof(atomic_uint32_t::Type),
              "MacosVersion cache size");
static atomic_uint32_t cached_macos_version;

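// The parsed version is cached bit-for-bit in an atomic u32 (see the
// static_assert above); a stored value of 0 means "not computed yet".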
MacosVersion GetMacosAlignedVersion() {
  atomic_uint32_t::Type result =
      atomic_load(&cached_macos_version, memory_order_acquire);
  if (!result) {
    MacosVersion version = GetMacosAlignedVersionInternal();
    result = *reinterpret_cast<atomic_uint32_t::Type *>(&version);
    atomic_store(&cached_macos_version, result, memory_order_release);
  }
  return *reinterpret_cast<MacosVersion *>(&result);
}

DarwinKernelVersion GetDarwinKernelVersion() {
  VersStr vers = {};
  uptr len = sizeof(VersStr);
  int res = internal_sysctlbyname("kern.osrelease", vers, &len, nullptr, 0);
  CHECK_EQ(res, 0);
  CHECK_LT(len, sizeof(VersStr));

  u16 major, minor;
  ParseVersion(vers, &major, &minor);

  return DarwinKernelVersion(major, minor);
}

uptr GetRSS() {
  struct task_basic_info info;
  unsigned count = TASK_BASIC_INFO_COUNT;
  kern_return_t result =
      task_info(mach_task_self(), TASK_BASIC_INFO, (task_info_t)&info, &count);
  if (UNLIKELY(result != KERN_SUCCESS)) {
    Report("Cannot get task info. Error: %d\n", result);
    Die();
  }
  return info.resident_size;
}

void *internal_start_thread(void *(*func)(void *arg), void *arg) {
  // Start the thread with signals blocked, otherwise it can steal user signals.
  __sanitizer_sigset_t set, old;
  internal_sigfillset(&set);
  internal_sigprocmask(SIG_SETMASK, &set, &old);
  pthread_t th;
  pthread_create(&th, 0, func, arg);
  internal_sigprocmask(SIG_SETMASK, &old, 0);
  return th;
}

void internal_join_thread(void *th) { pthread_join((pthread_t)th, 0); }

#if !SANITIZER_GO
static Mutex syslog_lock;
# endif

void WriteOneLineToSyslog(const char *s) {
#if !SANITIZER_GO
  syslog_lock.CheckLocked();
  if (GetMacosAlignedVersion() >= MacosVersion(10, 12)) {
    os_log_error(OS_LOG_DEFAULT, "%{public}s", s);
  } else {
    asl_log(nullptr, nullptr, ASL_LEVEL_ERR, "%s", s);
  }
#endif
}

// buffer to store crash report application information
static char crashreporter_info_buff[__sanitizer::kErrorMessageBufferSize] = {};
static Mutex crashreporter_info_mutex;

extern "C" {
// Integrate with crash reporter libraries.
#if HAVE_CRASHREPORTERCLIENT_H
CRASH_REPORTER_CLIENT_HIDDEN
struct crashreporter_annotations_t gCRAnnotations
    __attribute__((section("__DATA," CRASHREPORTER_ANNOTATIONS_SECTION))) = {
        CRASHREPORTER_ANNOTATIONS_VERSION,
        0,
        0,
        0,
        0,
        0,
        0,
#if CRASHREPORTER_ANNOTATIONS_VERSION > 4
        0,
#endif
};

#else
// fall back to old crashreporter api
static const char *__crashreporter_info__ __attribute__((__used__)) =
    &crashreporter_info_buff[0];
asm(".desc ___crashreporter_info__, 0x10");
#endif

} // extern "C"

static void CRAppendCrashLogMessage(const char *msg) {
  Lock l(&crashreporter_info_mutex);
  internal_strlcat(crashreporter_info_buff, msg,
                   sizeof(crashreporter_info_buff));
#if HAVE_CRASHREPORTERCLIENT_H
  (void)CRSetCrashLogMessage(crashreporter_info_buff);
#endif
}

void LogMessageOnPrintf(const char *str) {
  // Log all printf output to CrashLog.
  if (common_flags()->abort_on_error)
    CRAppendCrashLogMessage(str);
}

void LogFullErrorReport(const char *buffer) {
#if !SANITIZER_GO
  // Log with os_trace. This will make it into the crash log.
#if SANITIZER_OS_TRACE
  if (GetMacosAlignedVersion() >= MacosVersion(10, 10)) {
    // os_trace requires the message (format parameter) to be a string literal.
    if (internal_strncmp(SanitizerToolName, "AddressSanitizer",
                         sizeof("AddressSanitizer") - 1) == 0)
      os_trace("Address Sanitizer reported a failure.");
    else if (internal_strncmp(SanitizerToolName, "UndefinedBehaviorSanitizer",
                              sizeof("UndefinedBehaviorSanitizer") - 1) == 0)
      os_trace("Undefined Behavior Sanitizer reported a failure.");
    else if (internal_strncmp(SanitizerToolName, "ThreadSanitizer",
                              sizeof("ThreadSanitizer") - 1) == 0)
      os_trace("Thread Sanitizer reported a failure.");
    else
      os_trace("Sanitizer tool reported a failure.");

    if (common_flags()->log_to_syslog)
      os_trace("Consult syslog for more information.");
  }
#endif

  // Log to syslog.
  // The logging on OS X may call pthread_create so we need the threading
  // environment to be fully initialized. Also, this should never be called when
  // holding the thread registry lock since that may result in a deadlock. If
  // the reporting thread holds the thread registry mutex, and asl_log waits
  // for GCD to dispatch a new thread, the process will deadlock, because the
  // pthread_create wrapper needs to acquire the lock as well.
  Lock l(&syslog_lock);
  if (common_flags()->log_to_syslog)
    WriteToSyslog(buffer);

  // The report is added to CrashLog as part of logging all of Printf output.
#endif
}

SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
#if defined(__x86_64__) || defined(__i386__)
  ucontext_t *ucontext = static_cast<ucontext_t*>(context);
  return ucontext->uc_mcontext->__es.__err & 2 /*T_PF_WRITE*/ ? Write : Read;
#elif defined(__arm64__)
  ucontext_t *ucontext = static_cast<ucontext_t*>(context);
  return ucontext->uc_mcontext->__es.__esr & 0x40 /*ISS_DA_WNR*/ ? Write : Read;
#else
  return Unknown;
#endif
}

bool SignalContext::IsTrueFaultingAddress() const {
  auto si = static_cast<const siginfo_t *>(siginfo);
894// "Real" SIGSEGV codes (e.g., SEGV_MAPERR, SEGV_MAPERR) are non-zero.
  return si->si_signo == SIGSEGV && si->si_code != 0;
}

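// On arm64e, values read from the saved thread state may carry pointer
// authentication bits; strip them before treating the values as addresses.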
#if defined(__aarch64__) && defined(arm_thread_state64_get_sp)
#define AARCH64_GET_REG(r) \
  (uptr)ptrauth_strip(     \
      (void *)arm_thread_state64_get_##r(ucontext->uc_mcontext->__ss), 0)
#else
#define AARCH64_GET_REG(r) (uptr)ucontext->uc_mcontext->__ss.__##r
#endif

static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
  ucontext_t *ucontext = (ucontext_t*)context;
# if defined(__aarch64__)
  *pc = AARCH64_GET_REG(pc);
  *bp = AARCH64_GET_REG(fp);
  *sp = AARCH64_GET_REG(sp);
# elif defined(__x86_64__)
  *pc = ucontext->uc_mcontext->__ss.__rip;
  *bp = ucontext->uc_mcontext->__ss.__rbp;
  *sp = ucontext->uc_mcontext->__ss.__rsp;
# elif defined(__arm__)
  *pc = ucontext->uc_mcontext->__ss.__pc;
  *bp = ucontext->uc_mcontext->__ss.__r[7];
  *sp = ucontext->uc_mcontext->__ss.__sp;
# elif defined(__i386__)
  *pc = ucontext->uc_mcontext->__ss.__eip;
  *bp = ucontext->uc_mcontext->__ss.__ebp;
  *sp = ucontext->uc_mcontext->__ss.__esp;
# else
# error "Unknown architecture"
# endif
}

void SignalContext::InitPcSpBp() {
  addr = (uptr)ptrauth_strip((void *)addr, 0);
  GetPcSpBp(context, &pc, &sp, &bp);
}

// ASan/TSan use mmap in a way that creates “deallocation gaps” which triggers
// EXC_GUARD exceptions on macOS 10.15+ (XNU 19.0+).
static void DisableMmapExcGuardExceptions() {
  using task_exc_guard_behavior_t = uint32_t;
  using task_set_exc_guard_behavior_t =
      kern_return_t(task_t task, task_exc_guard_behavior_t behavior);
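  // Look up task_set_exc_guard_behavior() dynamically since it is not
  // available on older OS versions; silently do nothing if it is absent.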
  auto *set_behavior = (task_set_exc_guard_behavior_t *)dlsym(
      RTLD_DEFAULT, "task_set_exc_guard_behavior");
  if (set_behavior == nullptr) return;
  const task_exc_guard_behavior_t task_exc_guard_none = 0;
  set_behavior(mach_task_self(), task_exc_guard_none);
}

static void VerifyInterceptorsWorking();
static void StripEnv();

void InitializePlatformEarly() {
  // Only use xnu_fast_mmap when on x86_64 and the kernel supports it.
  use_xnu_fast_mmap =
#if defined(__x86_64__)
      GetDarwinKernelVersion() >= DarwinKernelVersion(17, 5);
#else
      false;
#endif
  if (GetDarwinKernelVersion() >= DarwinKernelVersion(19, 0))
    DisableMmapExcGuardExceptions();

# if !SANITIZER_GO
  MonotonicNanoTime(); // Call to initialize mach_timebase_info
  VerifyInterceptorsWorking();
  StripEnv();
# endif
}

#if !SANITIZER_GO
static const char kDyldInsertLibraries[] = "DYLD_INSERT_LIBRARIES";
LowLevelAllocator allocator_for_env;

static bool ShouldCheckInterceptors() {
  // Restrict "interceptors working?" check to ASan and TSan.
  const char *sanitizer_names[] = {"AddressSanitizer", "ThreadSanitizer"};
  size_t count = sizeof(sanitizer_names) / sizeof(sanitizer_names[0]);
  for (size_t i = 0; i < count; i++) {
    if (internal_strcmp(sanitizer_names[i], SanitizerToolName) == 0)
      return true;
  }
  return false;
}

static void VerifyInterceptorsWorking() {
  if (!common_flags()->verify_interceptors || !ShouldCheckInterceptors())
    return;

  // Verify that interceptors really work. We'll use dlsym to locate
  // "puts", if interceptors are working, it should really point to
  // "wrap_puts" within our own dylib.
  Dl_info info_puts, info_runtime;
  RAW_CHECK(dladdr(dlsym(RTLD_DEFAULT, "puts"), &info_puts));
  RAW_CHECK(dladdr((void *)&VerifyInterceptorsWorking, &info_runtime));
  if (internal_strcmp(info_puts.dli_fname, info_runtime.dli_fname) != 0) {
    Report(
        "ERROR: Interceptors are not working. This may be because %s is "
        "loaded too late (e.g. via dlopen). Please launch the executable "
        "with:\n%s=%s\n",
        SanitizerToolName, kDyldInsertLibraries, info_runtime.dli_fname);
    RAW_CHECK("interceptors not installed" && 0);
  }
}

// Change the value of the env var |name|, leaking the original value.
// If |name_value| is NULL, the variable is deleted from the environment,
// otherwise the corresponding "NAME=value" string is replaced with
// |name_value|.
static void LeakyResetEnv(const char *name, const char *name_value) {
  char **env = GetEnviron();
  uptr name_len = internal_strlen(name);
  while (*env != 0) {
    uptr len = internal_strlen(*env);
    if (len > name_len) {
      const char *p = *env;
      if (!internal_memcmp(p, name, name_len) && p[name_len] == '=') {
        // Match.
        if (name_value) {
          // Replace the old value with the new one.
          *env = const_cast<char*>(name_value);
        } else {
          // Shift the subsequent pointers back.
          char **del = env;
          do {
            del[0] = del[1];
          } while (*del++);
        }
      }
    }
    env++;
  }
}

static void StripEnv() {
  if (!common_flags()->strip_env)
    return;

  char *dyld_insert_libraries =
      const_cast<char *>(GetEnv(kDyldInsertLibraries));
  if (!dyld_insert_libraries)
    return;

  Dl_info info;
  RAW_CHECK(dladdr((void *)&StripEnv, &info));
  const char *dylib_name = StripModuleName(info.dli_fname);
  bool lib_is_in_env = internal_strstr(dyld_insert_libraries, dylib_name);
  if (!lib_is_in_env)
    return;

  // DYLD_INSERT_LIBRARIES is set and contains the runtime library. Let's remove
  // the dylib from the environment variable, because interceptors are installed
  // and we don't want our children to inherit the variable.

  uptr old_env_len = internal_strlen(dyld_insert_libraries);
  uptr dylib_name_len = internal_strlen(dylib_name);
  uptr env_name_len = internal_strlen(kDyldInsertLibraries);
  // Allocate memory to hold the previous env var name, its value, the '='
  // sign and the '\0' char.
  char *new_env = (char*)allocator_for_env.Allocate(
      old_env_len + 2 + env_name_len);
  RAW_CHECK(new_env);
  internal_memset(new_env, '\0', old_env_len + 2 + env_name_len);
  internal_strncpy(new_env, kDyldInsertLibraries, env_name_len);
  new_env[env_name_len] = '=';
  char *new_env_pos = new_env + env_name_len + 1;

  // Iterate over colon-separated pieces of |dyld_insert_libraries|.
  char *piece_start = dyld_insert_libraries;
  char *piece_end = NULL;
  char *old_env_end = dyld_insert_libraries + old_env_len;
  do {
    if (piece_start[0] == ':') piece_start++;
    piece_end = internal_strchr(piece_start, ':');
    if (!piece_end) piece_end = dyld_insert_libraries + old_env_len;
    if ((uptr)(piece_start - dyld_insert_libraries) > old_env_len) break;
    uptr piece_len = piece_end - piece_start;

    char *filename_start =
        (char *)internal_memrchr(piece_start, '/', piece_len);
    uptr filename_len = piece_len;
    if (filename_start) {
      filename_start += 1;
      filename_len = piece_len - (filename_start - piece_start);
    } else {
      filename_start = piece_start;
    }

    // If the current piece isn't the runtime library name,
    // append it to new_env.
    if ((dylib_name_len != filename_len) ||
        (internal_memcmp(filename_start, dylib_name, dylib_name_len) != 0)) {
      if (new_env_pos != new_env + env_name_len + 1) {
        new_env_pos[0] = ':';
        new_env_pos++;
      }
      internal_strncpy(new_env_pos, piece_start, piece_len);
      new_env_pos += piece_len;
    }
    // Move on to the next piece.
    piece_start = piece_end;
  } while (piece_start < old_env_end);

  // Can't use setenv() here, because it requires the allocator to be
  // initialized.
  // FIXME: instead of filtering DYLD_INSERT_LIBRARIES here, do it in
  // a separate function called after InitializeAllocator().
  if (new_env_pos == new_env + env_name_len + 1) new_env = NULL;
  LeakyResetEnv(kDyldInsertLibraries, new_env);
}
#endif // SANITIZER_GO

char **GetArgv() {
  return *_NSGetArgv();
}

#if SANITIZER_IOS && !SANITIZER_IOSSIM
// The task_vm_info struct is normally provided by the macOS SDK, but we need
// fields only available in 10.12+. Declare the struct manually to be able to
// build against older SDKs.
struct __sanitizer_task_vm_info {
  mach_vm_size_t virtual_size;
  integer_t region_count;
  integer_t page_size;
  mach_vm_size_t resident_size;
  mach_vm_size_t resident_size_peak;
  mach_vm_size_t device;
  mach_vm_size_t device_peak;
  mach_vm_size_t internal;
  mach_vm_size_t internal_peak;
  mach_vm_size_t external;
  mach_vm_size_t external_peak;
  mach_vm_size_t reusable;
  mach_vm_size_t reusable_peak;
  mach_vm_size_t purgeable_volatile_pmap;
  mach_vm_size_t purgeable_volatile_resident;
  mach_vm_size_t purgeable_volatile_virtual;
  mach_vm_size_t compressed;
  mach_vm_size_t compressed_peak;
  mach_vm_size_t compressed_lifetime;
  mach_vm_size_t phys_footprint;
  mach_vm_address_t min_address;
  mach_vm_address_t max_address;
};
#define __SANITIZER_TASK_VM_INFO_COUNT ((mach_msg_type_number_t) \
    (sizeof(__sanitizer_task_vm_info) / sizeof(natural_t)))

static uptr GetTaskInfoMaxAddress() {
  __sanitizer_task_vm_info vm_info = {} /* zero initialize */;
  mach_msg_type_number_t count = __SANITIZER_TASK_VM_INFO_COUNT;
  int err = task_info(mach_task_self(), TASK_VM_INFO, (int *)&vm_info, &count);
  return err ? 0 : vm_info.max_address;
}

uptr GetMaxUserVirtualAddress() {
  static uptr max_vm = GetTaskInfoMaxAddress();
  if (max_vm != 0) {
    const uptr ret_value = max_vm - 1;
    CHECK_LE(ret_value, SANITIZER_MMAP_RANGE_SIZE);
    return ret_value;
  }

  // xnu cannot provide vm address limit
# if SANITIZER_WORDSIZE == 32
  constexpr uptr fallback_max_vm = 0xffe00000 - 1;
# else
  constexpr uptr fallback_max_vm = 0x200000000 - 1;
# endif
  static_assert(fallback_max_vm <= SANITIZER_MMAP_RANGE_SIZE,
                "Max virtual address must be less than mmap range size.");
  return fallback_max_vm;
}

#else // !SANITIZER_IOS

uptr GetMaxUserVirtualAddress() {
# if SANITIZER_WORDSIZE == 64
  constexpr uptr max_vm = (1ULL << 47) - 1; // 0x00007fffffffffffUL;
# else // SANITIZER_WORDSIZE == 32
  static_assert(SANITIZER_WORDSIZE == 32, "Wrong wordsize");
  constexpr uptr max_vm = (1ULL << 32) - 1; // 0xffffffff;
# endif
  static_assert(max_vm <= SANITIZER_MMAP_RANGE_SIZE,
                "Max virtual address must be less than mmap range size.");
  return max_vm;
}
#endif

uptr GetMaxVirtualAddress() {
  return GetMaxUserVirtualAddress();
}

uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
                      uptr min_shadow_base_alignment, uptr &high_mem_end,
                      uptr granularity) {
  const uptr alignment =
      Max<uptr>(granularity << shadow_scale, 1ULL << min_shadow_base_alignment);
  const uptr left_padding =
      Max<uptr>(granularity, 1ULL << min_shadow_base_alignment);

  uptr space_size = shadow_size_bytes + left_padding;

  uptr largest_gap_found = 0;
  uptr max_occupied_addr = 0;
  VReport(2, "FindDynamicShadowStart, space_size = %p\n", (void *)space_size);
  uptr shadow_start =
      FindAvailableMemoryRange(space_size, alignment, granularity,
                               &largest_gap_found, &max_occupied_addr);
  // If the shadow doesn't fit, restrict the address space to make it fit.
  if (shadow_start == 0) {
    VReport(
        2,
        "Shadow doesn't fit, largest_gap_found = %p, max_occupied_addr = %p\n",
        (void *)largest_gap_found, (void *)max_occupied_addr);
    uptr new_max_vm = RoundDownTo(largest_gap_found << shadow_scale, alignment);
    if (new_max_vm < max_occupied_addr) {
      Report("Unable to find a memory range for dynamic shadow.\n");
      Report(
          "space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, "
          "new_max_vm = %p\n",
          (void *)space_size, (void *)largest_gap_found,
          (void *)max_occupied_addr, (void *)new_max_vm);
      CHECK(0 && "cannot place shadow");
    }
    RestrictMemoryToMaxAddress(new_max_vm);
    high_mem_end = new_max_vm - 1;
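    // Recompute the required space for the now-restricted address range and
    // retry the search.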
    space_size = (high_mem_end >> shadow_scale) + left_padding;
    VReport(2, "FindDynamicShadowStart, space_size = %p\n", (void *)space_size);
    shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity,
                                            nullptr, nullptr);
    if (shadow_start == 0) {
      Report("Unable to find a memory range after restricting VM.\n");
      CHECK(0 && "cannot place shadow after restricting vm");
    }
  }
  CHECK_NE((uptr)0, shadow_start);
  CHECK(IsAligned(shadow_start, alignment));
  return shadow_start;
}

uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
                                uptr num_aliases, uptr ring_buffer_size) {
  CHECK(false && "HWASan aliasing is unimplemented on Mac");
  return 0;
}

uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found,
                              uptr *max_occupied_addr) {
  typedef vm_region_submap_short_info_data_64_t RegionInfo;
  enum { kRegionInfoSize = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 };
  // Start searching for available memory region past PAGEZERO, which is
  // 4KB on 32-bit and 4GB on 64-bit.
  mach_vm_address_t start_address =
      (SANITIZER_WORDSIZE == 32) ? 0x000000001000 : 0x000100000000;

  const mach_vm_address_t max_vm_address = GetMaxVirtualAddress() + 1;
  mach_vm_address_t address = start_address;
  mach_vm_address_t free_begin = start_address;
  kern_return_t kr = KERN_SUCCESS;
  if (largest_gap_found) *largest_gap_found = 0;
  if (max_occupied_addr) *max_occupied_addr = 0;
  while (kr == KERN_SUCCESS) {
    mach_vm_size_t vmsize = 0;
    natural_t depth = 0;
    RegionInfo vminfo;
    mach_msg_type_number_t count = kRegionInfoSize;
    kr = mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth,
                                (vm_region_info_t)&vminfo, &count);
    if (kr == KERN_INVALID_ADDRESS) {
      // No more regions beyond "address", consider the gap at the end of VM.
      address = max_vm_address;
      vmsize = 0;
    } else {
      if (max_occupied_addr) *max_occupied_addr = address + vmsize;
    }
    if (free_begin != address) {
      // We found a free region [free_begin..address-1].
      uptr gap_start = RoundUpTo((uptr)free_begin + left_padding, alignment);
      uptr gap_end = RoundDownTo((uptr)Min(address, max_vm_address), alignment);
      uptr gap_size = gap_end > gap_start ? gap_end - gap_start : 0;
      if (size < gap_size) {
        return gap_start;
      }

      if (largest_gap_found && *largest_gap_found < gap_size) {
        *largest_gap_found = gap_size;
      }
    }
    // Move to the next region.
    address += vmsize;
    free_begin = address;
  }

  // We looked at all free regions and could not find one large enough.
  return 0;
}

// FIXME implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats) {}

void SignalContext::DumpAllRegisters(void *context) {
  Report("Register values:\n");

  ucontext_t *ucontext = (ucontext_t*)context;
# define DUMPREG64(r) \
    Printf("%s = 0x%016llx ", #r, ucontext->uc_mcontext->__ss.__ ## r);
# define DUMPREGA64(r) \
    Printf(" %s = 0x%016lx ", #r, AARCH64_GET_REG(r));
# define DUMPREG32(r) \
    Printf("%s = 0x%08x ", #r, ucontext->uc_mcontext->__ss.__ ## r);
# define DUMPREG_(r) Printf(" "); DUMPREG(r);
# define DUMPREG__(r) Printf(" "); DUMPREG(r);
# define DUMPREG___(r) Printf(" "); DUMPREG(r);

# if defined(__x86_64__)
# define DUMPREG(r) DUMPREG64(r)
  DUMPREG(rax); DUMPREG(rbx); DUMPREG(rcx); DUMPREG(rdx); Printf("\n");
  DUMPREG(rdi); DUMPREG(rsi); DUMPREG(rbp); DUMPREG(rsp); Printf("\n");
  DUMPREG_(r8); DUMPREG_(r9); DUMPREG(r10); DUMPREG(r11); Printf("\n");
  DUMPREG(r12); DUMPREG(r13); DUMPREG(r14); DUMPREG(r15); Printf("\n");
# elif defined(__i386__)
# define DUMPREG(r) DUMPREG32(r)
  DUMPREG(eax); DUMPREG(ebx); DUMPREG(ecx); DUMPREG(edx); Printf("\n");
  DUMPREG(edi); DUMPREG(esi); DUMPREG(ebp); DUMPREG(esp); Printf("\n");
# elif defined(__aarch64__)
# define DUMPREG(r) DUMPREG64(r)
  DUMPREG_(x[0]); DUMPREG_(x[1]); DUMPREG_(x[2]); DUMPREG_(x[3]); Printf("\n");
  DUMPREG_(x[4]); DUMPREG_(x[5]); DUMPREG_(x[6]); DUMPREG_(x[7]); Printf("\n");
  DUMPREG_(x[8]); DUMPREG_(x[9]); DUMPREG(x[10]); DUMPREG(x[11]); Printf("\n");
  DUMPREG(x[12]); DUMPREG(x[13]); DUMPREG(x[14]); DUMPREG(x[15]); Printf("\n");
  DUMPREG(x[16]); DUMPREG(x[17]); DUMPREG(x[18]); DUMPREG(x[19]); Printf("\n");
  DUMPREG(x[20]); DUMPREG(x[21]); DUMPREG(x[22]); DUMPREG(x[23]); Printf("\n");
  DUMPREG(x[24]); DUMPREG(x[25]); DUMPREG(x[26]); DUMPREG(x[27]); Printf("\n");
  DUMPREG(x[28]); DUMPREGA64(fp); DUMPREGA64(lr); DUMPREGA64(sp); Printf("\n");
# elif defined(__arm__)
# define DUMPREG(r) DUMPREG32(r)
  DUMPREG_(r[0]); DUMPREG_(r[1]); DUMPREG_(r[2]); DUMPREG_(r[3]); Printf("\n");
  DUMPREG_(r[4]); DUMPREG_(r[5]); DUMPREG_(r[6]); DUMPREG_(r[7]); Printf("\n");
  DUMPREG_(r[8]); DUMPREG_(r[9]); DUMPREG(r[10]); DUMPREG(r[11]); Printf("\n");
  DUMPREG(r[12]); DUMPREG___(sp); DUMPREG___(lr); DUMPREG___(pc); Printf("\n");
# else
# error "Unknown architecture"
# endif

# undef DUMPREG64
# undef DUMPREG32
# undef DUMPREG_
# undef DUMPREG__
# undef DUMPREG___
# undef DUMPREG
}

static inline bool CompareBaseAddress(const LoadedModule &a,
                                      const LoadedModule &b) {
  return a.base_address() < b.base_address();
}

void FormatUUID(char *out, uptr size, const u8 *uuid) {
  internal_snprintf(out, size,
                    "<%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-"
                    "%02X%02X%02X%02X%02X%02X>",
                    uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5],
                    uuid[6], uuid[7], uuid[8], uuid[9], uuid[10], uuid[11],
                    uuid[12], uuid[13], uuid[14], uuid[15]);
}

void DumpProcessMap() {
  Printf("Process module map:\n");
  MemoryMappingLayout memory_mapping(false);
  InternalMmapVector<LoadedModule> modules;
  modules.reserve(128);
  memory_mapping.DumpListOfModules(&modules);
  Sort(modules.data(), modules.size(), CompareBaseAddress);
  for (uptr i = 0; i < modules.size(); ++i) {
    char uuid_str[128];
    FormatUUID(uuid_str, sizeof(uuid_str), modules[i].uuid());
    Printf("0x%zx-0x%zx %s (%s) %s\n", modules[i].base_address(),
           modules[i].max_address(), modules[i].full_name(),
           ModuleArchToString(modules[i].arch()), uuid_str);
  }
  Printf("End of module map.\n");
}

void CheckNoDeepBind(const char *filename, int flag) {
  // Do nothing.
}

bool GetRandom(void *buffer, uptr length, bool blocking) {
  if (!buffer || !length || length > 256)
    return false;
  // arc4random never fails.
  REAL(arc4random_buf)(buffer, length);
  return true;
}

u32 GetNumberOfCPUs() {
  return (u32)sysconf(_SC_NPROCESSORS_ONLN);
}

void InitializePlatformCommonFlags(CommonFlags *cf) {}

// Pthread introspection hook
//
// * GCD worker threads are created without a call to pthread_create(), but we
//   still need to register these threads (with ThreadCreate/Start()).
// * We use the "pthread introspection hook" below to observe the creation of
//   such threads.
// * GCD worker threads don't have parent threads and the CREATE event is
//   delivered in the context of the thread itself. CREATE events for regular
//   threads are delivered on the parent. We use this to tell apart which
//   threads are GCD workers with `thread == pthread_self()`.
//
static pthread_introspection_hook_t prev_pthread_introspection_hook;
static ThreadEventCallbacks thread_event_callbacks;

static void sanitizer_pthread_introspection_hook(unsigned int event,
                                                 pthread_t thread, void *addr,
                                                 size_t size) {
  // create -> start -> terminate -> destroy
  // * create/destroy are usually (not guaranteed) delivered on the parent and
  //   track resource allocation/reclamation
  // * start/terminate are guaranteed to be delivered in the context of the
  //   thread and give hooks into "just after (before) thread starts (stops)
  //   executing"
  DCHECK(event >= PTHREAD_INTROSPECTION_THREAD_CREATE &&
         event <= PTHREAD_INTROSPECTION_THREAD_DESTROY);

  if (event == PTHREAD_INTROSPECTION_THREAD_CREATE) {
    bool gcd_worker = (thread == pthread_self());
    if (thread_event_callbacks.create)
      thread_event_callbacks.create((uptr)thread, gcd_worker);
  } else if (event == PTHREAD_INTROSPECTION_THREAD_START) {
    CHECK_EQ(thread, pthread_self());
    if (thread_event_callbacks.start)
      thread_event_callbacks.start((uptr)thread);
  }

  if (prev_pthread_introspection_hook)
    prev_pthread_introspection_hook(event, thread, addr, size);

  if (event == PTHREAD_INTROSPECTION_THREAD_TERMINATE) {
    CHECK_EQ(thread, pthread_self());
    if (thread_event_callbacks.terminate)
      thread_event_callbacks.terminate((uptr)thread);
  } else if (event == PTHREAD_INTROSPECTION_THREAD_DESTROY) {
    if (thread_event_callbacks.destroy)
      thread_event_callbacks.destroy((uptr)thread);
  }
}

void InstallPthreadIntrospectionHook(const ThreadEventCallbacks &callbacks) {
  thread_event_callbacks = callbacks;
  prev_pthread_introspection_hook =
      pthread_introspection_hook_install(&sanitizer_pthread_introspection_hook);
}

} // namespace __sanitizer

#endif // SANITIZER_APPLE