Home | History | Annotate | Download | only in asan
      1 //===-- asan_mac.cc -------------------------------------------------------===//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 //
     10 // This file is a part of AddressSanitizer, an address sanity checker.
     11 //
     12 // Mac-specific details.
     13 //===----------------------------------------------------------------------===//
     14 
     15 #ifdef __APPLE__
     16 
     17 #include "asan_interceptors.h"
     18 #include "asan_internal.h"
     19 #include "asan_mapping.h"
     20 #include "asan_procmaps.h"
     21 #include "asan_stack.h"
     22 #include "asan_thread.h"
     23 #include "asan_thread_registry.h"
     24 
     25 #include <crt_externs.h>  // for _NSGetEnviron
     26 #include <mach-o/dyld.h>
     27 #include <mach-o/loader.h>
     28 #include <sys/mman.h>
     29 #include <sys/resource.h>
     30 #include <sys/sysctl.h>
     31 #include <sys/ucontext.h>
     32 #include <pthread.h>
     33 #include <fcntl.h>
     34 #include <unistd.h>
     35 #include <libkern/OSAtomic.h>
     36 #include <CoreFoundation/CFString.h>
     37 
     38 namespace __asan {
     39 
// Extracts the program counter, stack pointer and frame pointer from a
// signal-handler context.  |context| is a ucontext_t* delivered to the
// SIGSEGV/SIGBUS handler; the thread-state register names differ between
// the 64-bit (__r*) and 32-bit (__e*) Darwin layouts.
void GetPcSpBp(void *context, uintptr_t *pc, uintptr_t *sp, uintptr_t *bp) {
  ucontext_t *ucontext = (ucontext_t*)context;
# if __WORDSIZE == 64
  *pc = ucontext->uc_mcontext->__ss.__rip;
  *bp = ucontext->uc_mcontext->__ss.__rbp;
  *sp = ucontext->uc_mcontext->__ss.__rsp;
# else
  *pc = ucontext->uc_mcontext->__ss.__eip;
  *bp = ucontext->uc_mcontext->__ss.__ebp;
  *sp = ucontext->uc_mcontext->__ss.__esp;
# endif  // __WORDSIZE
}
     52 
// OS X releases recognized by GetMacosVersion() below, keyed off the
// Darwin kernel release number: 9.x is Leopard (10.5), 10.x is
// Snow Leopard (10.6), 11.x is Lion (10.7).
enum {
  MACOS_VERSION_UNKNOWN = 0,
  MACOS_VERSION_LEOPARD,
  MACOS_VERSION_SNOW_LEOPARD,
  MACOS_VERSION_LION,
};
     59 
     60 static int GetMacosVersion() {
     61   int mib[2] = { CTL_KERN, KERN_OSRELEASE };
     62   char version[100];
     63   size_t len = 0, maxlen = sizeof(version) / sizeof(version[0]);
     64   for (int i = 0; i < maxlen; i++) version[i] = '\0';
     65   // Get the version length.
     66   CHECK(sysctl(mib, 2, NULL, &len, NULL, 0) != -1);
     67   CHECK(len < maxlen);
     68   CHECK(sysctl(mib, 2, version, &len, NULL, 0) != -1);
     69   switch (version[0]) {
     70     case '9': return MACOS_VERSION_LEOPARD;
     71     case '1': {
     72       switch (version[1]) {
     73         case '0': return MACOS_VERSION_SNOW_LEOPARD;
     74         case '1': return MACOS_VERSION_LION;
     75         default: return MACOS_VERSION_UNKNOWN;
     76       }
     77     }
     78     default: return MACOS_VERSION_UNKNOWN;
     79   }
     80 }
     81 
     82 bool PlatformHasDifferentMemcpyAndMemmove() {
     83   // On OS X 10.7 memcpy() and memmove() are both resolved
     84   // into memmove$VARIANT$sse42.
     85   // See also http://code.google.com/p/address-sanitizer/issues/detail?id=34.
     86   // TODO(glider): need to check dynamically that memcpy() and memmove() are
     87   // actually the same function.
     88   return GetMacosVersion() == MACOS_VERSION_SNOW_LEOPARD;
     89 }
     90 
// No-op. Mac does not support static linkage anyway.
// (On Linux this returns a marker symbol; here there is nothing to check.)
void *AsanDoesNotSupportStaticLinkage() {
  return NULL;
}
     95 
     96 static inline bool IntervalsAreSeparate(uintptr_t start1, uintptr_t end1,
     97                                         uintptr_t start2, uintptr_t end2) {
     98   CHECK(start1 <= end1);
     99   CHECK(start2 <= end2);
    100   return (end1 < start2) || (end2 < start1);
    101 }
    102 
// FIXME: this is thread-unsafe, but should not cause problems most of the time.
// When the shadow is mapped only a single thread usually exists (plus maybe
// several worker threads on Mac, which aren't expected to map big chunks of
// memory).
// Walks every mapped segment of the process and returns true iff none of
// them intersects the address range reserved for the ASan shadow,
// [kLowShadowBeg - kMmapGranularity, kHighShadowEnd].
bool AsanShadowRangeIsAvailable() {
  AsanProcMaps procmaps;
  uintptr_t start, end;
  bool available = true;
  // Only segment boundaries are needed; offset/filename are not requested.
  while (procmaps.Next(&start, &end,
                       /*offset*/NULL, /*filename*/NULL, /*filename_size*/0)) {
    if (!IntervalsAreSeparate(start, end,
                              kLowShadowBeg - kMmapGranularity,
                              kHighShadowEnd)) {
      available = false;
      break;
    }
  }
  return available;
}
    122 
    123 bool AsanInterceptsSignal(int signum) {
    124   return (signum == SIGSEGV || signum == SIGBUS) && FLAG_handle_segv;
    125 }
    126 
    127 static void *asan_mmap(void *addr, size_t length, int prot, int flags,
    128                 int fd, uint64_t offset) {
    129   return mmap(addr, length, prot, flags, fd, offset);
    130 }
    131 
    132 size_t AsanWrite(int fd, const void *buf, size_t count) {
    133   return write(fd, buf, count);
    134 }
    135 
    136 void *AsanMmapSomewhereOrDie(size_t size, const char *mem_type) {
    137   size = RoundUpTo(size, kPageSize);
    138   void *res = asan_mmap(0, size,
    139                         PROT_READ | PROT_WRITE,
    140                         MAP_PRIVATE | MAP_ANON, -1, 0);
    141   if (res == (void*)-1) {
    142     OutOfMemoryMessageAndDie(mem_type, size);
    143   }
    144   return res;
    145 }
    146 
    147 void *AsanMmapFixedNoReserve(uintptr_t fixed_addr, size_t size) {
    148   return asan_mmap((void*)fixed_addr, size,
    149                    PROT_READ | PROT_WRITE,
    150                    MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE,
    151                    0, 0);
    152 }
    153 
    154 void *AsanMprotect(uintptr_t fixed_addr, size_t size) {
    155   return asan_mmap((void*)fixed_addr, size,
    156                    PROT_NONE,
    157                    MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE,
    158                    0, 0);
    159 }
    160 
    161 void AsanUnmapOrDie(void *addr, size_t size) {
    162   if (!addr || !size) return;
    163   int res = munmap(addr, size);
    164   if (res != 0) {
    165     Report("Failed to unmap\n");
    166     AsanDie();
    167   }
    168 }
    169 
    170 int AsanOpenReadonly(const char* filename) {
    171   return open(filename, O_RDONLY);
    172 }
    173 
    174 const char *AsanGetEnv(const char *name) {
    175   char ***env_ptr = _NSGetEnviron();
    176   CHECK(env_ptr);
    177   char **environ = *env_ptr;
    178   CHECK(environ);
    179   size_t name_len = internal_strlen(name);
    180   while (*environ != NULL) {
    181     size_t len = internal_strlen(*environ);
    182     if (len > name_len) {
    183       const char *p = *environ;
    184       if (!internal_memcmp(p, name, name_len) &&
    185           p[name_len] == '=') {  // Match.
    186         return *environ + name_len + 1;  // String starting after =.
    187       }
    188     }
    189     environ++;
    190   }
    191   return NULL;
    192 }
    193 
    194 size_t AsanRead(int fd, void *buf, size_t count) {
    195   return read(fd, buf, count);
    196 }
    197 
    198 int AsanClose(int fd) {
    199   return close(fd);
    200 }
    201 
// The constructor just positions the iterator at the last loaded image
// (see Reset() below).
AsanProcMaps::AsanProcMaps() {
  Reset();
}

// Nothing is owned, so there is nothing to release.
AsanProcMaps::~AsanProcMaps() {
}
    208 
    209 // More information about Mach-O headers can be found in mach-o/loader.h
    210 // Each Mach-O image has a header (mach_header or mach_header_64) starting with
    211 // a magic number, and a list of linker load commands directly following the
    212 // header.
    213 // A load command is at least two 32-bit words: the command type and the
    214 // command size in bytes. We're interested only in segment load commands
    215 // (LC_SEGMENT and LC_SEGMENT_64), which tell that a part of the file is mapped
    216 // into the task's address space.
    217 // The |vmaddr|, |vmsize| and |fileoff| fields of segment_command or
    218 // segment_command_64 correspond to the memory address, memory size and the
    219 // file offset of the current memory segment.
    220 // Because these fields are taken from the images as is, one needs to add
    221 // _dyld_get_image_vmaddr_slide() to get the actual addresses at runtime.
    222 
    223 void AsanProcMaps::Reset() {
    224   // Count down from the top.
    225   // TODO(glider): as per man 3 dyld, iterating over the headers with
    226   // _dyld_image_count is thread-unsafe. We need to register callbacks for
    227   // adding and removing images which will invalidate the AsanProcMaps state.
    228   current_image_ = _dyld_image_count();
    229   current_load_cmd_count_ = -1;
    230   current_load_cmd_addr_ = NULL;
    231   current_magic_ = 0;
    232 }
    233 
    234 // Next and NextSegmentLoad were inspired by base/sysinfo.cc in
    235 // Google Perftools, http://code.google.com/p/google-perftools.
    236 
    237 // NextSegmentLoad scans the current image for the next segment load command
    238 // and returns the start and end addresses and file offset of the corresponding
    239 // segment.
    240 // Note that the segment addresses are not necessarily sorted.
    241 template<uint32_t kLCSegment, typename SegmentCommand>
    242 bool AsanProcMaps::NextSegmentLoad(
    243     uintptr_t *start, uintptr_t *end, uintptr_t *offset,
    244     char filename[], size_t filename_size) {
    245   const char* lc = current_load_cmd_addr_;
    246   current_load_cmd_addr_ += ((const load_command *)lc)->cmdsize;
    247   if (((const load_command *)lc)->cmd == kLCSegment) {
    248     const intptr_t dlloff = _dyld_get_image_vmaddr_slide(current_image_);
    249     const SegmentCommand* sc = (const SegmentCommand *)lc;
    250     if (start) *start = sc->vmaddr + dlloff;
    251     if (end) *end = sc->vmaddr + sc->vmsize + dlloff;
    252     if (offset) *offset = sc->fileoff;
    253     if (filename) {
    254       REAL(strncpy)(filename, _dyld_get_image_name(current_image_),
    255                     filename_size);
    256     }
    257     if (FLAG_v >= 4)
    258       Report("LC_SEGMENT: %p--%p %s+%p\n", *start, *end, filename, *offset);
    259     return true;
    260   }
    261   return false;
    262 }
    263 
// Returns the next mapped segment of any loaded image, iterating over the
// remaining load commands of the current image and then over the remaining
// images (highest index first, see Reset()).  Returns false when all
// images are exhausted.  Any out-parameter may be NULL.
bool AsanProcMaps::Next(uintptr_t *start, uintptr_t *end,
                        uintptr_t *offset, char filename[],
                        size_t filename_size) {
  for (; current_image_ >= 0; current_image_--) {
    const mach_header* hdr = _dyld_get_image_header(current_image_);
    if (!hdr) continue;
    if (current_load_cmd_count_ < 0) {
      // Set up for this image;
      current_load_cmd_count_ = hdr->ncmds;
      current_magic_ = hdr->magic;
      switch (current_magic_) {
#ifdef MH_MAGIC_64
        case MH_MAGIC_64: {
          // 64-bit image: load commands follow the 64-bit header.
          current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header_64);
          break;
        }
#endif
        case MH_MAGIC: {
          // 32-bit image: load commands follow the 32-bit header.
          current_load_cmd_addr_ = (char*)hdr + sizeof(mach_header);
          break;
        }
        default: {
          // Unknown magic -- skip this image entirely.
          continue;
        }
      }
    }

    // NOTE(review): the counter starts at ncmds and the condition is >= 0,
    // so NextSegmentLoad appears to run ncmds + 1 times per image (one
    // read past the last load command).  Verify against the perftools
    // original before changing -- the loop must still end with a negative
    // counter to trigger setup for the next image.
    for (; current_load_cmd_count_ >= 0; current_load_cmd_count_--) {
      switch (current_magic_) {
        // current_magic_ may be only one of MH_MAGIC, MH_MAGIC_64.
#ifdef MH_MAGIC_64
        case MH_MAGIC_64: {
          if (NextSegmentLoad<LC_SEGMENT_64, struct segment_command_64>(
                  start, end, offset, filename, filename_size))
            return true;
          break;
        }
#endif
        case MH_MAGIC: {
          if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(
                  start, end, offset, filename, filename_size))
            return true;
          break;
        }
      }
    }
    // If we get here, no more load_cmd's in this image talk about
    // segments.  Go on to the next image.
  }
  return false;
}
    315 
// Maps |addr| to the image containing it and the offset within that image,
// delegating to the shared IterateForObjectNameAndOffset() helper
// (defined elsewhere), which walks the segments via Next().
bool AsanProcMaps::GetObjectNameAndOffset(uintptr_t addr, uintptr_t *offset,
                                          char filename[],
                                          size_t filename_size) {
  return IterateForObjectNameAndOffset(addr, offset, filename, filename_size);
}
    321 
    322 void AsanThread::SetThreadStackTopAndBottom() {
    323   size_t stacksize = pthread_get_stacksize_np(pthread_self());
    324   void *stackaddr = pthread_get_stackaddr_np(pthread_self());
    325   stack_top_ = (uintptr_t)stackaddr;
    326   stack_bottom_ = stack_top_ - stacksize;
    327   int local;
    328   CHECK(AddrIsInStack((uintptr_t)&local));
    329 }
    330 
// Linker-initialized lock: relies on static storage being zeroed, which
// matches OS_SPINLOCK_INIT (asserted to be 0 in Lock() below).
AsanLock::AsanLock(LinkerInitialized) {
  // We assume that OS_SPINLOCK_INIT is zero
}
    334 
// Acquires the spin lock and records the owning thread.
// The owner_ check before locking catches an attempt by the current
// thread to re-acquire the lock (which would spin forever); owner_ is
// only written while the lock is held.
void AsanLock::Lock() {
  CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_));
  CHECK(OS_SPINLOCK_INIT == 0);
  CHECK(owner_ != (uintptr_t)pthread_self());
  OSSpinLockLock((OSSpinLock*)&opaque_storage_);
  CHECK(!owner_);  // Holds because owner_ is cleared before unlocking.
  owner_ = (uintptr_t)pthread_self();
}
    343 
// Releases the lock; only the owning thread may call this.
// owner_ is cleared *before* the unlock so that the next locker never
// observes a stale owner after acquiring the spin lock.
void AsanLock::Unlock() {
  CHECK(owner_ == (uintptr_t)pthread_self());
  owner_ = 0;
  OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
}
    349 
// Captures up to |max_s| frames of the current call stack, starting from
// |pc| and unwinding frame pointers from |bp|.
// NOTE(review): |size| is reset to 0 and trace[0] is pre-filled with |pc|;
// presumably FastUnwindStack() updates |size| -- confirm, since for
// max_s <= 1 the recorded size stays 0 even though trace[0] is written.
void AsanStackTrace::GetStackTrace(size_t max_s, uintptr_t pc, uintptr_t bp) {
  size = 0;
  trace[0] = pc;
  if ((max_s) > 1) {
    max_size = max_s;
    FastUnwindStack(pc, bp);
  }
}
    358 
// The range of pages to be used for escape islands.
// TODO(glider): instead of mapping a fixed range we must find a range of
// unmapped pages in vmmap and take them.
// These constants were chosen empirically and may not work if the shadow
// memory layout changes. Unfortunately they do not necessarily depend on
// kHighMemBeg or kHighMemEnd.
// Next page to hand out from the island range; NULL until the range is
// mapped by __interception_allocate_island() below.
static void *island_allocator_pos = NULL;

// 256 pages at a fixed high address (empirical, see the comment above).
#if __WORDSIZE == 32
# define kIslandEnd (0xffdf0000 - kPageSize)
# define kIslandBeg (kIslandEnd - 256 * kPageSize)
#else
# define kIslandEnd (0x7fffffdf0000 - kPageSize)
# define kIslandBeg (kIslandEnd - 256 * kPageSize)
#endif
    374 
    375 extern "C"
    376 mach_error_t __interception_allocate_island(void **ptr,
    377                                             size_t unused_size,
    378                                             void *unused_hint) {
    379   if (!island_allocator_pos) {
    380     island_allocator_pos =
    381         asan_mmap((void*)kIslandBeg, kIslandEnd - kIslandBeg,
    382                   PROT_READ | PROT_WRITE | PROT_EXEC,
    383                   MAP_PRIVATE | MAP_ANON | MAP_FIXED,
    384                  -1, 0);
    385     if (island_allocator_pos != (void*)kIslandBeg) {
    386       return KERN_NO_SPACE;
    387     }
    388     if (FLAG_v) {
    389       Report("Mapped pages %p--%p for branch islands.\n",
    390              kIslandBeg, kIslandEnd);
    391     }
    392     // Should not be very performance-critical.
    393     internal_memset(island_allocator_pos, 0xCC, kIslandEnd - kIslandBeg);
    394   };
    395   *ptr = island_allocator_pos;
    396   island_allocator_pos = (char*)island_allocator_pos + kPageSize;
    397   if (FLAG_v) {
    398     Report("Branch island allocated at %p\n", *ptr);
    399   }
    400   return err_none;
    401 }
    402 
// Deliberate no-op: island pages are never returned to the pool.
extern "C"
mach_error_t __interception_deallocate_island(void *ptr) {
  // Do nothing.
  // TODO(glider): allow to free and reuse the island memory.
  return err_none;
}
    409 
    410 // Support for the following functions from libdispatch on Mac OS:
    411 //   dispatch_async_f()
    412 //   dispatch_async()
    413 //   dispatch_sync_f()
    414 //   dispatch_sync()
    415 //   dispatch_after_f()
    416 //   dispatch_after()
    417 //   dispatch_group_async_f()
    418 //   dispatch_group_async()
    419 // TODO(glider): libdispatch API contains other functions that we don't support
    420 // yet.
    421 //
    422 // dispatch_sync() and dispatch_sync_f() are synchronous, although chances are
    423 // they can cause jobs to run on a thread different from the current one.
    424 // TODO(glider): if so, we need a test for this (otherwise we should remove
    425 // them).
    426 //
    427 // The following functions use dispatch_barrier_async_f() (which isn't a library
    428 // function but is exported) and are thus supported:
    429 //   dispatch_source_set_cancel_handler_f()
    430 //   dispatch_source_set_cancel_handler()
    431 //   dispatch_source_set_event_handler_f()
    432 //   dispatch_source_set_event_handler()
    433 //
    434 // The reference manual for Grand Central Dispatch is available at
    435 //   http://developer.apple.com/library/mac/#documentation/Performance/Reference/GCD_libdispatch_Ref/Reference/reference.html
    436 // The implementation details are at
    437 //   http://libdispatch.macosforge.org/trac/browser/trunk/src/queue.c
    438 
// Opaque handles for the (private) pthread workqueue API.
typedef void* pthread_workqueue_t;
typedef void* pthread_workitem_handle_t;

// Opaque handles/types mirroring <dispatch/dispatch.h>, which is not
// included here (see the comment below about legacy systems).
typedef void* dispatch_group_t;
typedef void* dispatch_queue_t;
typedef uint64_t dispatch_time_t;
typedef void (*dispatch_function_t)(void *block);
typedef void* (*worker_t)(void *block);

// A wrapper for the ObjC blocks used to support libdispatch.
typedef struct {
  void *block;               // Original context of the dispatched call.
  dispatch_function_t func;  // Original function to invoke on |block|.
  int parent_tid;            // Tid of the submitting thread, or -1.
} asan_block_context_t;
    454 
    455 // We use extern declarations of libdispatch functions here instead
    456 // of including <dispatch/dispatch.h>. This header is not present on
457 // Mac OS X Leopard and earlier, and although we don't expect ASan to
    458 // work on legacy systems, it's bad to break the build of
    459 // LLVM compiler-rt there.
    460 extern "C" {
    461 void dispatch_async_f(dispatch_queue_t dq, void *ctxt,
    462                       dispatch_function_t func);
    463 void dispatch_sync_f(dispatch_queue_t dq, void *ctxt,
    464                      dispatch_function_t func);
    465 void dispatch_after_f(dispatch_time_t when, dispatch_queue_t dq, void *ctxt,
    466                       dispatch_function_t func);
    467 void dispatch_barrier_async_f(dispatch_queue_t dq, void *ctxt,
    468                               dispatch_function_t func);
    469 void dispatch_group_async_f(dispatch_group_t group, dispatch_queue_t dq,
    470                             void *ctxt, dispatch_function_t func);
    471 int pthread_workqueue_additem_np(pthread_workqueue_t workq,
    472     void *(*workitem_func)(void *), void * workitem_arg,
    473     pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp);
    474 }  // extern "C"
    475 
// Runs on the worker thread: unwraps the asan_block_context_t created by
// one of the dispatch interceptors, makes sure the current thread is
// registered with ASan, calls the original function and frees the wrapper.
extern "C"
void asan_dispatch_call_block_and_release(void *block) {
  GET_STACK_TRACE_HERE(kStackTraceMax);
  asan_block_context_t *context = (asan_block_context_t*)block;
  if (FLAG_v >= 2) {
    Report("asan_dispatch_call_block_and_release(): "
           "context: %p, pthread_self: %p\n",
           block, pthread_self());
  }
  AsanThread *t = asanThreadRegistry().GetCurrent();
  if (!t) {
    // NOTE(review): worker threads are apparently not known to ASan yet
    // (presumably created by the system rather than via an intercepted
    // pthread_create()), so register them lazily here.
    t = AsanThread::Create(context->parent_tid, NULL, NULL, &stack);
    asanThreadRegistry().RegisterThread(t);
    t->Init();
    asanThreadRegistry().SetCurrent(t);
  }
  // Call the original dispatcher for the block.
  context->func(context->block);
  asan_free(context, &stack);
}
    496 
    497 }  // namespace __asan
    498 
    499 using namespace __asan;  // NOLINT
    500 
    501 // Wrap |ctxt| and |func| into an asan_block_context_t.
    502 // The caller retains control of the allocated context.
    503 extern "C"
    504 asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func,
    505                                          AsanStackTrace *stack) {
    506   asan_block_context_t *asan_ctxt =
    507       (asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), stack);
    508   asan_ctxt->block = ctxt;
    509   asan_ctxt->func = func;
    510   asan_ctxt->parent_tid = asanThreadRegistry().GetCurrentTidOrMinusOne();
    511   return asan_ctxt;
    512 }
    513 
    514 // TODO(glider): can we reduce code duplication by introducing a macro?
    515 INTERCEPTOR(void, dispatch_async_f, dispatch_queue_t dq, void *ctxt,
    516                                     dispatch_function_t func) {
    517   GET_STACK_TRACE_HERE(kStackTraceMax);
    518   asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
    519   if (FLAG_v >= 2) {
    520     Report("dispatch_async_f(): context: %p, pthread_self: %p\n",
    521         asan_ctxt, pthread_self());
    522     PRINT_CURRENT_STACK();
    523   }
    524   return REAL(dispatch_async_f)(dq, (void*)asan_ctxt,
    525                                 asan_dispatch_call_block_and_release);
    526 }
    527 
    528 INTERCEPTOR(void, dispatch_sync_f, dispatch_queue_t dq, void *ctxt,
    529                                    dispatch_function_t func) {
    530   GET_STACK_TRACE_HERE(kStackTraceMax);
    531   asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
    532   if (FLAG_v >= 2) {
    533     Report("dispatch_sync_f(): context: %p, pthread_self: %p\n",
    534         asan_ctxt, pthread_self());
    535     PRINT_CURRENT_STACK();
    536   }
    537   return REAL(dispatch_sync_f)(dq, (void*)asan_ctxt,
    538                                asan_dispatch_call_block_and_release);
    539 }
    540 
    541 INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when,
    542                                     dispatch_queue_t dq, void *ctxt,
    543                                     dispatch_function_t func) {
    544   GET_STACK_TRACE_HERE(kStackTraceMax);
    545   asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
    546   if (FLAG_v >= 2) {
    547     Report("dispatch_after_f: %p\n", asan_ctxt);
    548     PRINT_CURRENT_STACK();
    549   }
    550   return REAL(dispatch_after_f)(when, dq, (void*)asan_ctxt,
    551                                 asan_dispatch_call_block_and_release);
    552 }
    553 
    554 INTERCEPTOR(void, dispatch_barrier_async_f, dispatch_queue_t dq, void *ctxt,
    555                                             dispatch_function_t func) {
    556   GET_STACK_TRACE_HERE(kStackTraceMax);
    557   asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
    558   if (FLAG_v >= 2) {
    559     Report("dispatch_barrier_async_f(): context: %p, pthread_self: %p\n",
    560            asan_ctxt, pthread_self());
    561     PRINT_CURRENT_STACK();
    562   }
    563   REAL(dispatch_barrier_async_f)(dq, (void*)asan_ctxt,
    564                                  asan_dispatch_call_block_and_release);
    565 }
    566 
    567 INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
    568                                           dispatch_queue_t dq, void *ctxt,
    569                                           dispatch_function_t func) {
    570   GET_STACK_TRACE_HERE(kStackTraceMax);
    571   asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
    572   if (FLAG_v >= 2) {
    573     Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n",
    574            asan_ctxt, pthread_self());
    575     PRINT_CURRENT_STACK();
    576   }
    577   REAL(dispatch_group_async_f)(group, dq, (void*)asan_ctxt,
    578                                asan_dispatch_call_block_and_release);
    579 }
    580 
    581 // The following stuff has been extremely helpful while looking for the
    582 // unhandled functions that spawned jobs on Chromium shutdown. If the verbosity
    583 // level is 2 or greater, we wrap pthread_workqueue_additem_np() in order to
    584 // find the points of worker thread creation (each of such threads may be used
    585 // to run several tasks, that's why this is not enough to support the whole
    586 // libdispatch API.
    587 extern "C"
    588 void *wrap_workitem_func(void *arg) {
    589   if (FLAG_v >= 2) {
    590     Report("wrap_workitem_func: %p, pthread_self: %p\n", arg, pthread_self());
    591   }
    592   asan_block_context_t *ctxt = (asan_block_context_t*)arg;
    593   worker_t fn = (worker_t)(ctxt->func);
    594   void *result =  fn(ctxt->block);
    595   GET_STACK_TRACE_HERE(kStackTraceMax);
    596   asan_free(arg, &stack);
    597   return result;
    598 }
    599 
    600 INTERCEPTOR(int, pthread_workqueue_additem_np, pthread_workqueue_t workq,
    601     void *(*workitem_func)(void *), void * workitem_arg,
    602     pthread_workitem_handle_t * itemhandlep, unsigned int *gencountp) {
    603   GET_STACK_TRACE_HERE(kStackTraceMax);
    604   asan_block_context_t *asan_ctxt =
    605       (asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), &stack);
    606   asan_ctxt->block = workitem_arg;
    607   asan_ctxt->func = (dispatch_function_t)workitem_func;
    608   asan_ctxt->parent_tid = asanThreadRegistry().GetCurrentTidOrMinusOne();
    609   if (FLAG_v >= 2) {
    610     Report("pthread_workqueue_additem_np: %p\n", asan_ctxt);
    611     PRINT_CURRENT_STACK();
    612   }
    613   return REAL(pthread_workqueue_additem_np)(workq, wrap_workitem_func,
    614                                             asan_ctxt, itemhandlep,
    615                                             gencountp);
    616 }
    617 
// CF_RC_BITS, the layout of CFRuntimeBase and __CFStrIsConstant are internal
// and subject to change in further CoreFoundation versions. Apple does not
// guarantee any binary compatibility from release to release.

// See http://opensource.apple.com/source/CF/CF-635.15/CFInternal.h
// Endianness-dependent index of the byte inside CFRuntimeBase::_cfinfo
// that __CFStrIsConstant() below inspects.
#if defined(__BIG_ENDIAN__)
#define CF_RC_BITS 0
#endif

#if defined(__LITTLE_ENDIAN__)
#define CF_RC_BITS 3
#endif
    630 
// See http://opensource.apple.com/source/CF/CF-635.15/CFRuntime.h
// Mirror of the private header common to all CoreFoundation objects.
typedef struct __CFRuntimeBase {
  uintptr_t _cfisa;
  uint8_t _cfinfo[4];
#if __LP64__
  uint32_t _rc;  // Presumably the retain count (64-bit layout only).
#endif
} CFRuntimeBase;
    639 
// See http://opensource.apple.com/source/CF/CF-635.15/CFString.c
// Returns nonzero iff |str| is a compile-time constant CFString,
// recognized by a zero value in the reference-count field (the _rc word
// on LP64, a byte of _cfinfo otherwise).  |str| must be non-NULL.
int __CFStrIsConstant(CFStringRef str) {
  CFRuntimeBase *base = (CFRuntimeBase*)str;
#if __LP64__
  return base->_rc == 0;
#else
  return (base->_cfinfo[CF_RC_BITS]) == 0;
#endif
}
    649 
    650 INTERCEPTOR(CFStringRef, CFStringCreateCopy, CFAllocatorRef alloc,
    651                                              CFStringRef str) {
    652   if (__CFStrIsConstant(str)) {
    653     return str;
    654   } else {
    655     return REAL(CFStringCreateCopy)(alloc, str);
    656   }
    657 }
    658 
    659 namespace __asan {
    660 
// Installs the interceptors for the libdispatch and CoreFoundation
// functions handled in this file.
void InitializeMacInterceptors() {
  CHECK(INTERCEPT_FUNCTION(dispatch_async_f));
  CHECK(INTERCEPT_FUNCTION(dispatch_sync_f));
  CHECK(INTERCEPT_FUNCTION(dispatch_after_f));
  CHECK(INTERCEPT_FUNCTION(dispatch_barrier_async_f));
  CHECK(INTERCEPT_FUNCTION(dispatch_group_async_f));
  // We don't need to intercept pthread_workqueue_additem_np() to support the
  // libdispatch API, but it helps us to debug the unsupported functions. Let's
  // intercept it only during verbose runs.
  if (FLAG_v >= 2) {
    CHECK(INTERCEPT_FUNCTION(pthread_workqueue_additem_np));
  }
  // Normally CFStringCreateCopy should not copy constant CF strings.
  // Replacing the default CFAllocator causes constant strings to be copied
  // rather than just returned, which leads to bugs in big applications like
  // Chromium and WebKit, see
  // http://code.google.com/p/address-sanitizer/issues/detail?id=10
  // Until this problem is fixed we need to check that the string is
  // non-constant before calling CFStringCreateCopy.
  CHECK(INTERCEPT_FUNCTION(CFStringCreateCopy));
}
    682 
    683 }  // namespace __asan
    684 
    685 #endif  // __APPLE__
    686