//===-- asan_malloc_mac.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Mac-specific malloc interception.
//===----------------------------------------------------------------------===//

#ifdef __APPLE__

#include <AvailabilityMacros.h>
#include <CoreFoundation/CFBase.h>
#include <dlfcn.h>
#include <malloc/malloc.h>

#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_internal.h"
#include "asan_mac.h"
#include "asan_report.h"
#include "asan_stack.h"
#include "asan_stats.h"
#include "asan_thread_registry.h"

// Similar code is used in Google Perftools,
// http://code.google.com/p/google-perftools.

// ---------------------- Replacement functions ---------------- {{{1
using namespace __asan;  // NOLINT

// TODO(glider): do we need both zones?
static malloc_zone_t *system_malloc_zone = 0;
static malloc_zone_t asan_zone;

INTERCEPTOR(malloc_zone_t *, malloc_create_zone,
            vm_size_t start_size, unsigned zone_flags) {
  if (!asan_inited) __asan_init();
  GET_STACK_TRACE_MALLOC;
  malloc_zone_t *new_zone =
      (malloc_zone_t*)asan_malloc(sizeof(asan_zone), &stack);
  internal_memcpy(new_zone, &asan_zone, sizeof(asan_zone));
  new_zone->zone_name = NULL;  // The name will be changed anyway.
  return new_zone;
}

INTERCEPTOR(malloc_zone_t *, malloc_default_zone, void) {
  if (!asan_inited) __asan_init();
  return &asan_zone;
}

INTERCEPTOR(malloc_zone_t *, malloc_default_purgeable_zone, void) {
  // FIXME: ASan should support purgeable allocations.
  // https://code.google.com/p/address-sanitizer/issues/detail?id=139
  if (!asan_inited) __asan_init();
  return &asan_zone;
}

INTERCEPTOR(void, malloc_make_purgeable, void *ptr) {
  // FIXME: ASan should support purgeable allocations. Ignoring them is fine
  // for now.
  if (!asan_inited) __asan_init();
}

INTERCEPTOR(int, malloc_make_nonpurgeable, void *ptr) {
  // FIXME: ASan should support purgeable allocations. Ignoring them is fine
  // for now.
  if (!asan_inited) __asan_init();
  // Must return 0 if the contents were not purged since the last call to
  // malloc_make_purgeable().
  return 0;
}

INTERCEPTOR(void, malloc_set_zone_name, malloc_zone_t *zone, const char *name) {
  if (!asan_inited) __asan_init();
  // Allocate |strlen("asan-") + 1 + internal_strlen(name)| bytes.
  size_t buflen = 6 + (name ? internal_strlen(name) : 0);
  InternalScopedBuffer<char> new_name(buflen);
  if (name && zone->introspect == asan_zone.introspect) {
    internal_snprintf(new_name.data(), buflen, "asan-%s", name);
    name = new_name.data();
  }

  // Call the system malloc's implementation for both external and our zones,
  // since that appropriately changes VM region protections on the zone.
  REAL(malloc_set_zone_name)(zone, name);
}

INTERCEPTOR(void *, malloc, size_t size) {
  if (!asan_inited) __asan_init();
  GET_STACK_TRACE_MALLOC;
  void *res = asan_malloc(size, &stack);
  return res;
}

INTERCEPTOR(void, free, void *ptr) {
  if (!asan_inited) __asan_init();
  if (!ptr) return;
  GET_STACK_TRACE_FREE;
  asan_free(ptr, &stack, FROM_MALLOC);
}

INTERCEPTOR(void *, realloc, void *ptr, size_t size) {
  if (!asan_inited) __asan_init();
  GET_STACK_TRACE_MALLOC;
  return asan_realloc(ptr, size, &stack);
}

INTERCEPTOR(void *, calloc, size_t nmemb, size_t size) {
  if (!asan_inited) __asan_init();
  GET_STACK_TRACE_MALLOC;
  return asan_calloc(nmemb, size, &stack);
}

INTERCEPTOR(void *, valloc, size_t size) {
  if (!asan_inited) __asan_init();
  GET_STACK_TRACE_MALLOC;
  return asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC);
}

INTERCEPTOR(size_t, malloc_good_size, size_t size) {
  if (!asan_inited) __asan_init();
  return asan_zone.introspect->good_size(&asan_zone, size);
}

INTERCEPTOR(int, posix_memalign, void **memptr, size_t alignment, size_t size) {
  if (!asan_inited) __asan_init();
  CHECK(memptr);
  GET_STACK_TRACE_MALLOC;
  void *result = asan_memalign(alignment, size, &stack, FROM_MALLOC);
  if (result) {
    *memptr = result;
    return 0;
  }
  return -1;
}

namespace {

// TODO(glider): the mz_* functions should be united with the Linux wrappers,
// as they are basically copied from there.
size_t mz_size(malloc_zone_t* zone, const void* ptr) {
  return asan_mz_size(ptr);
}

void *mz_malloc(malloc_zone_t *zone, size_t size) {
  if (!asan_inited) {
    CHECK(system_malloc_zone);
    return malloc_zone_malloc(system_malloc_zone, size);
  }
  GET_STACK_TRACE_MALLOC;
  return asan_malloc(size, &stack);
}

void *mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
  if (!asan_inited) {
    // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
    const size_t kCallocPoolSize = 1024;
    static uptr calloc_memory_for_dlsym[kCallocPoolSize];
    static size_t allocated;
    size_t size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize;
    void *mem = (void*)&calloc_memory_for_dlsym[allocated];
    allocated += size_in_words;
    CHECK(allocated < kCallocPoolSize);
    return mem;
  }
  GET_STACK_TRACE_MALLOC;
  return asan_calloc(nmemb, size, &stack);
}

void *mz_valloc(malloc_zone_t *zone, size_t size) {
  if (!asan_inited) {
    CHECK(system_malloc_zone);
    return malloc_zone_valloc(system_malloc_zone, size);
  }
  GET_STACK_TRACE_MALLOC;
  return asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC);
}

#define GET_ZONE_FOR_PTR(ptr) \
  malloc_zone_t *zone_ptr = malloc_zone_from_ptr(ptr); \
  const char *zone_name = (zone_ptr == 0) ? 0 : zone_ptr->zone_name

void ALWAYS_INLINE free_common(void *context, void *ptr) {
  if (!ptr) return;
  GET_STACK_TRACE_FREE;
  // FIXME: need to retire this flag.
  if (!flags()->mac_ignore_invalid_free) {
    asan_free(ptr, &stack, FROM_MALLOC);
  } else {
    GET_ZONE_FOR_PTR(ptr);
    WarnMacFreeUnallocated((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
    return;
  }
}

// TODO(glider): the allocation callbacks need to be refactored.
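// The mz_* functions below implement the malloc_zone_t callbacks that
// ReplaceSystemMalloc() installs into asan_zone, so allocations routed
// through the zone API (e.g. malloc_zone_malloc) are also served by ASan's
// allocator.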
void mz_free(malloc_zone_t *zone, void *ptr) {
  free_common(zone, ptr);
}

void *mz_realloc(malloc_zone_t *zone, void *ptr, size_t size) {
  if (!ptr) {
    GET_STACK_TRACE_MALLOC;
    return asan_malloc(size, &stack);
  } else {
    if (asan_mz_size(ptr)) {
      GET_STACK_TRACE_MALLOC;
      return asan_realloc(ptr, size, &stack);
    } else {
      // We can't recover from reallocating an unknown address, because
      // this would require reading at most |size| bytes from
      // potentially inaccessible memory.
      GET_STACK_TRACE_FREE;
      GET_ZONE_FOR_PTR(ptr);
      ReportMacMzReallocUnknown((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
    }
  }
}

void mz_destroy(malloc_zone_t* zone) {
  // A no-op -- we will not be destroyed!
  Report("mz_destroy() called -- ignoring\n");
}

// from AvailabilityMacros.h
#if defined(MAC_OS_X_VERSION_10_6) && \
    MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
void *mz_memalign(malloc_zone_t *zone, size_t align, size_t size) {
  if (!asan_inited) {
    CHECK(system_malloc_zone);
    return malloc_zone_memalign(system_malloc_zone, align, size);
  }
  GET_STACK_TRACE_MALLOC;
  return asan_memalign(align, size, &stack, FROM_MALLOC);
}

// This function is currently unused, and we build with -Werror.
#if 0
void mz_free_definite_size(malloc_zone_t* zone, void *ptr, size_t size) {
  // TODO(glider): check that |size| is valid.
  UNIMPLEMENTED();
}
#endif
#endif

kern_return_t mi_enumerator(task_t task, void *,
                            unsigned type_mask, vm_address_t zone_address,
                            memory_reader_t reader,
                            vm_range_recorder_t recorder) {
  // Should enumerate all the pointers we have. Seems like a lot of work.
  return KERN_FAILURE;
}

size_t mi_good_size(malloc_zone_t *zone, size_t size) {
  // I think it's always safe to return size, but maybe we could do better.
  return size;
}

boolean_t mi_check(malloc_zone_t *zone) {
  UNIMPLEMENTED();
}

void mi_print(malloc_zone_t *zone, boolean_t verbose) {
  UNIMPLEMENTED();
}

void mi_log(malloc_zone_t *zone, void *address) {
  // I don't think we support anything like this.
}

void mi_force_lock(malloc_zone_t *zone) {
  asan_mz_force_lock();
}

void mi_force_unlock(malloc_zone_t *zone) {
  asan_mz_force_unlock();
}

void mi_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
  AsanMallocStats malloc_stats;
  asanThreadRegistry().FillMallocStatistics(&malloc_stats);
  CHECK(sizeof(malloc_statistics_t) == sizeof(AsanMallocStats));
  internal_memcpy(stats, &malloc_stats, sizeof(malloc_statistics_t));
}

#if defined(MAC_OS_X_VERSION_10_6) && \
    MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
boolean_t mi_zone_locked(malloc_zone_t *zone) {
  // UNIMPLEMENTED();
  return false;
}
#endif

}  // unnamed namespace

namespace __asan {

void ReplaceSystemMalloc() {
  static malloc_introspection_t asan_introspection;
  // Ok to use internal_memset; these places are not performance-critical.
  internal_memset(&asan_introspection, 0, sizeof(asan_introspection));

  asan_introspection.enumerator = &mi_enumerator;
  asan_introspection.good_size = &mi_good_size;
  asan_introspection.check = &mi_check;
  asan_introspection.print = &mi_print;
  asan_introspection.log = &mi_log;
  asan_introspection.force_lock = &mi_force_lock;
  asan_introspection.force_unlock = &mi_force_unlock;
  asan_introspection.statistics = &mi_statistics;

  internal_memset(&asan_zone, 0, sizeof(malloc_zone_t));

  // Start with a version 4 zone which is used for OS X 10.4 and 10.5.
  asan_zone.version = 4;
  asan_zone.zone_name = "asan";
  asan_zone.size = &mz_size;
  asan_zone.malloc = &mz_malloc;
  asan_zone.calloc = &mz_calloc;
  asan_zone.valloc = &mz_valloc;
  asan_zone.free = &mz_free;
  asan_zone.realloc = &mz_realloc;
  asan_zone.destroy = &mz_destroy;
  asan_zone.batch_malloc = 0;
  asan_zone.batch_free = 0;
  asan_zone.introspect = &asan_introspection;

  // from AvailabilityMacros.h
#if defined(MAC_OS_X_VERSION_10_6) && \
    MAC_OS_X_VERSION_MAX_ALLOWED >= MAC_OS_X_VERSION_10_6
  // Switch to version 6 on OSX 10.6 to support memalign.
  asan_zone.version = 6;
  asan_zone.free_definite_size = 0;
  asan_zone.memalign = &mz_memalign;
  asan_introspection.zone_locked = &mi_zone_locked;
#endif

  // Register the ASan zone.
  malloc_zone_register(&asan_zone);
}
}  // namespace __asan

#endif  // __APPLE__