// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/process/memory.h"

#include <CoreFoundation/CoreFoundation.h>
#include <errno.h>
#include <mach/mach.h>
#include <mach/mach_vm.h>
#include <malloc/malloc.h>
#import <objc/runtime.h>

#include <new>

#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/mac/mac_util.h"
#include "base/mac/mach_logging.h"
#include "base/scoped_clear_errno.h"
#include "third_party/apple_apsl/CFBase.h"
#include "third_party/apple_apsl/malloc.h"

#if ARCH_CPU_32_BITS
#include <dlfcn.h>
#include <mach-o/nlist.h>

#include "base/threading/thread_local.h"
#include "third_party/mach_override/mach_override.h"
#endif  // ARCH_CPU_32_BITS

namespace base {

// These are helpers for EnableTerminationOnHeapCorruption, which is a no-op
// on 64 bit Macs.
#if ARCH_CPU_32_BITS
namespace {

// Finds the library path for malloc() and thus the libC part of libSystem,
// which in Lion is in a separate image.
const char* LookUpLibCPath() {
  const void* addr = reinterpret_cast<void*>(&malloc);

  Dl_info info;
  if (dladdr(addr, &info))
    return info.dli_fname;

  DLOG(WARNING) << "Could not find image path for malloc()";
  return NULL;
}

typedef void(*malloc_error_break_t)(void);
malloc_error_break_t g_original_malloc_error_break = NULL;

// Returns the function pointer for malloc_error_break. This symbol is declared
// as __private_extern__ and cannot be dlsym()ed. Instead, use nlist() to
// get it.
malloc_error_break_t LookUpMallocErrorBreak() {
  const char* lib_c_path = LookUpLibCPath();
  if (!lib_c_path)
    return NULL;

  // Only need to look up two symbols, but nlist() requires a NULL-terminated
  // array and takes no count.
  struct nlist nl[3];
  bzero(&nl, sizeof(nl));

  // The symbol to find.
  nl[0].n_un.n_name = const_cast<char*>("_malloc_error_break");

  // A reference symbol by which the address of the desired symbol will be
  // calculated.
  nl[1].n_un.n_name = const_cast<char*>("_malloc");

  int rv = nlist(lib_c_path, nl);
  if (rv != 0 || nl[0].n_type == N_UNDF || nl[1].n_type == N_UNDF) {
    return NULL;
  }

  // nlist() returns addresses as offsets in the image, not the instruction
  // pointer in memory. Use the known in-memory address of malloc()
  // to compute the offset for malloc_error_break().
  uintptr_t reference_addr = reinterpret_cast<uintptr_t>(&malloc);
  reference_addr -= nl[1].n_value;
  reference_addr += nl[0].n_value;

  return reinterpret_cast<malloc_error_break_t>(reference_addr);
}

// Combines ThreadLocalBoolean with AutoReset. It would be convenient
// to compose ThreadLocalPointer<bool> with base::AutoReset<bool>, but that
// would require allocating some storage for the bool.
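// A minimal usage sketch (illustrative; mirrors how UncheckedMalloc() below
// uses it):
//   ThreadLocalBooleanAutoReset flag(g_unchecked_alloc.Pointer(), true);
//   // ... g_unchecked_alloc reads true on this thread for the rest of the
//   // scope, and is restored to its prior value when |flag| is destroyed.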
class ThreadLocalBooleanAutoReset {
 public:
  ThreadLocalBooleanAutoReset(ThreadLocalBoolean* tlb, bool new_value)
      : scoped_tlb_(tlb),
        original_value_(tlb->Get()) {
    scoped_tlb_->Set(new_value);
  }
  ~ThreadLocalBooleanAutoReset() {
    scoped_tlb_->Set(original_value_);
  }

 private:
  ThreadLocalBoolean* scoped_tlb_;
  bool original_value_;

  DISALLOW_COPY_AND_ASSIGN(ThreadLocalBooleanAutoReset);
};

base::LazyInstance<ThreadLocalBoolean>::Leaky
    g_unchecked_alloc = LAZY_INSTANCE_INITIALIZER;

// NOTE(shess): This is called when the malloc library notices that the heap
// is fubar. Avoid calls which will re-enter the malloc library.
void CrMallocErrorBreak() {
  g_original_malloc_error_break();

  // Out of memory is certainly not heap corruption, and not necessarily
  // something for which the process should be terminated. Leave that decision
  // to the OOM killer.
  if (errno == ENOMEM)
    return;

  // The malloc library attempts to log to ASL (syslog) before calling this
  // code, which fails to access a Unix-domain socket when sandboxed. The
  // failed socket results in writing to a -1 fd, leaving EBADF in errno. If
  // UncheckedMalloc() is on the stack, for large allocations (15k and up)
  // only an OOM failure leads here. Smaller allocations could also arrive
  // here due to freelist corruption, but there is no way to distinguish that
  // from OOM at this point.
  //
  // NOTE(shess): I hypothesize that the EPERM case in 10.9 has the same root
  // cause as EBADF. Unfortunately, 10.9's opensource releases don't include
  // malloc source code at this time.
  // <http://crbug.com/312234>
  if ((errno == EBADF || errno == EPERM) && g_unchecked_alloc.Get().Get())
    return;

  // A unit test checks this error message, so it needs to be in release
  // builds.
  char buf[1024] =
      "Terminating process due to a potential for future heap corruption: "
      "errno=";
  char errnobuf[] = {
    '0' + ((errno / 100) % 10),
    '0' + ((errno / 10) % 10),
    '0' + (errno % 10),
    '\000'
  };
  COMPILE_ASSERT(ELAST <= 999, errno_too_large_to_encode);
  strlcat(buf, errnobuf, sizeof(buf));
  RAW_LOG(ERROR, buf);

  // Crash by writing to NULL+errno to allow analyzing errno from
  // crash dump info (setting a breakpad key would re-enter the malloc
  // library). The maximum documented errno in intro(2) is actually 102, but
  // it really just needs to be "small" to stay on the right vm page.
  const int kMaxErrno = 256;
  char* volatile death_ptr = NULL;
  death_ptr += std::min(errno, kMaxErrno);
  *death_ptr = '!';
}

}  // namespace
#endif  // ARCH_CPU_32_BITS

void EnableTerminationOnHeapCorruption() {
#if defined(ADDRESS_SANITIZER) || ARCH_CPU_64_BITS
  // AddressSanitizer handles heap corruption, and on 64 bit Macs, the malloc
  // system automatically abort()s on heap corruption.
  return;
#else
  // Only override once, otherwise CrMallocErrorBreak() will recurse
  // to itself.
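  // (mach_override_ptr() below stores the original implementation in
  // g_original_malloc_error_break; installing the override a second time
  // would save CrMallocErrorBreak() itself as the "original", making it call
  // back into itself.)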
  if (g_original_malloc_error_break)
    return;

  malloc_error_break_t malloc_error_break = LookUpMallocErrorBreak();
  if (!malloc_error_break) {
    DLOG(WARNING) << "Could not find malloc_error_break";
    return;
  }

  mach_error_t err = mach_override_ptr(
      (void*)malloc_error_break,
      (void*)&CrMallocErrorBreak,
      (void**)&g_original_malloc_error_break);

  if (err != err_none)
    DLOG(WARNING) << "Could not override malloc_error_break; error = " << err;
#endif  // defined(ADDRESS_SANITIZER) || ARCH_CPU_64_BITS
}

// ------------------------------------------------------------------------

namespace {

bool g_oom_killer_enabled;

// Starting with Mac OS X 10.7, the zone allocators set up by the system are
// read-only, to prevent them from being overwritten in an attack. However,
// blindly unprotecting and reprotecting the zone allocators fails with
// GuardMalloc because GuardMalloc sets up its zone allocator using a block of
// memory in its bss. Explicit saving/restoring of the protection is required.
//
// This function takes a pointer to a malloc zone, de-protects it if necessary,
// and returns (in the out parameters) a region of memory (if any) to be
// re-protected when modifications are complete. This approach assumes that
// there is no contention for the protection of this memory.
void DeprotectMallocZone(ChromeMallocZone* default_zone,
                         mach_vm_address_t* reprotection_start,
                         mach_vm_size_t* reprotection_length,
                         vm_prot_t* reprotection_value) {
  mach_port_t unused;
  *reprotection_start = reinterpret_cast<mach_vm_address_t>(default_zone);
  struct vm_region_basic_info_64 info;
  mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
  kern_return_t result =
      mach_vm_region(mach_task_self(),
                     reprotection_start,
                     reprotection_length,
                     VM_REGION_BASIC_INFO_64,
                     reinterpret_cast<vm_region_info_t>(&info),
                     &count,
                     &unused);
  MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_region";

  // The kernel always returns a null object for VM_REGION_BASIC_INFO_64, but
  // balance it with a deallocate in case this ever changes. See 10.9.2
  // xnu-2422.90.20/osfmk/vm/vm_map.c vm_map_region.
  mach_port_deallocate(mach_task_self(), unused);

  // Does the region fully enclose the zone pointers? Possibly unwarranted
  // simplification used: using the size of a full version 8 malloc zone rather
  // than the actual smaller size if the passed-in zone is not version 8.
  CHECK(*reprotection_start <=
        reinterpret_cast<mach_vm_address_t>(default_zone));
  mach_vm_size_t zone_offset = reinterpret_cast<mach_vm_size_t>(default_zone) -
      reinterpret_cast<mach_vm_size_t>(*reprotection_start);
  CHECK(zone_offset + sizeof(ChromeMallocZone) <= *reprotection_length);

  if (info.protection & VM_PROT_WRITE) {
    // No change needed; the zone is already writable.
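    // Zeroed out-parameters signal to the caller that no reprotection pass
    // is needed once modifications are complete.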
    *reprotection_start = 0;
    *reprotection_length = 0;
    *reprotection_value = VM_PROT_NONE;
  } else {
    *reprotection_value = info.protection;
    result = mach_vm_protect(mach_task_self(),
                             *reprotection_start,
                             *reprotection_length,
                             false,
                             info.protection | VM_PROT_WRITE);
    MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_protect";
  }
}

// === C malloc/calloc/valloc/realloc/posix_memalign ===

typedef void* (*malloc_type)(struct _malloc_zone_t* zone,
                             size_t size);
typedef void* (*calloc_type)(struct _malloc_zone_t* zone,
                             size_t num_items,
                             size_t size);
typedef void* (*valloc_type)(struct _malloc_zone_t* zone,
                             size_t size);
typedef void (*free_type)(struct _malloc_zone_t* zone,
                          void* ptr);
typedef void* (*realloc_type)(struct _malloc_zone_t* zone,
                              void* ptr,
                              size_t size);
typedef void* (*memalign_type)(struct _malloc_zone_t* zone,
                               size_t alignment,
                               size_t size);

malloc_type g_old_malloc;
calloc_type g_old_calloc;
valloc_type g_old_valloc;
free_type g_old_free;
realloc_type g_old_realloc;
memalign_type g_old_memalign;

malloc_type g_old_malloc_purgeable;
calloc_type g_old_calloc_purgeable;
valloc_type g_old_valloc_purgeable;
free_type g_old_free_purgeable;
realloc_type g_old_realloc_purgeable;
memalign_type g_old_memalign_purgeable;

void* oom_killer_malloc(struct _malloc_zone_t* zone,
                        size_t size) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  void* result = g_old_malloc(zone, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_calloc(struct _malloc_zone_t* zone,
                        size_t num_items,
                        size_t size) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  void* result = g_old_calloc(zone, num_items, size);
  if (!result && num_items && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_valloc(struct _malloc_zone_t* zone,
                        size_t size) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  void* result = g_old_valloc(zone, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void oom_killer_free(struct _malloc_zone_t* zone,
                     void* ptr) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  g_old_free(zone, ptr);
}

void* oom_killer_realloc(struct _malloc_zone_t* zone,
                         void* ptr,
                         size_t size) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  void* result = g_old_realloc(zone, ptr, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_memalign(struct _malloc_zone_t* zone,
                          size_t alignment,
                          size_t size) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  void* result = g_old_memalign(zone, alignment, size);
  // Only die if posix_memalign would have returned ENOMEM, since there are
  // other reasons why NULL might be returned (see
  // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ).
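  // The checks below mirror posix_memalign()'s EINVAL preconditions:
  // alignment must be a power of two and at least sizeof(void*). For example,
  // alignment = 24 fails the power-of-two test (24 & 23 == 16), so a NULL
  // result for it signals a bad argument rather than memory exhaustion.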
  if (!result && size && alignment >= sizeof(void*) &&
      (alignment & (alignment - 1)) == 0) {
    debug::BreakDebugger();
  }
  return result;
}

void* oom_killer_malloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t size) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  void* result = g_old_malloc_purgeable(zone, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_calloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t num_items,
                                  size_t size) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  void* result = g_old_calloc_purgeable(zone, num_items, size);
  if (!result && num_items && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_valloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t size) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  void* result = g_old_valloc_purgeable(zone, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void oom_killer_free_purgeable(struct _malloc_zone_t* zone,
                               void* ptr) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  g_old_free_purgeable(zone, ptr);
}

void* oom_killer_realloc_purgeable(struct _malloc_zone_t* zone,
                                   void* ptr,
                                   size_t size) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  void* result = g_old_realloc_purgeable(zone, ptr, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone,
                                    size_t alignment,
                                    size_t size) {
#if ARCH_CPU_32_BITS
  ScopedClearErrno clear_errno;
#endif  // ARCH_CPU_32_BITS
  void* result = g_old_memalign_purgeable(zone, alignment, size);
  // Only die if posix_memalign would have returned ENOMEM, since there are
  // other reasons why NULL might be returned (see
  // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ).
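  // Same EINVAL-versus-ENOMEM distinction as in oom_killer_memalign() above.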
  if (!result && size && alignment >= sizeof(void*) &&
      (alignment & (alignment - 1)) == 0) {
    debug::BreakDebugger();
  }
  return result;
}

// === C++ operator new ===

void oom_killer_new() {
  debug::BreakDebugger();
}

// === Core Foundation CFAllocators ===

bool CanGetContextForCFAllocator() {
  return !base::mac::IsOSLaterThanYosemite_DontCallThis();
}

CFAllocatorContext* ContextForCFAllocator(CFAllocatorRef allocator) {
  if (base::mac::IsOSSnowLeopard()) {
    ChromeCFAllocatorLeopards* our_allocator =
        const_cast<ChromeCFAllocatorLeopards*>(
            reinterpret_cast<const ChromeCFAllocatorLeopards*>(allocator));
    return &our_allocator->_context;
  } else if (base::mac::IsOSLion() ||
             base::mac::IsOSMountainLion() ||
             base::mac::IsOSMavericks() ||
             base::mac::IsOSYosemite()) {
    ChromeCFAllocatorLions* our_allocator =
        const_cast<ChromeCFAllocatorLions*>(
            reinterpret_cast<const ChromeCFAllocatorLions*>(allocator));
    return &our_allocator->_context;
  } else {
    return NULL;
  }
}

CFAllocatorAllocateCallBack g_old_cfallocator_system_default;
CFAllocatorAllocateCallBack g_old_cfallocator_malloc;
CFAllocatorAllocateCallBack g_old_cfallocator_malloc_zone;

void* oom_killer_cfallocator_system_default(CFIndex alloc_size,
                                            CFOptionFlags hint,
                                            void* info) {
  void* result = g_old_cfallocator_system_default(alloc_size, hint, info);
  if (!result)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_cfallocator_malloc(CFIndex alloc_size,
                                    CFOptionFlags hint,
                                    void* info) {
  void* result = g_old_cfallocator_malloc(alloc_size, hint, info);
  if (!result)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_cfallocator_malloc_zone(CFIndex alloc_size,
                                         CFOptionFlags hint,
                                         void* info) {
  void* result = g_old_cfallocator_malloc_zone(alloc_size, hint, info);
  if (!result)
    debug::BreakDebugger();
  return result;
}

// === Cocoa NSObject allocation ===

typedef id (*allocWithZone_t)(id, SEL, NSZone*);
allocWithZone_t g_old_allocWithZone;

id oom_killer_allocWithZone(id self, SEL _cmd, NSZone* zone) {
  id result = g_old_allocWithZone(self, _cmd, zone);
  if (!result)
    debug::BreakDebugger();
  return result;
}

}  // namespace

bool UncheckedMalloc(size_t size, void** result) {
  if (g_old_malloc) {
#if ARCH_CPU_32_BITS
    ScopedClearErrno clear_errno;
    ThreadLocalBooleanAutoReset flag(g_unchecked_alloc.Pointer(), true);
#endif  // ARCH_CPU_32_BITS
    *result = g_old_malloc(malloc_default_zone(), size);
  } else {
    *result = malloc(size);
  }

  return *result != NULL;
}

bool UncheckedCalloc(size_t num_items, size_t size, void** result) {
  if (g_old_calloc) {
#if ARCH_CPU_32_BITS
    ScopedClearErrno clear_errno;
    ThreadLocalBooleanAutoReset flag(g_unchecked_alloc.Pointer(), true);
#endif  // ARCH_CPU_32_BITS
    *result = g_old_calloc(malloc_default_zone(), num_items, size);
  } else {
    *result = calloc(num_items, size);
  }

  return *result != NULL;
}

void* UncheckedMalloc(size_t size) {
  void* address;
  return UncheckedMalloc(size, &address) ? address : NULL;
}

void* UncheckedCalloc(size_t num_items, size_t size) {
  void* address;
  return UncheckedCalloc(num_items, size, &address) ? address : NULL;
}

void EnableTerminationOnOutOfMemory() {
  if (g_oom_killer_enabled)
    return;

  g_oom_killer_enabled = true;

  // === C malloc/calloc/valloc/realloc/posix_memalign ===

  // This approach is not perfect, as requests for amounts of memory larger
  // than MALLOC_ABSOLUTE_MAX_SIZE (currently SIZE_T_MAX - (2 * PAGE_SIZE))
  // will still fail with a NULL rather than dying (see
  // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c for
  // details). Unfortunately, it's the best we can do. Also note that this
  // does not affect allocations from non-default zones.

  CHECK(!g_old_malloc && !g_old_calloc && !g_old_valloc && !g_old_realloc &&
        !g_old_memalign) << "Old allocators unexpectedly non-null";

  CHECK(!g_old_malloc_purgeable && !g_old_calloc_purgeable &&
        !g_old_valloc_purgeable && !g_old_realloc_purgeable &&
        !g_old_memalign_purgeable) << "Old allocators unexpectedly non-null";

#if !defined(ADDRESS_SANITIZER)
  // Don't do anything special on OOM for the malloc zones replaced by
  // AddressSanitizer, as modifying or protecting them may not work correctly.

  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  ChromeMallocZone* purgeable_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_purgeable_zone());

  mach_vm_address_t default_reprotection_start = 0;
  mach_vm_size_t default_reprotection_length = 0;
  vm_prot_t default_reprotection_value = VM_PROT_NONE;
  DeprotectMallocZone(default_zone,
                      &default_reprotection_start,
                      &default_reprotection_length,
                      &default_reprotection_value);

  mach_vm_address_t purgeable_reprotection_start = 0;
  mach_vm_size_t purgeable_reprotection_length = 0;
  vm_prot_t purgeable_reprotection_value = VM_PROT_NONE;
  if (purgeable_zone) {
    DeprotectMallocZone(purgeable_zone,
                        &purgeable_reprotection_start,
                        &purgeable_reprotection_length,
                        &purgeable_reprotection_value);
  }

  // Default zone

  g_old_malloc = default_zone->malloc;
  g_old_calloc = default_zone->calloc;
  g_old_valloc = default_zone->valloc;
  g_old_free = default_zone->free;
  g_old_realloc = default_zone->realloc;
  CHECK(g_old_malloc && g_old_calloc && g_old_valloc && g_old_free &&
        g_old_realloc)
      << "Failed to get system allocation functions.";

  default_zone->malloc = oom_killer_malloc;
  default_zone->calloc = oom_killer_calloc;
  default_zone->valloc = oom_killer_valloc;
  default_zone->free = oom_killer_free;
  default_zone->realloc = oom_killer_realloc;

  if (default_zone->version >= 5) {
    g_old_memalign = default_zone->memalign;
    if (g_old_memalign)
      default_zone->memalign = oom_killer_memalign;
  }

  // Purgeable zone (if it exists)

  if (purgeable_zone) {
    g_old_malloc_purgeable = purgeable_zone->malloc;
    g_old_calloc_purgeable = purgeable_zone->calloc;
    g_old_valloc_purgeable = purgeable_zone->valloc;
    g_old_free_purgeable = purgeable_zone->free;
    g_old_realloc_purgeable = purgeable_zone->realloc;
    CHECK(g_old_malloc_purgeable && g_old_calloc_purgeable &&
          g_old_valloc_purgeable && g_old_free_purgeable &&
          g_old_realloc_purgeable)
        << "Failed to get system allocation functions.";

    purgeable_zone->malloc = oom_killer_malloc_purgeable;
    purgeable_zone->calloc = oom_killer_calloc_purgeable;
    purgeable_zone->valloc = oom_killer_valloc_purgeable;
    purgeable_zone->free = oom_killer_free_purgeable;
    purgeable_zone->realloc = oom_killer_realloc_purgeable;

    if (purgeable_zone->version >= 5) {
      g_old_memalign_purgeable = purgeable_zone->memalign;
      if (g_old_memalign_purgeable)
        purgeable_zone->memalign = oom_killer_memalign_purgeable;
    }
  }

  // Restore protection if it was active.

  if (default_reprotection_start) {
    kern_return_t result = mach_vm_protect(mach_task_self(),
                                           default_reprotection_start,
                                           default_reprotection_length,
                                           false,
                                           default_reprotection_value);
    MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_protect";
  }

  if (purgeable_reprotection_start) {
    kern_return_t result = mach_vm_protect(mach_task_self(),
                                           purgeable_reprotection_start,
                                           purgeable_reprotection_length,
                                           false,
                                           purgeable_reprotection_value);
    MACH_CHECK(result == KERN_SUCCESS, result) << "mach_vm_protect";
  }
#endif  // !defined(ADDRESS_SANITIZER)

  // === C malloc_zone_batch_malloc ===

  // batch_malloc is omitted because the default malloc zone's implementation
  // only supports batch_malloc for "tiny" allocations from the free list. It
  // will fail for allocations larger than "tiny", and will only allocate as
  // many blocks as it's able to from the free list. These factors mean that
  // it can return less than the requested memory even in a non-out-of-memory
  // situation. There's no good way to detect whether a batch_malloc failure
  // is due to these other factors or to genuine memory or address space
  // exhaustion; in fact, because it only allocates from the "tiny" free list,
  // a failure is unlikely to be due to memory exhaustion. Similarly, these
  // constraints on batch_malloc mean that callers must always be prepared to
  // receive less memory than was requested, even in situations where memory
  // pressure is not a concern. Finally, the only public interface to
  // batch_malloc is malloc_zone_batch_malloc, which is specific to the
  // system's malloc implementation. It's unlikely that anyone's even heard of
  // it.

  // === C++ operator new ===

  // Yes, operator new does call through to malloc, but this will catch
  // failures that our imperfect handling of malloc cannot.

  std::set_new_handler(oom_killer_new);

#if !defined(ADDRESS_SANITIZER)
  // === Core Foundation CFAllocators ===

  // This will not catch allocation done by custom allocators, but will catch
  // all allocation done by system-provided ones.
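  // The hook works by rewriting the allocate callback inside each
  // allocator's CFAllocatorContext. That is only possible because
  // third_party/apple_apsl/CFBase.h describes the private CFAllocator layout
  // for known OS releases, which is why ContextForCFAllocator() above is
  // gated on OS version checks.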

  CHECK(!g_old_cfallocator_system_default && !g_old_cfallocator_malloc &&
        !g_old_cfallocator_malloc_zone)
      << "Old allocators unexpectedly non-null";

  bool cf_allocator_internals_known = CanGetContextForCFAllocator();

  if (cf_allocator_internals_known) {
    CFAllocatorContext* context =
        ContextForCFAllocator(kCFAllocatorSystemDefault);
    CHECK(context) << "Failed to get context for kCFAllocatorSystemDefault.";
    g_old_cfallocator_system_default = context->allocate;
    CHECK(g_old_cfallocator_system_default)
        << "Failed to get kCFAllocatorSystemDefault allocation function.";
    context->allocate = oom_killer_cfallocator_system_default;

    context = ContextForCFAllocator(kCFAllocatorMalloc);
    CHECK(context) << "Failed to get context for kCFAllocatorMalloc.";
    g_old_cfallocator_malloc = context->allocate;
    CHECK(g_old_cfallocator_malloc)
        << "Failed to get kCFAllocatorMalloc allocation function.";
    context->allocate = oom_killer_cfallocator_malloc;

    context = ContextForCFAllocator(kCFAllocatorMallocZone);
    CHECK(context) << "Failed to get context for kCFAllocatorMallocZone.";
    g_old_cfallocator_malloc_zone = context->allocate;
    CHECK(g_old_cfallocator_malloc_zone)
        << "Failed to get kCFAllocatorMallocZone allocation function.";
    context->allocate = oom_killer_cfallocator_malloc_zone;
  } else {
    DLOG(WARNING) << "Internals of CFAllocator not known; out-of-memory "
                     "failures via CFAllocator will not result in "
                     "termination. http://crbug.com/45650";
  }
#endif  // !defined(ADDRESS_SANITIZER)

  // === Cocoa NSObject allocation ===

  // Note that both +[NSObject new] and +[NSObject alloc] call through to
  // +[NSObject allocWithZone:].

  CHECK(!g_old_allocWithZone)
      << "Old allocator unexpectedly non-null";

  Class nsobject_class = [NSObject class];
  Method orig_method = class_getClassMethod(nsobject_class,
                                            @selector(allocWithZone:));
  g_old_allocWithZone = reinterpret_cast<allocWithZone_t>(
      method_getImplementation(orig_method));
  CHECK(g_old_allocWithZone)
      << "Failed to get allocWithZone allocation function.";
  method_setImplementation(orig_method,
                           reinterpret_cast<IMP>(oom_killer_allocWithZone));
}

}  // namespace base
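// Usage sketch (illustrative): a process would typically call
// base::EnableTerminationOnHeapCorruption() and
// base::EnableTerminationOnOutOfMemory() once during early startup, before
// spawning threads that allocate.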