Home | History | Annotate | Download | only in process
      1 // Copyright (c) 2013 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "base/process/memory.h"
      6 
      7 #include <CoreFoundation/CoreFoundation.h>
      8 #include <errno.h>
      9 #include <mach/mach.h>
     10 #include <mach/mach_vm.h>
     11 #include <malloc/malloc.h>
     12 #import <objc/runtime.h>
     13 
     14 #include <new>
     15 
     16 #include "base/lazy_instance.h"
     17 #include "base/logging.h"
     18 #include "base/mac/mac_util.h"
     19 #include "base/scoped_clear_errno.h"
     20 #include "third_party/apple_apsl/CFBase.h"
     21 #include "third_party/apple_apsl/malloc.h"
     22 
     23 #if ARCH_CPU_32_BITS
     24 #include <dlfcn.h>
     25 #include <mach-o/nlist.h>
     26 
     27 #include "base/threading/thread_local.h"
     28 #include "third_party/mach_override/mach_override.h"
     29 #endif  // ARCH_CPU_32_BITS
     30 
     31 namespace base {
     32 
     33 // These are helpers for EnableTerminationOnHeapCorruption, which is a no-op
     34 // on 64 bit Macs.
     35 #if ARCH_CPU_32_BITS
     36 namespace {
     37 
     38 // Finds the library path for malloc() and thus the libC part of libSystem,
     39 // which in Lion is in a separate image.
     40 const char* LookUpLibCPath() {
     41   const void* addr = reinterpret_cast<void*>(&malloc);
     42 
     43   Dl_info info;
     44   if (dladdr(addr, &info))
     45     return info.dli_fname;
     46 
     47   DLOG(WARNING) << "Could not find image path for malloc()";
     48   return NULL;
     49 }
     50 
     51 typedef void(*malloc_error_break_t)(void);
     52 malloc_error_break_t g_original_malloc_error_break = NULL;
     53 
     54 // Returns the function pointer for malloc_error_break. This symbol is declared
     55 // as __private_extern__ and cannot be dlsym()ed. Instead, use nlist() to
     56 // get it.
malloc_error_break_t LookUpMallocErrorBreak() {
  const char* lib_c_path = LookUpLibCPath();
  if (!lib_c_path)
    return NULL;

  // Only need to look up two symbols, but nlist() requires a NULL-terminated
  // array and takes no count.
  struct nlist nl[3];
  bzero(&nl, sizeof(nl));

  // The symbol to find.
  nl[0].n_un.n_name = const_cast<char*>("_malloc_error_break");

  // A reference symbol by which the address of the desired symbol will be
  // calculated.
  nl[1].n_un.n_name = const_cast<char*>("_malloc");

  // nlist() fills in n_type/n_value for each requested symbol. Bail out if
  // the call fails or either symbol is undefined in the image.
  int rv = nlist(lib_c_path, nl);
  if (rv != 0 || nl[0].n_type == N_UNDF || nl[1].n_type == N_UNDF) {
    return NULL;
  }

  // nlist() returns addresses as offsets in the image, not the instruction
  // pointer in memory. Use the known in-memory address of malloc()
  // to compute the offset for malloc_error_break().
  uintptr_t reference_addr = reinterpret_cast<uintptr_t>(&malloc);
  reference_addr -= nl[1].n_value;  // In-memory base of the loaded image.
  reference_addr += nl[0].n_value;  // Plus malloc_error_break's image offset.

  return reinterpret_cast<malloc_error_break_t>(reference_addr);
}
     88 
     89 // Combines ThreadLocalBoolean with AutoReset.  It would be convenient
     90 // to compose ThreadLocalPointer<bool> with base::AutoReset<bool>, but that
     91 // would require allocating some storage for the bool.
     92 class ThreadLocalBooleanAutoReset {
     93  public:
     94   ThreadLocalBooleanAutoReset(ThreadLocalBoolean* tlb, bool new_value)
     95       : scoped_tlb_(tlb),
     96         original_value_(tlb->Get()) {
     97     scoped_tlb_->Set(new_value);
     98   }
     99   ~ThreadLocalBooleanAutoReset() {
    100     scoped_tlb_->Set(original_value_);
    101   }
    102 
    103  private:
    104   ThreadLocalBoolean* scoped_tlb_;
    105   bool original_value_;
    106 
    107   DISALLOW_COPY_AND_ASSIGN(ThreadLocalBooleanAutoReset);
    108 };
    109 
// Thread-local flag set (via ThreadLocalBooleanAutoReset) while
// UncheckedMalloc() is in flight, so that CrMallocErrorBreak() can tell a
// deliberately-unchecked allocation's EBADF from a real problem.
base::LazyInstance<ThreadLocalBoolean>::Leaky
    g_unchecked_malloc = LAZY_INSTANCE_INITIALIZER;
    112 
    113 // NOTE(shess): This is called when the malloc library noticed that the heap
    114 // is fubar.  Avoid calls which will re-enter the malloc library.
void CrMallocErrorBreak() {
  // Call through to the original malloc_error_break() first.
  g_original_malloc_error_break();

  // Out of memory is certainly not heap corruption, and not necessarily
  // something for which the process should be terminated. Leave that decision
  // to the OOM killer.  The EBADF case comes up because the malloc library
  // attempts to log to ASL (syslog) before calling this code, which fails
  // accessing a Unix-domain socket because of sandboxing.
  if (errno == ENOMEM || (errno == EBADF && g_unchecked_malloc.Get().Get()))
    return;

  // A unit test checks this error message, so it needs to be in release builds.
  char buf[1024] =
      "Terminating process due to a potential for future heap corruption: "
      "errno=";
  // Hand-rolled fixed-width (3-digit, zero-padded) decimal encoding of errno;
  // avoids snprintf and anything else that might touch the heap here.
  char errnobuf[] = {
    '0' + ((errno / 100) % 10),
    '0' + ((errno / 10) % 10),
    '0' + (errno % 10),
    '\000'
  };
  COMPILE_ASSERT(ELAST <= 999, errno_too_large_to_encode);
  strlcat(buf, errnobuf, sizeof(buf));
  RAW_LOG(ERROR, buf);

  // Crash by writing to NULL+errno to allow analyzing errno from
  // crash dump info (setting a breakpad key would re-enter the malloc
  // library).  Max documented errno in intro(2) is actually 102, but
  // it really just needs to be "small" to stay on the right vm page.
  const int kMaxErrno = 256;
  char* volatile death_ptr = NULL;
  death_ptr += std::min(errno, kMaxErrno);
  *death_ptr = '!';
}
    149 
    150 }  // namespace
    151 #endif  // ARCH_CPU_32_BITS
    152 
// Routes the malloc library's malloc_error_break() through
// CrMallocErrorBreak() (above) so that detected heap corruption crashes the
// process. No-op on 64-bit and under AddressSanitizer, and on 32-bit when
// the symbol cannot be located or the override fails.
void EnableTerminationOnHeapCorruption() {
#if defined(ADDRESS_SANITIZER) || ARCH_CPU_64_BITS
  // AddressSanitizer handles heap corruption, and on 64 bit Macs, the malloc
  // system automatically abort()s on heap corruption.
  return;
#else
  // Only override once, otherwise CrMallocErrorBreak() will recurse
  // to itself.
  if (g_original_malloc_error_break)
    return;

  malloc_error_break_t malloc_error_break = LookUpMallocErrorBreak();
  if (!malloc_error_break) {
    DLOG(WARNING) << "Could not find malloc_error_break";
    return;
  }

  // Patch malloc_error_break() to call CrMallocErrorBreak(), saving the
  // original so CrMallocErrorBreak() can call through to it.
  mach_error_t err = mach_override_ptr(
     (void*)malloc_error_break,
     (void*)&CrMallocErrorBreak,
     (void**)&g_original_malloc_error_break);

  if (err != err_none)
    DLOG(WARNING) << "Could not override malloc_error_break; error = " << err;
#endif  // defined(ADDRESS_SANITIZER) || ARCH_CPU_64_BITS
}
    179 
    180 // ------------------------------------------------------------------------
    181 
    182 namespace {
    183 
// Whether EnableTerminationOnOutOfMemory() has already run; guards against
// installing the handlers twice.
bool g_oom_killer_enabled;
    185 
    186 // Starting with Mac OS X 10.7, the zone allocators set up by the system are
    187 // read-only, to prevent them from being overwritten in an attack. However,
    188 // blindly unprotecting and reprotecting the zone allocators fails with
    189 // GuardMalloc because GuardMalloc sets up its zone allocator using a block of
    190 // memory in its bss. Explicit saving/restoring of the protection is required.
    191 //
    192 // This function takes a pointer to a malloc zone, de-protects it if necessary,
    193 // and returns (in the out parameters) a region of memory (if any) to be
    194 // re-protected when modifications are complete. This approach assumes that
    195 // there is no contention for the protection of this memory.
void DeprotectMallocZone(ChromeMallocZone* default_zone,
                         mach_vm_address_t* reprotection_start,
                         mach_vm_size_t* reprotection_length,
                         vm_prot_t* reprotection_value) {
  mach_port_t unused;
  *reprotection_start = reinterpret_cast<mach_vm_address_t>(default_zone);
  struct vm_region_basic_info_64 info;
  mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
  // Look up the VM region containing the zone structure; mach_vm_region()
  // rewrites *reprotection_start/*reprotection_length to describe that
  // region, and reports its current protection in |info|.
  kern_return_t result =
      mach_vm_region(mach_task_self(),
                     reprotection_start,
                     reprotection_length,
                     VM_REGION_BASIC_INFO_64,
                     reinterpret_cast<vm_region_info_t>(&info),
                     &count,
                     &unused);
  CHECK(result == KERN_SUCCESS);

  // mach_vm_region() returns an object name port that this code has no use
  // for; release the reference.
  result = mach_port_deallocate(mach_task_self(), unused);
  CHECK(result == KERN_SUCCESS);

  // Does the region fully enclose the zone pointers? Possibly unwarranted
  // simplification used: using the size of a full version 8 malloc zone rather
  // than the actual smaller size if the passed-in zone is not version 8.
  CHECK(*reprotection_start <=
            reinterpret_cast<mach_vm_address_t>(default_zone));
  mach_vm_size_t zone_offset = reinterpret_cast<mach_vm_size_t>(default_zone) -
      reinterpret_cast<mach_vm_size_t>(*reprotection_start);
  CHECK(zone_offset + sizeof(ChromeMallocZone) <= *reprotection_length);

  if (info.protection & VM_PROT_WRITE) {
    // No change needed; the zone is already writable. Signal "nothing to
    // re-protect" to the caller via zeroed out-params.
    *reprotection_start = 0;
    *reprotection_length = 0;
    *reprotection_value = VM_PROT_NONE;
  } else {
    // Save the original protection and add write access so the caller can
    // patch the zone's function pointers.
    *reprotection_value = info.protection;
    result = mach_vm_protect(mach_task_self(),
                             *reprotection_start,
                             *reprotection_length,
                             false,
                             info.protection | VM_PROT_WRITE);
    CHECK(result == KERN_SUCCESS);
  }
}
    241 
    242 // === C malloc/calloc/valloc/realloc/posix_memalign ===
    243 
// Function-pointer types matching the malloc_zone_t entry points replaced
// below (see third_party/apple_apsl/malloc.h for the zone layout).
typedef void* (*malloc_type)(struct _malloc_zone_t* zone,
                             size_t size);
typedef void* (*calloc_type)(struct _malloc_zone_t* zone,
                             size_t num_items,
                             size_t size);
typedef void* (*valloc_type)(struct _malloc_zone_t* zone,
                             size_t size);
typedef void (*free_type)(struct _malloc_zone_t* zone,
                          void* ptr);
typedef void* (*realloc_type)(struct _malloc_zone_t* zone,
                              void* ptr,
                              size_t size);
typedef void* (*memalign_type)(struct _malloc_zone_t* zone,
                               size_t alignment,
                               size_t size);

// Original entry points of the default malloc zone, saved by
// EnableTerminationOnOutOfMemory() so the oom_killer_* wrappers can call
// through. NULL until that function runs.
malloc_type g_old_malloc;
calloc_type g_old_calloc;
valloc_type g_old_valloc;
free_type g_old_free;
realloc_type g_old_realloc;
memalign_type g_old_memalign;

// Same, for the default purgeable zone (which may not exist).
malloc_type g_old_malloc_purgeable;
calloc_type g_old_calloc_purgeable;
valloc_type g_old_valloc_purgeable;
free_type g_old_free_purgeable;
realloc_type g_old_realloc_purgeable;
memalign_type g_old_memalign_purgeable;
    273 
    274 void* oom_killer_malloc(struct _malloc_zone_t* zone,
    275                         size_t size) {
    276 #if ARCH_CPU_32_BITS
    277   ScopedClearErrno clear_errno;
    278 #endif  // ARCH_CPU_32_BITS
    279   void* result = g_old_malloc(zone, size);
    280   if (!result && size)
    281     debug::BreakDebugger();
    282   return result;
    283 }
    284 
    285 void* oom_killer_calloc(struct _malloc_zone_t* zone,
    286                         size_t num_items,
    287                         size_t size) {
    288 #if ARCH_CPU_32_BITS
    289   ScopedClearErrno clear_errno;
    290 #endif  // ARCH_CPU_32_BITS
    291   void* result = g_old_calloc(zone, num_items, size);
    292   if (!result && num_items && size)
    293     debug::BreakDebugger();
    294   return result;
    295 }
    296 
    297 void* oom_killer_valloc(struct _malloc_zone_t* zone,
    298                         size_t size) {
    299 #if ARCH_CPU_32_BITS
    300   ScopedClearErrno clear_errno;
    301 #endif  // ARCH_CPU_32_BITS
    302   void* result = g_old_valloc(zone, size);
    303   if (!result && size)
    304     debug::BreakDebugger();
    305   return result;
    306 }
    307 
    308 void oom_killer_free(struct _malloc_zone_t* zone,
    309                      void* ptr) {
    310 #if ARCH_CPU_32_BITS
    311   ScopedClearErrno clear_errno;
    312 #endif  // ARCH_CPU_32_BITS
    313   g_old_free(zone, ptr);
    314 }
    315 
    316 void* oom_killer_realloc(struct _malloc_zone_t* zone,
    317                          void* ptr,
    318                          size_t size) {
    319 #if ARCH_CPU_32_BITS
    320   ScopedClearErrno clear_errno;
    321 #endif  // ARCH_CPU_32_BITS
    322   void* result = g_old_realloc(zone, ptr, size);
    323   if (!result && size)
    324     debug::BreakDebugger();
    325   return result;
    326 }
    327 
    328 void* oom_killer_memalign(struct _malloc_zone_t* zone,
    329                           size_t alignment,
    330                           size_t size) {
    331 #if ARCH_CPU_32_BITS
    332   ScopedClearErrno clear_errno;
    333 #endif  // ARCH_CPU_32_BITS
    334   void* result = g_old_memalign(zone, alignment, size);
    335   // Only die if posix_memalign would have returned ENOMEM, since there are
    336   // other reasons why NULL might be returned (see
    337   // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ).
    338   if (!result && size && alignment >= sizeof(void*)
    339       && (alignment & (alignment - 1)) == 0) {
    340     debug::BreakDebugger();
    341   }
    342   return result;
    343 }
    344 
    345 void* oom_killer_malloc_purgeable(struct _malloc_zone_t* zone,
    346                                   size_t size) {
    347 #if ARCH_CPU_32_BITS
    348   ScopedClearErrno clear_errno;
    349 #endif  // ARCH_CPU_32_BITS
    350   void* result = g_old_malloc_purgeable(zone, size);
    351   if (!result && size)
    352     debug::BreakDebugger();
    353   return result;
    354 }
    355 
    356 void* oom_killer_calloc_purgeable(struct _malloc_zone_t* zone,
    357                                   size_t num_items,
    358                                   size_t size) {
    359 #if ARCH_CPU_32_BITS
    360   ScopedClearErrno clear_errno;
    361 #endif  // ARCH_CPU_32_BITS
    362   void* result = g_old_calloc_purgeable(zone, num_items, size);
    363   if (!result && num_items && size)
    364     debug::BreakDebugger();
    365   return result;
    366 }
    367 
    368 void* oom_killer_valloc_purgeable(struct _malloc_zone_t* zone,
    369                                   size_t size) {
    370 #if ARCH_CPU_32_BITS
    371   ScopedClearErrno clear_errno;
    372 #endif  // ARCH_CPU_32_BITS
    373   void* result = g_old_valloc_purgeable(zone, size);
    374   if (!result && size)
    375     debug::BreakDebugger();
    376   return result;
    377 }
    378 
    379 void oom_killer_free_purgeable(struct _malloc_zone_t* zone,
    380                                void* ptr) {
    381 #if ARCH_CPU_32_BITS
    382   ScopedClearErrno clear_errno;
    383 #endif  // ARCH_CPU_32_BITS
    384   g_old_free_purgeable(zone, ptr);
    385 }
    386 
    387 void* oom_killer_realloc_purgeable(struct _malloc_zone_t* zone,
    388                                    void* ptr,
    389                                    size_t size) {
    390 #if ARCH_CPU_32_BITS
    391   ScopedClearErrno clear_errno;
    392 #endif  // ARCH_CPU_32_BITS
    393   void* result = g_old_realloc_purgeable(zone, ptr, size);
    394   if (!result && size)
    395     debug::BreakDebugger();
    396   return result;
    397 }
    398 
    399 void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone,
    400                                     size_t alignment,
    401                                     size_t size) {
    402 #if ARCH_CPU_32_BITS
    403   ScopedClearErrno clear_errno;
    404 #endif  // ARCH_CPU_32_BITS
    405   void* result = g_old_memalign_purgeable(zone, alignment, size);
    406   // Only die if posix_memalign would have returned ENOMEM, since there are
    407   // other reasons why NULL might be returned (see
    408   // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ).
    409   if (!result && size && alignment >= sizeof(void*)
    410       && (alignment & (alignment - 1)) == 0) {
    411     debug::BreakDebugger();
    412   }
    413   return result;
    414 }
    415 
    416 // === C++ operator new ===
    417 
// std::new_handler installed by EnableTerminationOnOutOfMemory(); invoked
// when operator new cannot satisfy a request. Traps to the debugger.
void oom_killer_new() {
  debug::BreakDebugger();
}
    421 
    422 // === Core Foundation CFAllocators ===
    423 
// Returns true when the internal layout of the system CFAllocators is known
// for the running OS release (i.e. nothing newer than Mountain Lion), so
// ContextForCFAllocator() below can be used.
bool CanGetContextForCFAllocator() {
  return !base::mac::IsOSLaterThanMountainLion_DontCallThis();
}
    427 
    428 CFAllocatorContext* ContextForCFAllocator(CFAllocatorRef allocator) {
    429   if (base::mac::IsOSSnowLeopard()) {
    430     ChromeCFAllocatorLeopards* our_allocator =
    431         const_cast<ChromeCFAllocatorLeopards*>(
    432             reinterpret_cast<const ChromeCFAllocatorLeopards*>(allocator));
    433     return &our_allocator->_context;
    434   } else if (base::mac::IsOSLion() || base::mac::IsOSMountainLion()) {
    435     ChromeCFAllocatorLions* our_allocator =
    436         const_cast<ChromeCFAllocatorLions*>(
    437             reinterpret_cast<const ChromeCFAllocatorLions*>(allocator));
    438     return &our_allocator->_context;
    439   } else {
    440     return NULL;
    441   }
    442 }
    443 
// Original allocate callbacks of the three system CFAllocators, saved by
// EnableTerminationOnOutOfMemory() so the wrappers below can call through.
CFAllocatorAllocateCallBack g_old_cfallocator_system_default;
CFAllocatorAllocateCallBack g_old_cfallocator_malloc;
CFAllocatorAllocateCallBack g_old_cfallocator_malloc_zone;
    447 
    448 void* oom_killer_cfallocator_system_default(CFIndex alloc_size,
    449                                             CFOptionFlags hint,
    450                                             void* info) {
    451   void* result = g_old_cfallocator_system_default(alloc_size, hint, info);
    452   if (!result)
    453     debug::BreakDebugger();
    454   return result;
    455 }
    456 
    457 void* oom_killer_cfallocator_malloc(CFIndex alloc_size,
    458                                     CFOptionFlags hint,
    459                                     void* info) {
    460   void* result = g_old_cfallocator_malloc(alloc_size, hint, info);
    461   if (!result)
    462     debug::BreakDebugger();
    463   return result;
    464 }
    465 
    466 void* oom_killer_cfallocator_malloc_zone(CFIndex alloc_size,
    467                                          CFOptionFlags hint,
    468                                          void* info) {
    469   void* result = g_old_cfallocator_malloc_zone(alloc_size, hint, info);
    470   if (!result)
    471     debug::BreakDebugger();
    472   return result;
    473 }
    474 
    475 // === Cocoa NSObject allocation ===
    476 
// Signature of +[NSObject allocWithZone:], and the original IMP saved by
// EnableTerminationOnOutOfMemory() before the method is replaced.
typedef id (*allocWithZone_t)(id, SEL, NSZone*);
allocWithZone_t g_old_allocWithZone;
    479 
    480 id oom_killer_allocWithZone(id self, SEL _cmd, NSZone* zone)
    481 {
    482   id result = g_old_allocWithZone(self, _cmd, zone);
    483   if (!result)
    484     debug::BreakDebugger();
    485   return result;
    486 }
    487 
    488 }  // namespace
    489 
// Allocates |size| bytes without invoking the OOM killer on failure; callers
// must be prepared for a NULL return. Once EnableTerminationOnOutOfMemory()
// has run, this calls the saved pre-interposition allocator directly,
// bypassing oom_killer_malloc(); before that, it is plain malloc().
void* UncheckedMalloc(size_t size) {
  if (g_old_malloc) {
#if ARCH_CPU_32_BITS
    // Scope errno, and set the thread-local flag that CrMallocErrorBreak()
    // checks so an EBADF arising from this deliberate best-effort allocation
    // is not treated as heap corruption.
    ScopedClearErrno clear_errno;
    ThreadLocalBooleanAutoReset flag(g_unchecked_malloc.Pointer(), true);
#endif  // ARCH_CPU_32_BITS
    return g_old_malloc(malloc_default_zone(), size);
  }
  return malloc(size);
}
    500 
// Installs out-of-memory termination handlers in every allocation path this
// file knows how to interpose: the default and purgeable malloc zones, C++
// operator new, the three system CFAllocators, and +[NSObject
// allocWithZone:]. After this runs, a failed allocation (outside the
// documented carve-outs below) traps to the debugger instead of returning
// NULL. Idempotent; guarded by g_oom_killer_enabled.
void EnableTerminationOnOutOfMemory() {
  if (g_oom_killer_enabled)
    return;

  g_oom_killer_enabled = true;

  // === C malloc/calloc/valloc/realloc/posix_memalign ===

  // This approach is not perfect, as requests for amounts of memory larger than
  // MALLOC_ABSOLUTE_MAX_SIZE (currently SIZE_T_MAX - (2 * PAGE_SIZE)) will
  // still fail with a NULL rather than dying (see
  // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c for details).
  // Unfortunately, it's the best we can do. Also note that this does not affect
  // allocations from non-default zones.

  CHECK(!g_old_malloc && !g_old_calloc && !g_old_valloc && !g_old_realloc &&
        !g_old_memalign) << "Old allocators unexpectedly non-null";

  CHECK(!g_old_malloc_purgeable && !g_old_calloc_purgeable &&
        !g_old_valloc_purgeable && !g_old_realloc_purgeable &&
        !g_old_memalign_purgeable) << "Old allocators unexpectedly non-null";

#if !defined(ADDRESS_SANITIZER)
  // Don't do anything special on OOM for the malloc zones replaced by
  // AddressSanitizer, as modifying or protecting them may not work correctly.

  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  ChromeMallocZone* purgeable_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_purgeable_zone());

  // The zone structures may live on read-only pages (see DeprotectMallocZone
  // above); make them writable before swapping in the replacement function
  // pointers, recording what must be re-protected afterwards.
  mach_vm_address_t default_reprotection_start = 0;
  mach_vm_size_t default_reprotection_length = 0;
  vm_prot_t default_reprotection_value = VM_PROT_NONE;
  DeprotectMallocZone(default_zone,
                      &default_reprotection_start,
                      &default_reprotection_length,
                      &default_reprotection_value);

  mach_vm_address_t purgeable_reprotection_start = 0;
  mach_vm_size_t purgeable_reprotection_length = 0;
  vm_prot_t purgeable_reprotection_value = VM_PROT_NONE;
  if (purgeable_zone) {
    DeprotectMallocZone(purgeable_zone,
                        &purgeable_reprotection_start,
                        &purgeable_reprotection_length,
                        &purgeable_reprotection_value);
  }

  // Default zone

  g_old_malloc = default_zone->malloc;
  g_old_calloc = default_zone->calloc;
  g_old_valloc = default_zone->valloc;
  g_old_free = default_zone->free;
  g_old_realloc = default_zone->realloc;
  CHECK(g_old_malloc && g_old_calloc && g_old_valloc && g_old_free &&
        g_old_realloc)
      << "Failed to get system allocation functions.";

  default_zone->malloc = oom_killer_malloc;
  default_zone->calloc = oom_killer_calloc;
  default_zone->valloc = oom_killer_valloc;
  default_zone->free = oom_killer_free;
  default_zone->realloc = oom_killer_realloc;

  // memalign is only present in zone version 5 and later.
  if (default_zone->version >= 5) {
    g_old_memalign = default_zone->memalign;
    if (g_old_memalign)
      default_zone->memalign = oom_killer_memalign;
  }

  // Purgeable zone (if it exists)

  if (purgeable_zone) {
    g_old_malloc_purgeable = purgeable_zone->malloc;
    g_old_calloc_purgeable = purgeable_zone->calloc;
    g_old_valloc_purgeable = purgeable_zone->valloc;
    g_old_free_purgeable = purgeable_zone->free;
    g_old_realloc_purgeable = purgeable_zone->realloc;
    CHECK(g_old_malloc_purgeable && g_old_calloc_purgeable &&
          g_old_valloc_purgeable && g_old_free_purgeable &&
          g_old_realloc_purgeable)
        << "Failed to get system allocation functions.";

    purgeable_zone->malloc = oom_killer_malloc_purgeable;
    purgeable_zone->calloc = oom_killer_calloc_purgeable;
    purgeable_zone->valloc = oom_killer_valloc_purgeable;
    purgeable_zone->free = oom_killer_free_purgeable;
    purgeable_zone->realloc = oom_killer_realloc_purgeable;

    // memalign is only present in zone version 5 and later.
    if (purgeable_zone->version >= 5) {
      g_old_memalign_purgeable = purgeable_zone->memalign;
      if (g_old_memalign_purgeable)
        purgeable_zone->memalign = oom_killer_memalign_purgeable;
    }
  }

  // Restore protection if it was active.

  if (default_reprotection_start) {
    kern_return_t result = mach_vm_protect(mach_task_self(),
                                           default_reprotection_start,
                                           default_reprotection_length,
                                           false,
                                           default_reprotection_value);
    CHECK(result == KERN_SUCCESS);
  }

  if (purgeable_reprotection_start) {
    kern_return_t result = mach_vm_protect(mach_task_self(),
                                           purgeable_reprotection_start,
                                           purgeable_reprotection_length,
                                           false,
                                           purgeable_reprotection_value);
    CHECK(result == KERN_SUCCESS);
  }
#endif

  // === C malloc_zone_batch_malloc ===

  // batch_malloc is omitted because the default malloc zone's implementation
  // only supports batch_malloc for "tiny" allocations from the free list. It
  // will fail for allocations larger than "tiny", and will only allocate as
  // many blocks as it's able to from the free list. These factors mean that it
  // can return less than the requested memory even in a non-out-of-memory
  // situation. There's no good way to detect whether a batch_malloc failure is
  // due to these other factors, or due to genuine memory or address space
  // exhaustion. The fact that it only allocates space from the "tiny" free list
  // means that it's likely that a failure will not be due to memory exhaustion.
  // Similarly, these constraints on batch_malloc mean that callers must always
  // be expecting to receive less memory than was requested, even in situations
  // where memory pressure is not a concern. Finally, the only public interface
  // to batch_malloc is malloc_zone_batch_malloc, which is specific to the
  // system's malloc implementation. It's unlikely that anyone's even heard of
  // it.

  // === C++ operator new ===

  // Yes, operator new does call through to malloc, but this will catch failures
  // that our imperfect handling of malloc cannot.

  std::set_new_handler(oom_killer_new);

#ifndef ADDRESS_SANITIZER
  // === Core Foundation CFAllocators ===

  // This will not catch allocation done by custom allocators, but will catch
  // all allocation done by system-provided ones.

  CHECK(!g_old_cfallocator_system_default && !g_old_cfallocator_malloc &&
        !g_old_cfallocator_malloc_zone)
      << "Old allocators unexpectedly non-null";

  bool cf_allocator_internals_known = CanGetContextForCFAllocator();

  if (cf_allocator_internals_known) {
    CFAllocatorContext* context =
        ContextForCFAllocator(kCFAllocatorSystemDefault);
    CHECK(context) << "Failed to get context for kCFAllocatorSystemDefault.";
    g_old_cfallocator_system_default = context->allocate;
    CHECK(g_old_cfallocator_system_default)
        << "Failed to get kCFAllocatorSystemDefault allocation function.";
    context->allocate = oom_killer_cfallocator_system_default;

    context = ContextForCFAllocator(kCFAllocatorMalloc);
    CHECK(context) << "Failed to get context for kCFAllocatorMalloc.";
    g_old_cfallocator_malloc = context->allocate;
    CHECK(g_old_cfallocator_malloc)
        << "Failed to get kCFAllocatorMalloc allocation function.";
    context->allocate = oom_killer_cfallocator_malloc;

    context = ContextForCFAllocator(kCFAllocatorMallocZone);
    CHECK(context) << "Failed to get context for kCFAllocatorMallocZone.";
    g_old_cfallocator_malloc_zone = context->allocate;
    CHECK(g_old_cfallocator_malloc_zone)
        << "Failed to get kCFAllocatorMallocZone allocation function.";
    context->allocate = oom_killer_cfallocator_malloc_zone;
  } else {
    NSLog(@"Internals of CFAllocator not known; out-of-memory failures via "
        "CFAllocator will not result in termination. http://crbug.com/45650");
  }
#endif

  // === Cocoa NSObject allocation ===

  // Note that both +[NSObject new] and +[NSObject alloc] call through to
  // +[NSObject allocWithZone:].

  CHECK(!g_old_allocWithZone)
      << "Old allocator unexpectedly non-null";

  Class nsobject_class = [NSObject class];
  Method orig_method = class_getClassMethod(nsobject_class,
                                            @selector(allocWithZone:));
  g_old_allocWithZone = reinterpret_cast<allocWithZone_t>(
      method_getImplementation(orig_method));
  CHECK(g_old_allocWithZone)
      << "Failed to get allocWithZone allocation function.";
  method_setImplementation(orig_method,
                           reinterpret_cast<IMP>(oom_killer_allocWithZone));
}
    703 
    704 }  // namespace base
    705