// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/process_util.h"

#import <Cocoa/Cocoa.h>
#include <crt_externs.h>
#include <dlfcn.h>
#include <mach/mach.h>
#include <mach/mach_init.h>
#include <mach/mach_vm.h>
#include <mach/shared_region.h>
#include <mach/task.h>
#include <malloc/malloc.h>
#import <objc/runtime.h>
#include <spawn.h>
#include <sys/mman.h>
#include <sys/sysctl.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/wait.h>

#include <new>
#include <string>

#include "base/debug/debugger.h"
#include "base/eintr_wrapper.h"
#include "base/hash_tables.h"
#include "base/logging.h"
#include "base/string_util.h"
#include "base/sys_info.h"
#include "base/sys_string_conversions.h"
#include "base/time.h"
#include "third_party/apple_apsl/CFBase.h"
#include "third_party/apple_apsl/malloc.h"

namespace base {

void RestoreDefaultExceptionHandler() {
  // This function is tailored to remove the Breakpad exception handler.
  // exception_mask matches s_exception_mask in
  // breakpad/src/client/mac/handler/exception_handler.cc
  const exception_mask_t exception_mask = EXC_MASK_BAD_ACCESS |
                                          EXC_MASK_BAD_INSTRUCTION |
                                          EXC_MASK_ARITHMETIC |
                                          EXC_MASK_BREAKPOINT;

  // Setting the exception port to MACH_PORT_NULL may not be an entirely
  // kosher way to restore the default exception handler, but in practice,
  // it results in the exception port being set to Apple Crash Reporter,
  // which is the desired behavior.
  task_set_exception_ports(mach_task_self(), exception_mask, MACH_PORT_NULL,
                           EXCEPTION_DEFAULT, THREAD_STATE_NONE);
}
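
// Usage sketch (illustrative; the fork/exec scaffolding and |path|/|argv|
// are assumptions, not part of this API): a forked child that should crash
// into the default Apple Crash Reporter rather than inherit Breakpad's
// handler could call this before exec:
//
//   pid_t pid = fork();
//   if (pid == 0) {
//     base::RestoreDefaultExceptionHandler();
//     execv(path, argv);  // Crashes in the child now hit the default ports.
//     _exit(1);
//   }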

ProcessIterator::ProcessIterator(const ProcessFilter* filter)
    : index_of_kinfo_proc_(0),
      filter_(filter) {
  // Get a snapshot of all of my processes. (Yes, as we loop it can go stale,
  // but trying to find where we were in a constantly changing list is
  // basically impossible.)

  int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_UID, geteuid() };

  // Since more processes could start between when we get the size and when
  // we get the list, we do a loop to keep trying until we get it.
  bool done = false;
  int try_num = 1;
  const int max_tries = 10;
  do {
    // Get the size of the buffer.
    size_t len = 0;
    if (sysctl(mib, arraysize(mib), NULL, &len, NULL, 0) < 0) {
      LOG(ERROR) << "failed to get the size needed for the process list";
      kinfo_procs_.resize(0);
      done = true;
    } else {
      size_t num_of_kinfo_proc = len / sizeof(struct kinfo_proc);
      // Leave some spare room for process table growth (more could show up
      // between when we check and now).
      num_of_kinfo_proc += 16;
      kinfo_procs_.resize(num_of_kinfo_proc);
      len = num_of_kinfo_proc * sizeof(struct kinfo_proc);
      // Load the list of processes.
      if (sysctl(mib, arraysize(mib), &kinfo_procs_[0], &len, NULL, 0) < 0) {
        // If we get a mem error, it just means we need a bigger buffer, so
        // loop around again. Anything else is a real error, so give up.
        if (errno != ENOMEM) {
          LOG(ERROR) << "failed to get the process list";
          kinfo_procs_.resize(0);
          done = true;
        }
      } else {
        // Got the list; just make sure we're sized exactly right.
        size_t num_of_kinfo_proc = len / sizeof(struct kinfo_proc);
        kinfo_procs_.resize(num_of_kinfo_proc);
        done = true;
      }
    }
  } while (!done && (try_num++ < max_tries));

  if (!done) {
    LOG(ERROR) << "failed to collect the process list after several tries";
    kinfo_procs_.resize(0);
  }
}
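
// The constructor above uses a common grow-and-retry sysctl idiom. A minimal
// standalone sketch of the same pattern (names here are illustrative, not
// part of this file):
//
//   std::vector<kinfo_proc> procs;
//   int mib[] = { CTL_KERN, KERN_PROC, KERN_PROC_UID, geteuid() };
//   for (int i = 0; i < 10; ++i) {
//     size_t len = 0;
//     if (sysctl(mib, arraysize(mib), NULL, &len, NULL, 0) < 0)
//       break;  // Couldn't even size the buffer; give up.
//     procs.resize(len / sizeof(kinfo_proc) + 16);  // Slack for new procs.
//     len = procs.size() * sizeof(kinfo_proc);
//     if (sysctl(mib, arraysize(mib), &procs[0], &len, NULL, 0) == 0) {
//       procs.resize(len / sizeof(kinfo_proc));  // Trim to what we got.
//       break;  // Success.
//     }
//     if (errno != ENOMEM)
//       break;  // Real error; ENOMEM just means "grew again, go around".
//   }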

ProcessIterator::~ProcessIterator() {
}

bool ProcessIterator::CheckForNextProcess() {
  std::string data;
  for (; index_of_kinfo_proc_ < kinfo_procs_.size(); ++index_of_kinfo_proc_) {
    kinfo_proc& kinfo = kinfo_procs_[index_of_kinfo_proc_];

    // Skip processes just awaiting collection.
    if ((kinfo.kp_proc.p_pid > 0) && (kinfo.kp_proc.p_stat == SZOMB))
      continue;

    int mib[] = { CTL_KERN, KERN_PROCARGS, kinfo.kp_proc.p_pid };

    // Find out what size buffer we need.
    size_t data_len = 0;
    if (sysctl(mib, arraysize(mib), NULL, &data_len, NULL, 0) < 0) {
      DVPLOG(1) << "failed to figure out the buffer size for a commandline";
      continue;
    }

    data.resize(data_len);
    if (sysctl(mib, arraysize(mib), &data[0], &data_len, NULL, 0) < 0) {
      DVPLOG(1) << "failed to fetch a commandline";
      continue;
    }

    // |data| contains all the command line parameters of the process,
    // separated by blocks of one or more null characters. We tokenize |data|
    // into a vector of strings using '\0' as a delimiter and populate
    // |entry_.cmd_line_args_|.
    std::string delimiters;
    delimiters.push_back('\0');
    Tokenize(data, delimiters, &entry_.cmd_line_args_);

    // |data| starts with the full executable path followed by a null
    // character. We search for the first instance of '\0' and extract
    // everything before it to populate |entry_.exe_file_|.
    size_t exec_name_end = data.find('\0');
    if (exec_name_end == std::string::npos) {
      LOG(ERROR) << "command line data didn't match expected format";
      continue;
    }

    entry_.pid_ = kinfo.kp_proc.p_pid;
    entry_.ppid_ = kinfo.kp_eproc.e_ppid;
    entry_.gid_ = kinfo.kp_eproc.e_pgid;
    size_t last_slash = data.rfind('/', exec_name_end);
    if (last_slash == std::string::npos)
      entry_.exe_file_.assign(data, 0, exec_name_end);
    else
      entry_.exe_file_.assign(data, last_slash + 1,
                              exec_name_end - last_slash - 1);
    // Start with the next entry next time through.
    ++index_of_kinfo_proc_;
    // Done.
    return true;
  }
  return false;
}

bool NamedProcessIterator::IncludeEntry() {
  return (executable_name_ == entry().exe_file() &&
          ProcessIterator::IncludeEntry());
}
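
// Caller-side usage sketch (the executable name is hypothetical):
//
//   base::NamedProcessIterator iter("Chromium Helper", NULL /* no filter */);
//   while (const base::ProcessEntry* entry = iter.NextProcessEntry()) {
//     VLOG(1) << "pid " << entry->pid() << ": " << entry->exe_file();
//   }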

// ------------------------------------------------------------------------
// NOTE: about ProcessMetrics
//
// Getting a mach task from a pid for another process requires permissions in
// general, so there doesn't really seem to be a way to do these for arbitrary
// processes (and spinning up ps to fetch each process's stats seems too
// dangerous to put in a base API for anyone to call). Child processes IPC
// their mach task port to the parent, so return something if a port is
// available, and otherwise return 0.
//
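
// A hedged sketch of a PortProvider implementation: a browser-side table
// mapping child pids to the task ports those children sent over IPC. The
// map and how it gets populated are assumptions for illustration; only the
// TaskForPid() override is the real interface.
//
//   class ChildPortProvider : public base::ProcessMetrics::PortProvider {
//    public:
//     virtual mach_port_t TaskForPid(base::ProcessHandle process) const {
//       std::map<base::ProcessHandle, mach_port_t>::const_iterator it =
//           ports_.find(process);
//       return it == ports_.end() ? MACH_PORT_NULL : it->second;
//     }
//
//    private:
//     std::map<base::ProcessHandle, mach_port_t> ports_;  // Filled via IPC.
//   };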

ProcessMetrics::ProcessMetrics(ProcessHandle process,
                               ProcessMetrics::PortProvider* port_provider)
    : process_(process),
      last_time_(0),
      last_system_time_(0),
      port_provider_(port_provider) {
  processor_count_ = SysInfo::NumberOfProcessors();
}

// static
ProcessMetrics* ProcessMetrics::CreateProcessMetrics(
    ProcessHandle process,
    ProcessMetrics::PortProvider* port_provider) {
  return new ProcessMetrics(process, port_provider);
}

bool ProcessMetrics::GetIOCounters(IoCounters* io_counters) const {
  // I/O counters are not available on Mac OS X.
  return false;
}

static bool GetTaskInfo(mach_port_t task, task_basic_info_64* task_info_data) {
  if (task == MACH_PORT_NULL)
    return false;
  mach_msg_type_number_t count = TASK_BASIC_INFO_64_COUNT;
  kern_return_t kr = task_info(task,
                               TASK_BASIC_INFO_64,
                               reinterpret_cast<task_info_t>(task_info_data),
                               &count);
  // Most likely cause for failure: |task| is a zombie.
  return kr == KERN_SUCCESS;
}

size_t ProcessMetrics::GetPagefileUsage() const {
  task_basic_info_64 task_info_data;
  if (!GetTaskInfo(TaskForPid(process_), &task_info_data))
    return 0;
  return task_info_data.virtual_size;
}

size_t ProcessMetrics::GetPeakPagefileUsage() const {
  return 0;
}

size_t ProcessMetrics::GetWorkingSetSize() const {
  task_basic_info_64 task_info_data;
  if (!GetTaskInfo(TaskForPid(process_), &task_info_data))
    return 0;
  return task_info_data.resident_size;
}

size_t ProcessMetrics::GetPeakWorkingSetSize() const {
  return 0;
}

static bool GetCPUTypeForProcess(pid_t pid, cpu_type_t* cpu_type) {
  size_t len = sizeof(*cpu_type);
  int result = sysctlbyname("sysctl.proc_cputype",
                            cpu_type,
                            &len,
                            NULL,
                            0);
  if (result != 0) {
    PLOG(ERROR) << "sysctlbyname(\"sysctl.proc_cputype\")";
    return false;
  }

  return true;
}

static bool IsAddressInSharedRegion(mach_vm_address_t addr, cpu_type_t type) {
  if (type == CPU_TYPE_I386)
    return addr >= SHARED_REGION_BASE_I386 &&
           addr < (SHARED_REGION_BASE_I386 + SHARED_REGION_SIZE_I386);
  else if (type == CPU_TYPE_X86_64)
    return addr >= SHARED_REGION_BASE_X86_64 &&
           addr < (SHARED_REGION_BASE_X86_64 + SHARED_REGION_SIZE_X86_64);
  else
    return false;
}

// This is a rough approximation of the algorithm that libtop uses.
// private_bytes is the size of private resident memory.
// shared_bytes is the size of shared resident memory.
bool ProcessMetrics::GetMemoryBytes(size_t* private_bytes,
                                    size_t* shared_bytes) {
  kern_return_t kr;
  size_t private_pages_count = 0;
  size_t shared_pages_count = 0;

  if (!private_bytes && !shared_bytes)
    return true;

  mach_port_t task = TaskForPid(process_);
  if (task == MACH_PORT_NULL) {
    LOG(ERROR) << "Invalid process";
    return false;
  }

  cpu_type_t cpu_type;
  if (!GetCPUTypeForProcess(process_, &cpu_type))
    return false;

  // The same region can be referenced multiple times. To avoid double
  // counting we need to keep track of which regions we've already counted.
  base::hash_set<int> seen_objects;

  // We iterate through each VM region in the task's address map. For shared
  // memory we add up all the pages that are marked as shared. Like libtop we
  // try to avoid counting pages that are also referenced by other tasks.
  // Since we don't have access to the VM regions of other tasks, the only
  // hint we have is whether the address is in the shared region area.
  //
  // Private memory is much simpler. We simply count the pages that are marked
  // as private or copy on write (COW).
  //
  // See libtop_update_vm_regions in
  // http://www.opensource.apple.com/source/top/top-67/libtop.c
  mach_vm_size_t size = 0;
  for (mach_vm_address_t address = MACH_VM_MIN_ADDRESS;; address += size) {
    vm_region_top_info_data_t info;
    mach_msg_type_number_t info_count = VM_REGION_TOP_INFO_COUNT;
    mach_port_t object_name;
    kr = mach_vm_region(task,
                        &address,
                        &size,
                        VM_REGION_TOP_INFO,
                        (vm_region_info_t)&info,
                        &info_count,
                        &object_name);
    if (kr == KERN_INVALID_ADDRESS) {
      // We're at the end of the address space.
      break;
    } else if (kr != KERN_SUCCESS) {
      LOG(ERROR) << "Calling mach_vm_region failed with error: "
                 << mach_error_string(kr);
      return false;
    }

    if (IsAddressInSharedRegion(address, cpu_type) &&
        info.share_mode != SM_PRIVATE)
      continue;

    if (info.share_mode == SM_COW && info.ref_count == 1)
      info.share_mode = SM_PRIVATE;

    switch (info.share_mode) {
      case SM_PRIVATE:
        private_pages_count += info.private_pages_resident;
        private_pages_count += info.shared_pages_resident;
        break;
      case SM_COW:
        private_pages_count += info.private_pages_resident;
        // Fall through.
      case SM_SHARED:
        if (seen_objects.count(info.obj_id) == 0) {
          // Only count the first reference to this region.
          seen_objects.insert(info.obj_id);
          shared_pages_count += info.shared_pages_resident;
        }
        break;
      default:
        break;
    }
  }

  vm_size_t page_size;
  // host_page_size() expects a host port, not a task port.
  kr = host_page_size(mach_host_self(), &page_size);
  if (kr != KERN_SUCCESS) {
    LOG(ERROR) << "Failed to fetch host page size, error: "
               << mach_error_string(kr);
    return false;
  }

  if (private_bytes)
    *private_bytes = private_pages_count * page_size;
  if (shared_bytes)
    *shared_bytes = shared_pages_count * page_size;

  return true;
}
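
// Usage sketch for the current process (no PortProvider needed, since
// TaskForPid() below falls back to mach_task_self() for getpid()):
//
//   scoped_ptr<base::ProcessMetrics> metrics(
//       base::ProcessMetrics::CreateProcessMetrics(getpid(), NULL));
//   size_t private_bytes = 0;
//   size_t shared_bytes = 0;
//   if (metrics->GetMemoryBytes(&private_bytes, &shared_bytes))
//     VLOG(1) << private_bytes << " private, " << shared_bytes << " shared";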

void ProcessMetrics::GetCommittedKBytes(CommittedKBytes* usage) const {
  // Not implemented on Mac OS X; |usage| is left untouched.
}

bool ProcessMetrics::GetWorkingSetKBytes(WorkingSetKBytes* ws_usage) const {
  size_t priv = GetWorkingSetSize();
  if (!priv)
    return false;
  ws_usage->priv = priv / 1024;
  ws_usage->shareable = 0;
  ws_usage->shared = 0;
  return true;
}

#define TIME_VALUE_TO_TIMEVAL(a, r) do {  \
  (r)->tv_sec = (a)->seconds;             \
  (r)->tv_usec = (a)->microseconds;       \
} while (0)

double ProcessMetrics::GetCPUUsage() {
  mach_port_t task = TaskForPid(process_);
  if (task == MACH_PORT_NULL)
    return 0;

  kern_return_t kr;

  // Libtop explicitly loops over the threads (libtop_pinfo_update_cpu_usage()
  // in libtop.c), but this is more concise and gives the same results:
  task_thread_times_info thread_info_data;
  mach_msg_type_number_t thread_info_count = TASK_THREAD_TIMES_INFO_COUNT;
  kr = task_info(task,
                 TASK_THREAD_TIMES_INFO,
                 reinterpret_cast<task_info_t>(&thread_info_data),
                 &thread_info_count);
  if (kr != KERN_SUCCESS) {
    // Most likely cause: |task| is a zombie.
    return 0;
  }

  task_basic_info_64 task_info_data;
  if (!GetTaskInfo(task, &task_info_data))
    return 0;

  // Set total_time: thread info contains the time of live threads...
  struct timeval user_timeval, system_timeval, task_timeval;
  TIME_VALUE_TO_TIMEVAL(&thread_info_data.user_time, &user_timeval);
  TIME_VALUE_TO_TIMEVAL(&thread_info_data.system_time, &system_timeval);
  timeradd(&user_timeval, &system_timeval, &task_timeval);

  // ... while task info contains the time of terminated threads.
  TIME_VALUE_TO_TIMEVAL(&task_info_data.user_time, &user_timeval);
  TIME_VALUE_TO_TIMEVAL(&task_info_data.system_time, &system_timeval);
  timeradd(&user_timeval, &task_timeval, &task_timeval);
  timeradd(&system_timeval, &task_timeval, &task_timeval);

  struct timeval now;
  int retval = gettimeofday(&now, NULL);
  if (retval)
    return 0;

  int64 time = TimeValToMicroseconds(now);
  int64 task_time = TimeValToMicroseconds(task_timeval);

  if ((last_system_time_ == 0) || (last_time_ == 0)) {
    // First call; just set the last values.
    last_system_time_ = task_time;
    last_time_ = time;
    return 0;
  }

  int64 system_time_delta = task_time - last_system_time_;
  int64 time_delta = time - last_time_;
  DCHECK(time_delta != 0);
  if (time_delta == 0)
    return 0;

  // Percentage of one CPU consumed over the sampling interval. Because
  // |task_time| sums the time of all threads, this can legitimately exceed
  // 100 on multi-core machines.
  double cpu = (system_time_delta * 100.0) / time_delta;

  last_system_time_ = task_time;
  last_time_ = time;

  return cpu;
}
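
// GetCPUUsage() is stateful: the first call only primes |last_time_| and
// |last_system_time_| and returns 0; each later call reports average CPU
// usage since the previous call. Sampling sketch (the one-second interval is
// an arbitrary choice):
//
//   scoped_ptr<base::ProcessMetrics> metrics(
//       base::ProcessMetrics::CreateProcessMetrics(getpid(), NULL));
//   metrics->GetCPUUsage();  // Prime the baseline; always returns 0.
//   sleep(1);
//   double cpu = metrics->GetCPUUsage();  // Percent of one core; can exceed
//                                         // 100 if many threads were busy.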

mach_port_t ProcessMetrics::TaskForPid(ProcessHandle process) const {
  mach_port_t task = MACH_PORT_NULL;
  if (port_provider_)
    task = port_provider_->TaskForPid(process_);
  if (task == MACH_PORT_NULL && process_ == getpid())
    task = mach_task_self();
  return task;
}

// ------------------------------------------------------------------------

// Bytes committed by the system.
size_t GetSystemCommitCharge() {
  host_name_port_t host = mach_host_self();
  mach_msg_type_number_t count = HOST_VM_INFO_COUNT;
  vm_statistics_data_t data;
  kern_return_t kr = host_statistics(host, HOST_VM_INFO,
                                     reinterpret_cast<host_info_t>(&data),
                                     &count);
  if (kr != KERN_SUCCESS) {
    LOG(WARNING) << "Failed to fetch host statistics.";
    return 0;
  }

  vm_size_t page_size;
  kr = host_page_size(host, &page_size);
  if (kr != KERN_SUCCESS) {
    LOG(ERROR) << "Failed to fetch host page size.";
    return 0;
  }

  return (data.active_count * page_size) / 1024;
}
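
// Usage sketch; note the units are kilobytes, and that active pages are only
// a rough stand-in for a true commit charge:
//
//   size_t commit_kb = base::GetSystemCommitCharge();
//   VLOG(1) << "System commit charge: " << commit_kb << " KB";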

// ------------------------------------------------------------------------

namespace {

bool g_oom_killer_enabled;

// === C malloc/calloc/valloc/realloc/posix_memalign ===

typedef void* (*malloc_type)(struct _malloc_zone_t* zone,
                             size_t size);
typedef void* (*calloc_type)(struct _malloc_zone_t* zone,
                             size_t num_items,
                             size_t size);
typedef void* (*valloc_type)(struct _malloc_zone_t* zone,
                             size_t size);
typedef void* (*realloc_type)(struct _malloc_zone_t* zone,
                              void* ptr,
                              size_t size);
typedef void* (*memalign_type)(struct _malloc_zone_t* zone,
                               size_t alignment,
                               size_t size);

malloc_type g_old_malloc;
calloc_type g_old_calloc;
valloc_type g_old_valloc;
realloc_type g_old_realloc;
memalign_type g_old_memalign;

malloc_type g_old_malloc_purgeable;
calloc_type g_old_calloc_purgeable;
valloc_type g_old_valloc_purgeable;
realloc_type g_old_realloc_purgeable;
memalign_type g_old_memalign_purgeable;

void* oom_killer_malloc(struct _malloc_zone_t* zone,
                        size_t size) {
  void* result = g_old_malloc(zone, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_calloc(struct _malloc_zone_t* zone,
                        size_t num_items,
                        size_t size) {
  void* result = g_old_calloc(zone, num_items, size);
  if (!result && num_items && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_valloc(struct _malloc_zone_t* zone,
                        size_t size) {
  void* result = g_old_valloc(zone, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_realloc(struct _malloc_zone_t* zone,
                         void* ptr,
                         size_t size) {
  void* result = g_old_realloc(zone, ptr, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_memalign(struct _malloc_zone_t* zone,
                          size_t alignment,
                          size_t size) {
  void* result = g_old_memalign(zone, alignment, size);
  // Only die if posix_memalign would have returned ENOMEM, since there are
  // other reasons why NULL might be returned (see
  // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ).
  if (!result && size && alignment >= sizeof(void*)
      && (alignment & (alignment - 1)) == 0) {
    debug::BreakDebugger();
  }
  return result;
}

void* oom_killer_malloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t size) {
  void* result = g_old_malloc_purgeable(zone, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_calloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t num_items,
                                  size_t size) {
  void* result = g_old_calloc_purgeable(zone, num_items, size);
  if (!result && num_items && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_valloc_purgeable(struct _malloc_zone_t* zone,
                                  size_t size) {
  void* result = g_old_valloc_purgeable(zone, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_realloc_purgeable(struct _malloc_zone_t* zone,
                                   void* ptr,
                                   size_t size) {
  void* result = g_old_realloc_purgeable(zone, ptr, size);
  if (!result && size)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_memalign_purgeable(struct _malloc_zone_t* zone,
                                    size_t alignment,
                                    size_t size) {
  void* result = g_old_memalign_purgeable(zone, alignment, size);
  // Only die if posix_memalign would have returned ENOMEM, since there are
  // other reasons why NULL might be returned (see
  // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c ).
  if (!result && size && alignment >= sizeof(void*)
      && (alignment & (alignment - 1)) == 0) {
    debug::BreakDebugger();
  }
  return result;
}

// === C++ operator new ===

void oom_killer_new() {
  debug::BreakDebugger();
}

// === Core Foundation CFAllocators ===

bool CanGetContextForCFAllocator(long darwin_version) {
  // TODO(avi): remove at final release; http://crbug.com/74589
  if (darwin_version == 11) {
    NSLog(@"Unsure about the internals of CFAllocator but going to patch them "
           "anyway. Watch out for crashes inside of CFAllocatorAllocate.");
  }
  return darwin_version == 9 ||
         darwin_version == 10 ||
         darwin_version == 11;
}

CFAllocatorContext* ContextForCFAllocator(CFAllocatorRef allocator,
                                          long darwin_version) {
  if (darwin_version == 9 || darwin_version == 10) {
    ChromeCFAllocator9and10* our_allocator =
        const_cast<ChromeCFAllocator9and10*>(
            reinterpret_cast<const ChromeCFAllocator9and10*>(allocator));
    return &our_allocator->_context;
  } else if (darwin_version == 11) {
    ChromeCFAllocator11* our_allocator =
        const_cast<ChromeCFAllocator11*>(
            reinterpret_cast<const ChromeCFAllocator11*>(allocator));
    return &our_allocator->_context;
  } else {
    return NULL;
  }
}

CFAllocatorAllocateCallBack g_old_cfallocator_system_default;
CFAllocatorAllocateCallBack g_old_cfallocator_malloc;
CFAllocatorAllocateCallBack g_old_cfallocator_malloc_zone;

void* oom_killer_cfallocator_system_default(CFIndex alloc_size,
                                            CFOptionFlags hint,
                                            void* info) {
  void* result = g_old_cfallocator_system_default(alloc_size, hint, info);
  if (!result)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_cfallocator_malloc(CFIndex alloc_size,
                                    CFOptionFlags hint,
                                    void* info) {
  void* result = g_old_cfallocator_malloc(alloc_size, hint, info);
  if (!result)
    debug::BreakDebugger();
  return result;
}

void* oom_killer_cfallocator_malloc_zone(CFIndex alloc_size,
                                         CFOptionFlags hint,
                                         void* info) {
  void* result = g_old_cfallocator_malloc_zone(alloc_size, hint, info);
  if (!result)
    debug::BreakDebugger();
  return result;
}

// === Cocoa NSObject allocation ===

typedef id (*allocWithZone_t)(id, SEL, NSZone*);
allocWithZone_t g_old_allocWithZone;

id oom_killer_allocWithZone(id self, SEL _cmd, NSZone* zone) {
  id result = g_old_allocWithZone(self, _cmd, zone);
  if (!result)
    debug::BreakDebugger();
  return result;
}

}  // namespace

malloc_zone_t* GetPurgeableZone() {
  // malloc_default_purgeable_zone only exists on >= 10.6. Use dlsym to grab
  // it at runtime because it may not be present in the SDK used for
  // compilation.
  typedef malloc_zone_t* (*malloc_default_purgeable_zone_t)(void);
  malloc_default_purgeable_zone_t malloc_purgeable_zone =
      reinterpret_cast<malloc_default_purgeable_zone_t>(
          dlsym(RTLD_DEFAULT, "malloc_default_purgeable_zone"));
  if (malloc_purgeable_zone)
    return malloc_purgeable_zone();
  return NULL;
}

void EnableTerminationOnOutOfMemory() {
  if (g_oom_killer_enabled)
    return;

  g_oom_killer_enabled = true;

  // Not SysInfo::OperatingSystemVersionNumbers, as that calls through to
  // Gestalt, which ends up (on > 10.6) spawning threads.
  struct utsname machine_info;
  if (uname(&machine_info)) {
    return;
  }

  // The string machine_info.release is the xnu/Darwin version number, "9.xxx"
  // on Mac OS X 10.5, and "10.xxx" on Mac OS X 10.6. See
  // http://en.wikipedia.org/wiki/Darwin_(operating_system) .
  long darwin_version = strtol(machine_info.release, NULL, 10);

  // === C malloc/calloc/valloc/realloc/posix_memalign ===

  // This approach is not perfect, as requests for amounts of memory larger
  // than MALLOC_ABSOLUTE_MAX_SIZE (currently SIZE_T_MAX - (2 * PAGE_SIZE))
  // will still fail with a NULL rather than dying (see
  // http://opensource.apple.com/source/Libc/Libc-583/gen/malloc.c for
  // details). Unfortunately, it's the best we can do. Also note that this
  // does not affect allocations from non-default zones.

  CHECK(!g_old_malloc && !g_old_calloc && !g_old_valloc && !g_old_realloc &&
        !g_old_memalign) << "Old allocators unexpectedly non-null";

  CHECK(!g_old_malloc_purgeable && !g_old_calloc_purgeable &&
        !g_old_valloc_purgeable && !g_old_realloc_purgeable &&
        !g_old_memalign_purgeable) << "Old allocators unexpectedly non-null";

  // See http://trac.webkit.org/changeset/53362/trunk/Tools/DumpRenderTree/mac
  bool zone_allocators_protected = darwin_version > 10;

  ChromeMallocZone* default_zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  ChromeMallocZone* purgeable_zone =
      reinterpret_cast<ChromeMallocZone*>(GetPurgeableZone());

  vm_address_t page_start_default = 0;
  vm_address_t page_start_purgeable = 0;
  vm_size_t len_default = 0;
  vm_size_t len_purgeable = 0;
  if (zone_allocators_protected) {
    // The zone structures may live on read-only pages (see the WebKit change
    // above); make them writable for patching and reprotect them afterwards.
    page_start_default = reinterpret_cast<vm_address_t>(default_zone) &
        static_cast<vm_size_t>(~(getpagesize() - 1));
    len_default = reinterpret_cast<vm_address_t>(default_zone) -
        page_start_default + sizeof(ChromeMallocZone);
    mprotect(reinterpret_cast<void*>(page_start_default), len_default,
             PROT_READ | PROT_WRITE);

    if (purgeable_zone) {
      page_start_purgeable = reinterpret_cast<vm_address_t>(purgeable_zone) &
          static_cast<vm_size_t>(~(getpagesize() - 1));
      len_purgeable = reinterpret_cast<vm_address_t>(purgeable_zone) -
          page_start_purgeable + sizeof(ChromeMallocZone);
      mprotect(reinterpret_cast<void*>(page_start_purgeable), len_purgeable,
               PROT_READ | PROT_WRITE);
    }
  }

  // Default zone

  g_old_malloc = default_zone->malloc;
  g_old_calloc = default_zone->calloc;
  g_old_valloc = default_zone->valloc;
  g_old_realloc = default_zone->realloc;
  CHECK(g_old_malloc && g_old_calloc && g_old_valloc && g_old_realloc)
      << "Failed to get system allocation functions.";

  default_zone->malloc = oom_killer_malloc;
  default_zone->calloc = oom_killer_calloc;
  default_zone->valloc = oom_killer_valloc;
  default_zone->realloc = oom_killer_realloc;

  if (default_zone->version >= 5) {
    g_old_memalign = default_zone->memalign;
    if (g_old_memalign)
      default_zone->memalign = oom_killer_memalign;
  }

  // Purgeable zone (if it exists)

  if (purgeable_zone) {
    g_old_malloc_purgeable = purgeable_zone->malloc;
    g_old_calloc_purgeable = purgeable_zone->calloc;
    g_old_valloc_purgeable = purgeable_zone->valloc;
    g_old_realloc_purgeable = purgeable_zone->realloc;
    CHECK(g_old_malloc_purgeable && g_old_calloc_purgeable &&
          g_old_valloc_purgeable && g_old_realloc_purgeable)
        << "Failed to get system allocation functions.";

    purgeable_zone->malloc = oom_killer_malloc_purgeable;
    purgeable_zone->calloc = oom_killer_calloc_purgeable;
    purgeable_zone->valloc = oom_killer_valloc_purgeable;
    purgeable_zone->realloc = oom_killer_realloc_purgeable;

    if (purgeable_zone->version >= 5) {
      g_old_memalign_purgeable = purgeable_zone->memalign;
      if (g_old_memalign_purgeable)
        purgeable_zone->memalign = oom_killer_memalign_purgeable;
    }
  }

  if (zone_allocators_protected) {
    mprotect(reinterpret_cast<void*>(page_start_default), len_default,
             PROT_READ);
    if (purgeable_zone) {
      mprotect(reinterpret_cast<void*>(page_start_purgeable), len_purgeable,
               PROT_READ);
    }
  }

  // === C malloc_zone_batch_malloc ===

  // batch_malloc is omitted because the default malloc zone's implementation
  // only supports batch_malloc for "tiny" allocations from the free list. It
  // will fail for allocations larger than "tiny", and will only allocate as
  // many blocks as it's able to from the free list. These factors mean that
  // it can return less than the requested memory even in a non-out-of-memory
  // situation. There's no good way to detect whether a batch_malloc failure
  // is due to these other factors, or due to genuine memory or address space
  // exhaustion. The fact that it only allocates space from the "tiny" free
  // list means that it's likely that a failure will not be due to memory
  // exhaustion. Similarly, these constraints on batch_malloc mean that
  // callers must always be expecting to receive less memory than was
  // requested, even in situations where memory pressure is not a concern.
  // Finally, the only public interface to batch_malloc is
  // malloc_zone_batch_malloc, which is specific to the system's malloc
  // implementation. It's unlikely that anyone's even heard of it.

  // === C++ operator new ===

  // Yes, operator new does call through to malloc, but this will catch
  // failures that our imperfect handling of malloc cannot.

  std::set_new_handler(oom_killer_new);

  // === Core Foundation CFAllocators ===

  // This will not catch allocations done by custom allocators, but will
  // catch all allocations done by system-provided ones.

  CHECK(!g_old_cfallocator_system_default && !g_old_cfallocator_malloc &&
        !g_old_cfallocator_malloc_zone)
      << "Old allocators unexpectedly non-null";

  bool cf_allocator_internals_known =
      CanGetContextForCFAllocator(darwin_version);

  if (cf_allocator_internals_known) {
    CFAllocatorContext* context =
        ContextForCFAllocator(kCFAllocatorSystemDefault, darwin_version);
    CHECK(context) << "Failed to get context for kCFAllocatorSystemDefault.";
    g_old_cfallocator_system_default = context->allocate;
    CHECK(g_old_cfallocator_system_default)
        << "Failed to get kCFAllocatorSystemDefault allocation function.";
    context->allocate = oom_killer_cfallocator_system_default;

    context = ContextForCFAllocator(kCFAllocatorMalloc, darwin_version);
    CHECK(context) << "Failed to get context for kCFAllocatorMalloc.";
    g_old_cfallocator_malloc = context->allocate;
    CHECK(g_old_cfallocator_malloc)
        << "Failed to get kCFAllocatorMalloc allocation function.";
    context->allocate = oom_killer_cfallocator_malloc;

    context = ContextForCFAllocator(kCFAllocatorMallocZone, darwin_version);
    CHECK(context) << "Failed to get context for kCFAllocatorMallocZone.";
    g_old_cfallocator_malloc_zone = context->allocate;
    CHECK(g_old_cfallocator_malloc_zone)
        << "Failed to get kCFAllocatorMallocZone allocation function.";
    context->allocate = oom_killer_cfallocator_malloc_zone;
  } else {
    NSLog(@"Internals of CFAllocator not known; out-of-memory failures via "
          "CFAllocator will not result in termination. http://crbug.com/45650");
  }

  // === Cocoa NSObject allocation ===

  // Note that both +[NSObject new] and +[NSObject alloc] call through to
  // +[NSObject allocWithZone:].

  CHECK(!g_old_allocWithZone)
      << "Old allocator unexpectedly non-null";

  Class nsobject_class = [NSObject class];
  Method orig_method = class_getClassMethod(nsobject_class,
                                            @selector(allocWithZone:));
  g_old_allocWithZone = reinterpret_cast<allocWithZone_t>(
      method_getImplementation(orig_method));
  CHECK(g_old_allocWithZone)
      << "Failed to get allocWithZone allocation function.";
  method_setImplementation(orig_method,
                           reinterpret_cast<IMP>(oom_killer_allocWithZone));
}
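
// Usage sketch: call once, early in startup, while the process is still
// single-threaded (the main() call site is an assumption; any early point
// works):
//
//   int main(int argc, char* argv[]) {
//     base::EnableTerminationOnOutOfMemory();
//     // From here on, a failed allocation through the patched zones,
//     // CFAllocators, operator new, or +[NSObject allocWithZone:] crashes
//     // immediately instead of returning NULL.
//     ...
//   }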

ProcessId GetParentProcessId(ProcessHandle process) {
  struct kinfo_proc info;
  size_t length = sizeof(struct kinfo_proc);
  int mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, process };
  if (sysctl(mib, 4, &info, &length, NULL, 0) < 0) {
    PLOG(ERROR) << "sysctl";
    return -1;
  }
  if (length == 0)
    return -1;
  return info.kp_eproc.e_ppid;
}

}  // namespace base