/* Copyright (C) 2007-2010 The Android Open Source Project
**
** This software is licensed under the terms of the GNU General Public
** License version 2, as published by the Free Software Foundation, and
** may be copied, distributed, and modified under those terms.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
** GNU General Public License for more details.
*/

/*
 * Contains the implementation of routines related to process management in
 * the memchecker framework.
 */

#include "elff/elff_api.h"
#include "memcheck.h"
#include "memcheck_proc_management.h"
#include "memcheck_logging.h"
#include "memcheck_util.h"

/* Current thread id.
 * This value is updated on each call to memcheck_switch, saving the ID of
 * the thread that becomes current. */
static uint32_t current_tid = 0;

/* Current thread descriptor.
 * This variable caches the current thread descriptor. The value gets
 * initialized on an "as needed" basis, when the descriptor for the current
 * thread is requested for the first time.
 * Note that every time the memcheck_switch routine is called, this value gets
 * NULL'ed, since another thread becomes current. */
static ThreadDesc* current_thread = NULL;

/* Current process descriptor.
 * This variable caches the current process descriptor. The value gets
 * initialized on an "as needed" basis, when the descriptor for the current
 * process is requested for the first time.
 * Note that every time the memcheck_switch routine is called, this value gets
 * NULL'ed, since a new thread becomes current and a process switch may have
 * occurred as well. */
static ProcDesc*    current_process = NULL;

/* List of running processes. */
static QLIST_HEAD(proc_list, ProcDesc) proc_list;

/* List of running threads. */
static QLIST_HEAD(thread_list, ThreadDesc) thread_list;
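
/* Informational note: each ThreadDesc is linked into two lists at once: the
 * global thread_list (via its 'global_entry' field) and the owning process'
 * 'threads' list (via its 'proc_entry' field). The sketch below is a
 * hypothetical approximation of the descriptor layout, inferred from how the
 * fields are used in this file; the authoritative definitions live in
 * memcheck_proc_management.h.
 *
 *   typedef struct ThreadDesc {
 *       QLIST_ENTRY(ThreadDesc) global_entry;     // link in thread_list
 *       QLIST_ENTRY(ThreadDesc) proc_entry;       // link in proc->threads
 *       ProcDesc*               process;          // owning process
 *       uint32_t                tid;              // thread ID
 *       ThreadCallStackEntry*   call_stack;       // recorded call stack
 *       uint32_t                call_stack_count; // entries in use
 *       uint32_t                call_stack_max;   // allocated capacity
 *   } ThreadDesc;
 */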

// =============================================================================
// Static routines
// =============================================================================

/* Creates and lists a thread descriptor for a new thread.
 * This routine will allocate and initialize a new thread descriptor. After
 * that it will insert the descriptor into the global list of running threads,
 * as well as into the thread list of the process in context of which the
 * thread is created.
 * Param:
 *  proc - Process descriptor of the process in context of which the new
 *      thread is created.
 *  tid - Thread ID of the thread that's being created.
 * Return:
 *  New thread descriptor on success, or NULL on failure.
 */
static ThreadDesc*
create_new_thread(ProcDesc* proc, uint32_t tid)
{
    ThreadDesc* new_thread = (ThreadDesc*)qemu_malloc(sizeof(ThreadDesc));
    if (new_thread == NULL) {
        ME("memcheck: Unable to allocate new thread descriptor.");
        return NULL;
    }
    new_thread->tid = tid;
    new_thread->process = proc;
    new_thread->call_stack = NULL;
    new_thread->call_stack_count = 0;
    new_thread->call_stack_max = 0;
    QLIST_INSERT_HEAD(&thread_list, new_thread, global_entry);
    QLIST_INSERT_HEAD(&proc->threads, new_thread, proc_entry);
    return new_thread;
}

/* Creates and lists a process descriptor for a new process.
 * This routine will allocate and initialize a new process descriptor. After
 * that it will create the main thread descriptor for the process (with the
 * thread ID equal to the new process ID), and then the new process descriptor
 * will be inserted into the global list of running processes.
 * Param:
 *  pid - Process ID of the process that's being created.
 *  parent_pid - Process ID of the parent process.
 * Return:
 *  New process descriptor on success, or NULL on failure.
 */
static ProcDesc*
create_new_process(uint32_t pid, uint32_t parent_pid)
{
    // Create and init the new process descriptor.
    ProcDesc* new_proc = (ProcDesc*)qemu_malloc(sizeof(ProcDesc));
    if (new_proc == NULL) {
        ME("memcheck: Unable to allocate new process descriptor");
        return NULL;
    }
    QLIST_INIT(&new_proc->threads);
    allocmap_init(&new_proc->alloc_map);
    mmrangemap_init(&new_proc->mmrange_map);
    new_proc->pid = pid;
    new_proc->parent_pid = parent_pid;
    new_proc->image_path = NULL;
    new_proc->flags = 0;

    if (parent_pid != 0) {
        /* If the new process has been forked, it inherits a copy of the
         * parent's process heap, as well as the parent's mmapping of loaded
         * modules. So, on fork we're required to copy the parent's allocation
         * descriptors map, as well as the parent's mmapping map, to the new
         * process. */
        int failed;
        ProcDesc* parent = get_process_from_pid(parent_pid);
        if (parent == NULL) {
            ME("memcheck: Unable to get parent process pid=%u for new process pid=%u",
               parent_pid, pid);
            qemu_free(new_proc);
            return NULL;
        }

        /* Copy the parent's allocation map, setting the "inherited" flag, and
         * clearing the parent's "transition" flag in the copied entries. */
        failed = allocmap_copy(&new_proc->alloc_map, &parent->alloc_map,
                               MDESC_FLAG_INHERITED_ON_FORK,
                               MDESC_FLAG_TRANSITION_ENTRY);
        if (failed) {
            ME("memcheck: Unable to copy process' %s[pid=%u] allocation map to new process pid=%u",
               parent->image_path, parent_pid, pid);
            allocmap_empty(&new_proc->alloc_map);
            qemu_free(new_proc);
            return NULL;
        }

        // Copy the parent's memory mappings map.
        failed = mmrangemap_copy(&new_proc->mmrange_map, &parent->mmrange_map);
        if (failed) {
            ME("memcheck: Unable to copy process' %s[pid=%u] mmrange map to new process pid=%u",
               parent->image_path, parent_pid, pid);
            mmrangemap_empty(&new_proc->mmrange_map);
            allocmap_empty(&new_proc->alloc_map);
            qemu_free(new_proc);
            return NULL;
        }
    }

    // Create and register the main thread descriptor for the new process.
    if (create_new_thread(new_proc, pid) == NULL) {
        mmrangemap_empty(&new_proc->mmrange_map);
        allocmap_empty(&new_proc->alloc_map);
        qemu_free(new_proc);
        return NULL;
    }

    // List the new process.
    QLIST_INSERT_HEAD(&proc_list, new_proc, global_entry);

    return new_proc;
}
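
/* Informational note: on any failure above, create_new_process() releases
 * everything it has allocated up to that point (the maps are emptied and the
 * descriptor itself is freed), so callers only need to check the return
 * value. A hypothetical call site would look like:
 *
 *   ProcDesc* proc = create_new_process(pid, parent_pid);
 *   if (proc == NULL) {
 *       return;   // nothing to clean up on the caller's side
 *   }
 */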

/* Finds the thread descriptor for a thread id in the global list of running
 * threads.
 * Param:
 *  tid - Thread ID to look up the thread descriptor for.
 * Return:
 *  Found thread descriptor, or NULL if the thread descriptor has not been
 *  found.
 */
static ThreadDesc*
get_thread_from_tid(uint32_t tid)
{
    ThreadDesc* thread;

    /* There is a pretty good chance that when this call is made, it's made
     * to get the descriptor for the current thread. Let's check that first,
     * so we don't have to iterate through the entire list. */
    if (tid == current_tid && current_thread != NULL) {
        return current_thread;
    }

    QLIST_FOREACH(thread, &thread_list, global_entry) {
        if (tid == thread->tid) {
            if (tid == current_tid) {
                current_thread = thread;
            }
            return thread;
        }
    }
    return NULL;
}

/* Gets the thread descriptor for the current thread.
 * Return:
 *  Found thread descriptor, or NULL if the thread descriptor has not been
 *  found.
 */
ThreadDesc*
get_current_thread(void)
{
    // Let's see if the current thread descriptor has been cached.
    if (current_thread == NULL) {
        /* The descriptor is not cached. Look it up in the list. Note that
         * get_thread_from_tid(current_tid) is not used here in order to
         * optimize this code for performance, as this routine is called from
         * the performance sensitive path. */
        ThreadDesc* thread;
        QLIST_FOREACH(thread, &thread_list, global_entry) {
            if (current_tid == thread->tid) {
                current_thread = thread;
                return current_thread;
            }
        }
    }
    return current_thread;
}

/* Finds the process descriptor for a thread id.
 * Param:
 *  tid - Thread ID to look up the process descriptor for.
 * Return:
 *  Process descriptor for the thread, or NULL if the process descriptor
 *  has not been found.
 */
static inline ProcDesc*
get_process_from_tid(uint32_t tid)
{
    const ThreadDesc* thread = get_thread_from_tid(tid);
    return (thread != NULL) ? thread->process : NULL;
}

/* Sets, or replaces, the process image path in a process descriptor.
 * Generally, a new process' image path is unknown until we calculate it in
 * the handler for the TRACE_DEV_REG_CMDLINE event. This routine is called
 * from the TRACE_DEV_REG_CMDLINE event handler to set, or replace, the
 * process image path.
 * Param:
 *  proc - Descriptor of the process where to set, or replace, the image path.
 *  image_path - Image path of the process, transmitted with the
 *      TRACE_DEV_REG_CMDLINE event.
 *  set_flags_on_replace - Flags to be set when the current image path for the
 *      process has actually been replaced with the new one.
 * Return:
 *  Zero on success, or -1 on failure.
 */
static int
procdesc_set_image_path(ProcDesc* proc,
                        const char* image_path,
                        uint32_t set_flags_on_replace)
{
    if (image_path == NULL || proc == NULL) {
        return 0;
    }

    if (proc->image_path != NULL) {
        /* The process could have been forked, and inherited the image path of
         * the parent process. However, it seems that "fork" in terms of
         * TRACE_XXX is not necessarily a strict "fork", but rather new process
         * creation in general. So, if that's the case, we need to override the
         * image path inherited from the parent process. */
        if (!strcmp(proc->image_path, image_path)) {
            // Paths are the same. Just bail out.
            return 0;
        }
        qemu_free(proc->image_path);
        proc->image_path = NULL;
    }

    // Save the new image path into the process' descriptor.
    proc->image_path = qemu_malloc(strlen(image_path) + 1);
    if (proc->image_path == NULL) {
        ME("memcheck: Unable to allocate %u bytes for image path %s to set it for pid=%u",
           strlen(image_path) + 1, image_path, proc->pid);
        return -1;
    }
    strcpy(proc->image_path, image_path);
    proc->flags |= set_flags_on_replace;
    return 0;
}
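
/* Informational note: the two call sites of this routine in this file use it
 * differently (see memcheck_fork() and memcheck_set_cmd_line() below):
 *
 *   // On fork: inherit the parent's path; don't mark it as replaced.
 *   procdesc_set_image_path(new_proc, parent_proc->image_path, 0);
 *
 *   // On exec (TRACE_DEV_REG_CMDLINE): install the real path and flag it.
 *   procdesc_set_image_path(current_proc, parsed,
 *                           PROC_FLAG_IMAGE_PATH_REPLACED);
 */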

/* Frees thread descriptor. */
static void
threaddesc_free(ThreadDesc* thread)
{
    uint32_t indx;

    if (thread == NULL) {
        return;
    }

    if (thread->call_stack != NULL) {
        for (indx = 0; indx < thread->call_stack_count; indx++) {
            if (thread->call_stack[indx].module_path != NULL) {
                qemu_free(thread->call_stack[indx].module_path);
            }
        }
        qemu_free(thread->call_stack);
    }
    qemu_free(thread);
}

// =============================================================================
// Process management API
// =============================================================================

void
memcheck_init_proc_management(void)
{
    QLIST_INIT(&proc_list);
    QLIST_INIT(&thread_list);
}

ProcDesc*
get_process_from_pid(uint32_t pid)
{
    ProcDesc* proc;

    /* Chances are that pid addresses the current process. Let's check that
     * first, so we don't have to iterate through the entire process list. */
    if (current_thread != NULL && current_thread->process->pid == pid) {
        current_process = current_thread->process;
        return current_process;
    }

    QLIST_FOREACH(proc, &proc_list, global_entry) {
        if (pid == proc->pid) {
            break;
        }
    }
    return proc;
}

ProcDesc*
get_current_process(void)
{
    if (current_process == NULL) {
        const ThreadDesc* cur_thread = get_current_thread();
        if (cur_thread != NULL) {
            current_process = cur_thread->process;
        }
    }
    return current_process;
}
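
/* Informational sketch (hypothetical usage, not part of the build): the
 * current_thread/current_process caches above are only valid between two
 * memcheck_switch() calls. The sequence assumed by the handlers below is
 * roughly:
 *
 *   memcheck_switch(tid);                    // kernel scheduled thread 'tid'
 *   ProcDesc* proc = get_current_process();  // cache is (re)filled lazily
 *   ...                                      // use 'proc' until next switch
 */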

void
memcheck_on_call(target_ulong from, target_ulong ret)
{
    const uint32_t grow_by = 32;
    const uint32_t max_stack = grow_by;
    ThreadDesc* thread = get_current_thread();
    if (thread == NULL) {
        return;
    }

    /* We're not saving the call stack until the process starts executing. */
    if (!procdesc_is_executing(thread->process)) {
        return;
    }

    const MMRangeDesc* rdesc = procdesc_get_range_desc(thread->process, from);
    if (rdesc == NULL) {
        ME("memcheck: Unable to find mapping for guest PC 0x%08X in process %s[pid=%u]",
           from, thread->process->image_path, thread->process->pid);
        return;
    }

    /* Limit the call stack size. There are cases when the call stack can get
     * quite deep due to recursion (up to 4000 entries). */
    if (thread->call_stack_count >= max_stack) {
#if 0
        /* This happens quite often. */
        MD("memcheck: Thread stack for %s[pid=%u, tid=%u] is too big: %u",
           thread->process->image_path, thread->process->pid, thread->tid,
           thread->call_stack_count);
#endif
        return;
    }

    if (thread->call_stack_count >= thread->call_stack_max) {
        /* Expand the call stack array buffer. */
        thread->call_stack_max += grow_by;
        ThreadCallStackEntry* new_array =
            qemu_malloc(thread->call_stack_max * sizeof(ThreadCallStackEntry));
        if (new_array == NULL) {
            ME("memcheck: Unable to allocate %u bytes for calling stack.",
               thread->call_stack_max * sizeof(ThreadCallStackEntry));
            thread->call_stack_max -= grow_by;
            return;
        }
        if (thread->call_stack_count != 0) {
            memcpy(new_array, thread->call_stack,
                   thread->call_stack_count * sizeof(ThreadCallStackEntry));
        }
        if (thread->call_stack != NULL) {
            qemu_free(thread->call_stack);
        }
        thread->call_stack = new_array;
    }
    thread->call_stack[thread->call_stack_count].call_address = from;
    thread->call_stack[thread->call_stack_count].call_address_rel =
            mmrangedesc_get_module_offset(rdesc, from);
    thread->call_stack[thread->call_stack_count].ret_address = ret;
    thread->call_stack[thread->call_stack_count].ret_address_rel =
            mmrangedesc_get_module_offset(rdesc, ret);
    thread->call_stack[thread->call_stack_count].module_path =
            qemu_malloc(strlen(rdesc->path) + 1);
    if (thread->call_stack[thread->call_stack_count].module_path == NULL) {
        ME("memcheck: Unable to allocate %u bytes for module path in the thread calling stack.",
            strlen(rdesc->path) + 1);
        return;
    }
    strcpy(thread->call_stack[thread->call_stack_count].module_path,
           rdesc->path);
    thread->call_stack_count++;
}
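
/* Informational note: with grow_by == 32 and max_stack == grow_by, the call
 * stack array above grows exactly once (capacity 0 -> 32) and recording stops
 * at 32 frames; deeper recursion is silently ignored. A hypothetical tweak to
 * record deeper stacks would be to raise the cap to a multiple of grow_by:
 *
 *   const uint32_t grow_by   = 32;
 *   const uint32_t max_stack = 4 * grow_by;   // cap recording at 128 frames
 */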

void
memcheck_on_ret(target_ulong ret)
{
    ThreadDesc* thread = get_current_thread();
    if (thread == NULL) {
        return;
    }

    /* We're not saving the call stack until the process starts executing. */
    if (!procdesc_is_executing(thread->process)) {
        return;
    }

    if (thread->call_stack_count > 0) {
        int indx = (int)thread->call_stack_count - 1;
        for (; indx >= 0; indx--) {
            if (thread->call_stack[indx].ret_address == ret) {
                thread->call_stack_count = indx;
                return;
            }
        }
    }
}
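
/* Informational note (a reading of the code above, not a statement of the
 * original design intent): memcheck_on_ret() scans the recorded stack from
 * the top down for a frame whose saved return address matches 'ret', then
 * truncates the stack so that the matching frame and everything above it are
 * discarded. A single observed return can therefore pop several frames at
 * once, which compensates for calls whose returns were never recorded (for
 * example, frames skipped by the max_stack cap in memcheck_on_call()). */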

// =============================================================================
// Handlers for events generated by the kernel.
// =============================================================================

void
memcheck_init_pid(uint32_t new_pid)
{
    create_new_process(new_pid, 0);
    T(PROC_NEW_PID, "memcheck: init_pid(pid=%u) in current thread tid=%u\n",
      new_pid, current_tid);
}

void
memcheck_switch(uint32_t tid)
{
    /* Since a new thread has become active, we have to invalidate the cached
     * descriptors for the current thread and process. */
    current_thread = NULL;
    current_process = NULL;
    current_tid = tid;
}

void
memcheck_fork(uint32_t tgid, uint32_t new_pid)
{
    ProcDesc* parent_proc;
    ProcDesc* new_proc;

    /* tgid may match new_pid, in which case the current process is the one
     * that's being forked; otherwise tgid identifies the process that's being
     * forked. */
    if (new_pid == tgid) {
        parent_proc = get_current_process();
    } else {
        parent_proc = get_process_from_tid(tgid);
    }

    if (parent_proc == NULL) {
        ME("memcheck: FORK(%u, %u): Unable to look up parent process. Current tid=%u",
           tgid, new_pid, current_tid);
        return;
    }

    if (parent_proc->pid != get_current_process()->pid) {
        MD("memcheck: FORK(%u, %u): parent %s[pid=%u] is not the current process %s[pid=%u]",
           tgid, new_pid, parent_proc->image_path, parent_proc->pid,
           get_current_process()->image_path, get_current_process()->pid);
    }

    new_proc = create_new_process(new_pid, parent_proc->pid);
    if (new_proc == NULL) {
        return;
    }

    /* Since we may be forking the parent process, the new process needs to
     * inherit the parent's image path. */
    procdesc_set_image_path(new_proc, parent_proc->image_path, 0);

    T(PROC_FORK, "memcheck: FORK(tgid=%u, new_pid=%u) by %s[pid=%u] (tid=%u)\n",
      tgid, new_pid, parent_proc->image_path, parent_proc->pid, current_tid);
}

void
memcheck_clone(uint32_t tgid, uint32_t new_tid)
{
    ProcDesc* parent_proc;

    /* tgid may match new_tid, in which case the current process is the one
     * that creates the thread; otherwise tgid identifies the process that
     * creates the thread. */
    if (new_tid == tgid) {
        parent_proc = get_current_process();
    } else {
        parent_proc = get_process_from_tid(tgid);
    }

    if (parent_proc == NULL) {
        ME("memcheck: CLONE(%u, %u) Unable to look up parent process. Current tid=%u",
           tgid, new_tid, current_tid);
        return;
    }

    if (parent_proc->pid != get_current_process()->pid) {
        ME("memcheck: CLONE(%u, %u): parent %s[pid=%u] is not the current process %s[pid=%u]",
           tgid, new_tid, parent_proc->image_path, parent_proc->pid,
           get_current_process()->image_path, get_current_process()->pid);
    }

    create_new_thread(parent_proc, new_tid);

    T(PROC_CLONE, "memcheck: CLONE(tgid=%u, new_tid=%u) by %s[pid=%u] (tid=%u)\n",
      tgid, new_tid, parent_proc->image_path, parent_proc->pid, current_tid);
}

void
memcheck_set_cmd_line(const char* cmd_arg, unsigned cmdlen)
{
    char parsed[4096];
    int n;

    ProcDesc* current_proc = get_current_process();
    if (current_proc == NULL) {
        ME("memcheck: CMDL(%s, %u): Unable to look up process for current tid=%3u",
           cmd_arg, cmdlen, current_tid);
        return;
    }

    /* The image path is the first argument in the command line. Note that,
     * due to limitations of TRACE_XXX, cmdlen can never exceed
     * CLIENT_PAGE_SIZE. */
    memcpy(parsed, cmd_arg, cmdlen);

    // Cut the first argument off the entire command line.
    for (n = 0; n < cmdlen; n++) {
        if (parsed[n] == ' ') {
            break;
        }
    }
    parsed[n] = '\0';

    // Save the process' image path into its descriptor.
    procdesc_set_image_path(current_proc, parsed,
                            PROC_FLAG_IMAGE_PATH_REPLACED);
    current_proc->flags |= PROC_FLAG_EXECUTING;

    /* At this point we need to discard the memory mappings inherited from
     * the parent process, since this process has become "independent" from
     * its parent. */
    mmrangemap_empty(&current_proc->mmrange_map);
    T(PROC_START, "memcheck: Executing process %s[pid=%u]\n",
      current_proc->image_path, current_proc->pid);
}
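
/* Informational note on the assumed event ordering (inferred from the
 * handlers in this file, not from kernel documentation): a new process is
 * typically announced by memcheck_fork(), which copies the parent's
 * allocation and mapping state, and only later by the TRACE_DEV_REG_CMDLINE
 * event handled in memcheck_set_cmd_line(), which marks the process as
 * executing and drops the inherited mappings:
 *
 *   memcheck_fork(tgid, new_pid);            // inherit the parent's state
 *   memcheck_switch(new_pid);                // main thread (tid == pid) runs
 *   memcheck_set_cmd_line(cmdline, cmdlen);  // exec: becomes "independent"
 */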

void
memcheck_exit(uint32_t exit_code)
{
    ProcDesc* proc;
    int leaks_reported = 0;
    MallocDescEx leaked_alloc;

    // Get the exiting thread's descriptor.
    ThreadDesc* thread = get_current_thread();
    if (thread == NULL) {
        ME("memcheck: EXIT(%u): Unable to look up thread for current tid=%u",
           exit_code, current_tid);
        return;
    }
    proc = thread->process;

    // The current thread is exiting, so NULL its cached descriptor.
    current_thread = NULL;

    // Unlist the thread from its process as well as from the global list.
    QLIST_REMOVE(thread, proc_entry);
    QLIST_REMOVE(thread, global_entry);
    threaddesc_free(thread);

    /* Let's see if this was the last thread in the process, which would
     * indicate process termination. */
    if (!QLIST_EMPTY(&proc->threads)) {
        return;
    }

    // The process is terminating. Report leaks and free resources.
    proc->flags |= PROC_FLAG_EXITING;

    /* Empty the allocation descriptors map for the exiting process,
     * reporting leaked blocks in the process. */
    while (!allocmap_pull_first(&proc->alloc_map, &leaked_alloc)) {
        /* We should "forgive" blocks that were inherited from the
         * parent process on fork, or were allocated while the process was
         * in the "transition" state. */
        if (!mallocdescex_is_inherited_on_fork(&leaked_alloc) &&
            !mallocdescex_is_transition_entry(&leaked_alloc)) {
            if (!leaks_reported) {
                // First leak detected. Print the report's header.
                T(CHECK_LEAK, "memcheck: Process %s[pid=%u] is exiting leaking allocated blocks:\n",
                  proc->image_path, proc->pid);
            }
            if (trace_flags & TRACE_CHECK_LEAK_ENABLED) {
                // Dump leaked block information.
                printf("   Leaked block %u:\n", leaks_reported + 1);
                memcheck_dump_malloc_desc(&leaked_alloc, 0, 0);
                if (leaked_alloc.call_stack != NULL) {
                    const int max_stack = 24;
                    if (max_stack >= leaked_alloc.call_stack_count) {
                        printf("      Call stack:\n");
                    } else {
                        printf("      Call stack (first %u of %u entries):\n",
                               max_stack, leaked_alloc.call_stack_count);
                    }
                    uint32_t stk;
                    for (stk = 0;
                         stk < leaked_alloc.call_stack_count && stk < max_stack;
                         stk++) {
                        const MMRangeDesc* rdesc =
                           procdesc_find_mapentry(proc,
                                                  leaked_alloc.call_stack[stk]);
                        if (rdesc != NULL) {
                            Elf_AddressInfo elff_info;
                            ELFF_HANDLE elff_handle = NULL;
                            uint32_t rel =
                                mmrangedesc_get_module_offset(rdesc,
                                                  leaked_alloc.call_stack[stk]);
                            printf("         Frame %u: PC=0x%08X (relative 0x%08X) in module %s\n",
                                   stk, leaked_alloc.call_stack[stk], rel,
                                   rdesc->path);
                            if (memcheck_get_address_info(leaked_alloc.call_stack[stk],
                                                          rdesc, &elff_info,
                                                          &elff_handle) == 0) {
                                printf("            Routine %s @ %s/%s:%u\n",
                                       elff_info.routine_name,
                                       elff_info.dir_name,
                                       elff_info.file_name,
                                       elff_info.line_number);
                                elff_free_pc_address_info(elff_handle,
                                                          &elff_info);
                                elff_close(elff_handle);
                            }
                        } else {
                            printf("         Frame %u: PC=0x%08X in module <unknown>\n",
                                   stk, leaked_alloc.call_stack[stk]);
                        }
                    }
                }
            }
            leaks_reported++;
        }
    }

    if (leaks_reported) {
        T(CHECK_LEAK, "memcheck: Process %s[pid=%u] is leaking %u allocated blocks.\n",
          proc->image_path, proc->pid, leaks_reported);
    }

    T(PROC_EXIT, "memcheck: Exiting process %s[pid=%u] in thread %u. Memory leaks detected: %u\n",
      proc->image_path, proc->pid, current_tid, leaks_reported);

    /* Since the current process is exiting, we need to NULL its cached
     * descriptor, and unlist it from the list of running processes. */
    current_process = NULL;
    QLIST_REMOVE(proc, global_entry);

    // Empty the process' memory mappings map.
    mmrangemap_empty(&proc->mmrange_map);
    if (proc->image_path != NULL) {
        qemu_free(proc->image_path);
    }
    qemu_free(proc);
}
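
/* Informational sketch of the leak report layout produced above. All values
 * (paths, PIDs, addresses, symbol names) are hypothetical and shown purely
 * for illustration:
 *
 *   memcheck: Process /system/bin/example[pid=1234] is exiting leaking allocated blocks:
 *      Leaked block 1:
 *      ...output of memcheck_dump_malloc_desc()...
 *         Call stack:
 *            Frame 0: PC=0x0000ABCD (relative 0x00001BCD) in module /system/lib/libexample.so
 *               Routine example_alloc @ /src/example/example.c:42
 */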

void
memcheck_mmap_exepath(target_ulong vstart,
                      target_ulong vend,
                      target_ulong exec_offset,
                      const char* path)
{
    MMRangeDesc desc;
    MMRangeDesc replaced;
    RBTMapResult ins_res;

    ProcDesc* proc = get_current_process();
    if (proc == NULL) {
        ME("memcheck: MMAP(0x%08X, 0x%08X, 0x%08X, %s) Unable to look up current process. Current tid=%u",
           vstart, vend, exec_offset, path, current_tid);
        return;
    }

    /* First, unmap any overlapping sections. */
    memcheck_unmap(vstart, vend);

    /* Add the new mapping. */
    desc.map_start = vstart;
    desc.map_end = vend;
    desc.exec_offset = exec_offset;
    desc.path = qemu_malloc(strlen(path) + 1);
    if (desc.path == NULL) {
        ME("memcheck: MMAP(0x%08X, 0x%08X, 0x%08X, %s) Unable to allocate path for the entry.",
           vstart, vend, exec_offset, path);
        return;
    }
    strcpy(desc.path, path);

    ins_res = mmrangemap_insert(&proc->mmrange_map, &desc, &replaced);
    if (ins_res == RBT_MAP_RESULT_ERROR) {
        ME("memcheck: %s[pid=%u] unable to insert memory mapping entry: 0x%08X - 0x%08X",
           proc->image_path, proc->pid, vstart, vend);
        qemu_free(desc.path);
        return;
    }

    if (ins_res == RBT_MAP_RESULT_ENTRY_REPLACED) {
        MD("memcheck: %s[pid=%u] MMRANGE %s[0x%08X - 0x%08X] is replaced with %s[0x%08X - 0x%08X]",
           proc->image_path, proc->pid, replaced.path, replaced.map_start,
           replaced.map_end, desc.path, desc.map_start, desc.map_end);
        qemu_free(replaced.path);
    }

    T(PROC_MMAP, "memcheck: %s[pid=%u] %s is mapped: 0x%08X - 0x%08X + 0x%08X\n",
      proc->image_path, proc->pid, path, vstart, vend, exec_offset);
}

void
memcheck_unmap(target_ulong vstart, target_ulong vend)
{
    MMRangeDesc desc;
    ProcDesc* proc = get_current_process();
    if (proc == NULL) {
        ME("memcheck: UNMAP(0x%08X, 0x%08X) Unable to look up current process. Current tid=%u",
           vstart, vend, current_tid);
        return;
    }

    if (mmrangemap_pull(&proc->mmrange_map, vstart, vend, &desc)) {
        return;
    }

    if (desc.map_start >= vstart && desc.map_end <= vend) {
        /* The entire mapping has been deleted. */
        T(PROC_MMAP, "memcheck: %s[pid=%u] %s is unmapped: [0x%08X - 0x%08X + 0x%08X]\n",
          proc->image_path, proc->pid, desc.path, vstart, vend, desc.exec_offset);
        qemu_free(desc.path);
        return;
    }

    /* This can be the first stage of a "remap" request, when part of the
     * existing mapping has been unmapped. If that's the case, let's cut the
     * unmapped part from the block that we just pulled, and add whatever's
     * left back to the map. */
    T(PROC_MMAP, "memcheck: REMAP(0x%08X, 0x%08X + 0x%08X) -> (0x%08X, 0x%08X)\n",
       desc.map_start, desc.map_end, desc.exec_offset, vstart, vend);
    if (desc.map_start == vstart) {
        /* We cut a part from the beginning. Add the tail back. */
        desc.exec_offset += vend - desc.map_start;
        desc.map_start = vend;
        mmrangemap_insert(&proc->mmrange_map, &desc, NULL);
    } else if (desc.map_end == vend) {
        /* We cut a part from the tail. Add the beginning back. */
        desc.map_end = vstart;
        mmrangemap_insert(&proc->mmrange_map, &desc, NULL);
    } else {
        /* We cut a piece in the middle. */
        MMRangeDesc tail;
        tail.map_start = vend;
        tail.map_end = desc.map_end;
        tail.exec_offset = vend - desc.map_start + desc.exec_offset;
        tail.path = qemu_malloc(strlen(desc.path) + 1);
        strcpy(tail.path, desc.path);
        mmrangemap_insert(&proc->mmrange_map, &tail, NULL);
        desc.map_end = vstart;
        mmrangemap_insert(&proc->mmrange_map, &desc, NULL);
    }
}
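
/* Informational sketch of the three partial-unmap cases handled above, using
 * hypothetical addresses purely for illustration:
 *
 *   existing mapping:       [0x1000 ............................. 0x5000)
 *
 *   unmap [0x1000, 0x2000) -> keep the tail [0x2000, 0x5000),
 *                             exec_offset advanced by 0x1000 (vend - map_start)
 *   unmap [0x4000, 0x5000) -> keep the head [0x1000, 0x4000)
 *   unmap [0x2000, 0x3000) -> keep the head [0x1000, 0x2000) and the tail
 *                             [0x3000, 0x5000), with the tail's exec_offset
 *                             advanced by 0x2000 (vend - map_start)
 */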
    795