/* Copyright (C) 2007-2010 The Android Open Source Project
**
** This software is licensed under the terms of the GNU General Public
** License version 2, as published by the Free Software Foundation, and
** may be copied, distributed, and modified under those terms.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
** GNU General Public License for more details.
*/

/*
 * Contains the implementation of routines related to process management in
 * the memchecker framework.
 */

/* This file should compile iff qemu is built with the memory checking
 * configuration turned on. */
#ifndef CONFIG_MEMCHECK
#error CONFIG_MEMCHECK is not defined.
#endif  // CONFIG_MEMCHECK

#include "elff/elff_api.h"
#include "memcheck.h"
#include "memcheck_proc_management.h"
#include "memcheck_logging.h"

/* Current thread id.
 * This value is updated with each call to memcheck_switch, saving the ID
 * of the thread that becomes current. */
static uint32_t current_tid = 0;

/* Current thread descriptor.
 * This variable caches the current thread descriptor. The value gets
 * initialized on an "as needed" basis, when the descriptor for the current
 * thread is requested for the first time.
 * Note that every time the memcheck_switch routine is called, this value gets
 * NULL'ed, since another thread becomes current. */
static ThreadDesc* current_thread = NULL;

/* Current process descriptor.
 * This variable caches the current process descriptor. The value gets
 * initialized on an "as needed" basis, when the descriptor for the current
 * process is requested for the first time.
 * Note that every time the memcheck_switch routine is called, this value gets
 * NULL'ed, since another thread becomes current, so a process switch may have
 * occurred as well. */
static ProcDesc*    current_process = NULL;

/* List of running processes. */
static QLIST_HEAD(proc_list, ProcDesc) proc_list;

/* List of running threads. */
static QLIST_HEAD(thread_list, ThreadDesc) thread_list;
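
/* For reference, a rough sketch of how these descriptors are assumed to be
 * wired together (the real declarations live in the memcheck headers; the
 * field types below are inferred from their usage in this file and may
 * differ in detail):
 *
 *   struct ThreadDesc {
 *       QLIST_ENTRY(ThreadDesc) global_entry;  // link into thread_list
 *       QLIST_ENTRY(ThreadDesc) proc_entry;    // link into proc->threads
 *       ProcDesc*             process;         // owning process
 *       uint32_t              tid;
 *       ThreadCallStackEntry* call_stack;      // recorded call stack
 *       uint32_t              call_stack_count;
 *       uint32_t              call_stack_max;
 *   };
 *
 *   struct ProcDesc {
 *       QLIST_ENTRY(ProcDesc) global_entry;    // link into proc_list
 *       QLIST_HEAD(, ThreadDesc) threads;      // threads of this process
 *       uint32_t              pid, parent_pid;
 *       char*                 image_path;      // set from the cmdline event
 *       uint32_t              flags;           // PROC_FLAG_* values
 *       ... plus the alloc_map and mmrange_map used throughout this file ...
 *   };
 */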

// =============================================================================
// Static routines
// =============================================================================

/* Creates and lists a thread descriptor for a new thread.
 * This routine will allocate and initialize a new thread descriptor. After
 * that, it will insert the descriptor into the global list of running
 * threads, as well as into the thread list of the process descriptor of the
 * process in whose context this thread is created.
 * Param:
 *  proc - Process descriptor of the process in whose context the new thread
 *      is created.
 *  tid - Thread ID of the thread that's being created.
 * Return:
 *  New thread descriptor on success, or NULL on failure.
 */
static ThreadDesc*
create_new_thread(ProcDesc* proc, uint32_t tid)
{
    ThreadDesc* new_thread = (ThreadDesc*)qemu_malloc(sizeof(ThreadDesc));
    if (new_thread == NULL) {
        ME("memcheck: Unable to allocate new thread descriptor.");
        return NULL;
    }
    new_thread->tid = tid;
    new_thread->process = proc;
    new_thread->call_stack = NULL;
    new_thread->call_stack_count = 0;
    new_thread->call_stack_max = 0;
    QLIST_INSERT_HEAD(&thread_list, new_thread, global_entry);
    QLIST_INSERT_HEAD(&proc->threads, new_thread, proc_entry);
    return new_thread;
}

/* Creates and lists a process descriptor for a new process.
 * This routine will allocate and initialize a new process descriptor. After
 * that, it will create the main thread descriptor for the process (with the
 * thread ID equal to the new process ID), and then insert the new process
 * descriptor into the global list of running processes.
 * Param:
 *  pid - Process ID of the process that's being created.
 *  parent_pid - Process ID of the parent process.
 * Return:
 *  New process descriptor on success, or NULL on failure.
 */
static ProcDesc*
create_new_process(uint32_t pid, uint32_t parent_pid)
{
    // Create and init new process descriptor.
    ProcDesc* new_proc = (ProcDesc*)qemu_malloc(sizeof(ProcDesc));
    if (new_proc == NULL) {
        ME("memcheck: Unable to allocate new process descriptor");
        return NULL;
    }
    QLIST_INIT(&new_proc->threads);
    allocmap_init(&new_proc->alloc_map);
    mmrangemap_init(&new_proc->mmrange_map);
    new_proc->pid = pid;
    new_proc->parent_pid = parent_pid;
    new_proc->image_path = NULL;
    new_proc->flags = 0;

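    /* A parent_pid of zero means the process was registered directly (see
     * memcheck_init_pid) rather than forked, so there is nothing to inherit
     * from a parent. */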
    if (parent_pid != 0) {
        /* If the new process has been forked, it inherits a copy of the
         * parent's process heap, as well as the parent's mmapping of loaded
         * modules. So, on fork we're required to copy the parent's allocation
         * descriptor map, as well as the parent's mmapping map, to the new
         * process. */
        int failed;
        ProcDesc* parent = get_process_from_pid(parent_pid);
        if (parent == NULL) {
            ME("memcheck: Unable to get parent process pid=%u for new process pid=%u",
               parent_pid, pid);
            qemu_free(new_proc);
            return NULL;
        }

        /* Copy the parent's allocation map, setting the "inherited" flag, and
         * clearing the parent's "transition" flag in the copied entries. */
        failed = allocmap_copy(&new_proc->alloc_map, &parent->alloc_map,
                               MDESC_FLAG_INHERITED_ON_FORK,
                               MDESC_FLAG_TRANSITION_ENTRY);
        if (failed) {
            ME("memcheck: Unable to copy process' %s[pid=%u] allocation map to new process pid=%u",
               parent->image_path, parent_pid, pid);
            allocmap_empty(&new_proc->alloc_map);
            qemu_free(new_proc);
            return NULL;
        }

        // Copy the parent's memory mappings map.
        failed = mmrangemap_copy(&new_proc->mmrange_map, &parent->mmrange_map);
        if (failed) {
            ME("memcheck: Unable to copy process' %s[pid=%u] mmrange map to new process pid=%u",
               parent->image_path, parent_pid, pid);
            mmrangemap_empty(&new_proc->mmrange_map);
            allocmap_empty(&new_proc->alloc_map);
            qemu_free(new_proc);
            return NULL;
        }
    }

    // Create and register the main thread descriptor for the new process.
    if (create_new_thread(new_proc, pid) == NULL) {
        mmrangemap_empty(&new_proc->mmrange_map);
        allocmap_empty(&new_proc->alloc_map);
        qemu_free(new_proc);
        return NULL;
    }

    // List the new process.
    QLIST_INSERT_HEAD(&proc_list, new_proc, global_entry);

    return new_proc;
}

/* Finds the thread descriptor for a thread ID in the global list of running
 * threads.
 * Param:
 *  tid - Thread ID to look up the thread descriptor for.
 * Return:
 *  Found thread descriptor, or NULL if the thread descriptor has not been found.
 */
static ThreadDesc*
get_thread_from_tid(uint32_t tid)
{
    ThreadDesc* thread;

    /* There is a pretty good chance that when this call is made, it's made
     * to get the descriptor for the current thread. Let's check that first,
     * so we don't have to iterate through the entire list. */
    if (tid == current_tid && current_thread != NULL) {
        return current_thread;
    }

    QLIST_FOREACH(thread, &thread_list, global_entry) {
        if (tid == thread->tid) {
            if (tid == current_tid) {
                current_thread = thread;
            }
            return thread;
        }
    }
    return NULL;
}

/* Gets the thread descriptor for the current thread.
 * Return:
 *  Found thread descriptor, or NULL if the thread descriptor has not been found.
 */
ThreadDesc*
get_current_thread(void)
{
    // Let's see if the current thread descriptor has been cached.
    if (current_thread == NULL) {
        /* The descriptor is not cached. Look it up in the list. Note that
         * get_thread_from_tid(current_tid) is not used here in order to
         * optimize this code for performance, as this routine is called from
         * a performance-sensitive path. */
        ThreadDesc* thread;
        QLIST_FOREACH(thread, &thread_list, global_entry) {
            if (current_tid == thread->tid) {
                current_thread = thread;
                return current_thread;
            }
        }
    }
    return current_thread;
}

/* Finds the process descriptor for a thread ID.
 * Param:
 *  tid - Thread ID to look up the process descriptor for.
 * Return:
 *  Process descriptor for the thread, or NULL if the process descriptor
 *  has not been found.
 */
static inline ProcDesc*
get_process_from_tid(uint32_t tid)
{
    const ThreadDesc* thread = get_thread_from_tid(tid);
    return (thread != NULL) ? thread->process : NULL;
}

/* Sets, or replaces, the process image path in a process descriptor.
 * Generally, a new process' image path is unknown until we calculate it in
 * the handler for the TRACE_DEV_REG_CMDLINE event. This routine is called from
 * the TRACE_DEV_REG_CMDLINE event handler to set, or replace, the process
 * image path.
 * Param:
 *  proc - Descriptor of the process where to set, or replace, the image path.
 *  image_path - Image path to the process, transmitted with the
 *      TRACE_DEV_REG_CMDLINE event.
 *  set_flags_on_replace - Flags to be set when the current image path for the
 *      process has actually been replaced with the new one.
 * Return:
 *  Zero on success, or -1 on failure.
 */
static int
procdesc_set_image_path(ProcDesc* proc,
                        const char* image_path,
                        uint32_t set_flags_on_replace)
{
    if (image_path == NULL || proc == NULL) {
        return 0;
    }

    if (proc->image_path != NULL) {
        /* The process could have been forked, and inherited the image path of
         * the parent process. However, it seems that "fork" in terms of
         * TRACE_XXX is not necessarily a strict "fork", but rather new process
         * creation in general. So, if that's the case, we need to override the
         * image path inherited from the parent process. */
        if (!strcmp(proc->image_path, image_path)) {
            // Paths are the same. Just bail out.
            return 0;
        }
        qemu_free(proc->image_path);
        proc->image_path = NULL;
    }

    // Save the new image path into the process' descriptor.
    proc->image_path = qemu_malloc(strlen(image_path) + 1);
    if (proc->image_path == NULL) {
        ME("memcheck: Unable to allocate %u bytes for image path %s to set it for pid=%u",
           strlen(image_path) + 1, image_path, proc->pid);
        return -1;
    }
    strcpy(proc->image_path, image_path);
    proc->flags |= set_flags_on_replace;
    return 0;
}

/* Frees thread descriptor. */
static void
threaddesc_free(ThreadDesc* thread)
{
    uint32_t indx;

    if (thread == NULL) {
        return;
    }

    if (thread->call_stack != NULL) {
        for (indx = 0; indx < thread->call_stack_count; indx++) {
            if (thread->call_stack[indx].module_path != NULL) {
                qemu_free(thread->call_stack[indx].module_path);
            }
        }
        qemu_free(thread->call_stack);
    }
    qemu_free(thread);
}

// =============================================================================
// Process management API
// =============================================================================

void
memcheck_init_proc_management(void)
{
    QLIST_INIT(&proc_list);
    QLIST_INIT(&thread_list);
}

ProcDesc*
get_process_from_pid(uint32_t pid)
{
    ProcDesc* proc;

    /* Chances are that pid addresses the current process. Let's check this,
     * so we don't have to iterate through the entire process list. */
    if (current_thread != NULL && current_thread->process->pid == pid) {
        current_process = current_thread->process;
        return current_process;
    }

    QLIST_FOREACH(proc, &proc_list, global_entry) {
        if (pid == proc->pid) {
            break;
        }
    }
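    /* Note that QLIST_FOREACH leaves 'proc' NULL when the list is exhausted
     * without a match, so this also covers the "not found" case. */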
    return proc;
}

ProcDesc*
get_current_process(void)
{
    if (current_process == NULL) {
        const ThreadDesc* cur_thread = get_current_thread();
        if (cur_thread != NULL) {
            current_process = cur_thread->process;
        }
    }
    return current_process;
}

void
memcheck_on_call(target_ulong from, target_ulong ret)
{
    const uint32_t grow_by = 32;
    const uint32_t max_stack = grow_by;
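    /* Note that max_stack == grow_by: the recorded call stack is capped at 32
     * entries, and the buffer below grows in increments of 32 entries. */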
    ThreadDesc* thread = get_current_thread();
    if (thread == NULL) {
        return;
    }

    /* We don't save the call stack until the process starts executing. */
    if (!procdesc_is_executing(thread->process)) {
        return;
    }

    const MMRangeDesc* rdesc = procdesc_get_range_desc(thread->process, from);
    if (rdesc == NULL) {
        ME("memcheck: Unable to find mapping for guest PC 0x%08X in process %s[pid=%u]",
           from, thread->process->image_path, thread->process->pid);
        return;
    }

    /* Limit the recorded call stack size. There are cases when the call stack
     * can get quite deep due to recursion (up to 4000 entries). */
    if (thread->call_stack_count >= max_stack) {
#if 0
        /* This happens quite often. */
        MD("memcheck: Thread stack for %s[pid=%u, tid=%u] is too big: %u",
           thread->process->image_path, thread->process->pid, thread->tid,
           thread->call_stack_count);
#endif
        return;
    }

    if (thread->call_stack_count >= thread->call_stack_max) {
        /* Expand the call stack array buffer. */
        thread->call_stack_max += grow_by;
        ThreadCallStackEntry* new_array =
            qemu_malloc(thread->call_stack_max * sizeof(ThreadCallStackEntry));
        if (new_array == NULL) {
            ME("memcheck: Unable to allocate %u bytes for calling stack.",
               thread->call_stack_max * sizeof(ThreadCallStackEntry));
            thread->call_stack_max -= grow_by;
            return;
        }
        if (thread->call_stack_count != 0) {
            memcpy(new_array, thread->call_stack,
                   thread->call_stack_count * sizeof(ThreadCallStackEntry));
        }
        if (thread->call_stack != NULL) {
            qemu_free(thread->call_stack);
        }
        thread->call_stack = new_array;
    }
    thread->call_stack[thread->call_stack_count].call_address = from;
    thread->call_stack[thread->call_stack_count].call_address_rel =
            mmrangedesc_get_module_offset(rdesc, from);
    thread->call_stack[thread->call_stack_count].ret_address = ret;
    thread->call_stack[thread->call_stack_count].ret_address_rel =
            mmrangedesc_get_module_offset(rdesc, ret);
    thread->call_stack[thread->call_stack_count].module_path =
            qemu_malloc(strlen(rdesc->path) + 1);
    if (thread->call_stack[thread->call_stack_count].module_path == NULL) {
        ME("memcheck: Unable to allocate %u bytes for module path in the thread calling stack.",
            strlen(rdesc->path) + 1);
        return;
    }
    strcpy(thread->call_stack[thread->call_stack_count].module_path,
           rdesc->path);
    thread->call_stack_count++;
}

void
memcheck_on_ret(target_ulong ret)
{
    ThreadDesc* thread = get_current_thread();
    if (thread == NULL) {
        return;
    }

    /* We don't save the call stack until the process starts executing. */
    if (!procdesc_is_executing(thread->process)) {
        return;
    }

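    /* Scan the recorded stack from the most recent frame down for a matching
     * return address. A single return may unwind several recorded frames at
     * once, e.g. when inner frames never returned through an address that was
     * recorded by memcheck_on_call. */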
    if (thread->call_stack_count > 0) {
        int indx = (int)thread->call_stack_count - 1;
        for (; indx >= 0; indx--) {
            if (thread->call_stack[indx].ret_address == ret) {
                thread->call_stack_count = indx;
                return;
            }
        }
    }
}

// =============================================================================
// Handlers for events generated by the kernel.
// =============================================================================
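
/* A rough sketch of the order in which these handlers are typically invoked
 * for a single process lifetime (inferred from the handlers below; the real
 * event stream is driven by the kernel and is not guaranteed to follow this
 * exact sequence):
 *
 *   memcheck_init_pid(pid)           - a brand new process gets its PID
 *   memcheck_switch(tid)             - a (possibly different) thread is scheduled
 *   memcheck_fork(tgid, new_pid)     - the process forks a child process
 *   memcheck_clone(tgid, new_tid)    - the process spawns a new thread
 *   memcheck_set_cmd_line(cmd, len)  - the image path becomes known (exec)
 *   memcheck_mmap_exepath(...)       - modules are mapped into the process
 *   memcheck_unmap(start, end)       - mappings are removed or shrunk
 *   memcheck_exit(code)              - threads exit; the last one ends the process
 */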

void
memcheck_init_pid(uint32_t new_pid)
{
    create_new_process(new_pid, 0);
    T(PROC_NEW_PID, "memcheck: init_pid(pid=%u) in current thread tid=%u\n",
      new_pid, current_tid);
}

void
memcheck_switch(uint32_t tid)
{
    /* Since a new thread becomes active, we have to invalidate the cached
     * descriptors for the current thread and process. */
    current_thread = NULL;
    current_process = NULL;
    current_tid = tid;
}

void
memcheck_fork(uint32_t tgid, uint32_t new_pid)
{
    ProcDesc* parent_proc;
    ProcDesc* new_proc;

    /* tgid may match new_pid, in which case the current process is the one
     * that's being forked; otherwise, tgid identifies the process that's
     * being forked. */
    if (new_pid == tgid) {
        parent_proc = get_current_process();
    } else {
        parent_proc = get_process_from_tid(tgid);
    }

    if (parent_proc == NULL) {
        ME("memcheck: FORK(%u, %u): Unable to look up parent process. Current tid=%u",
           tgid, new_pid, current_tid);
        return;
    }

    if (parent_proc->pid != get_current_process()->pid) {
        MD("memcheck: FORK(%u, %u): parent %s[pid=%u] is not the current process %s[pid=%u]",
           tgid, new_pid, parent_proc->image_path, parent_proc->pid,
           get_current_process()->image_path, get_current_process()->pid);
    }

    new_proc = create_new_process(new_pid, parent_proc->pid);
    if (new_proc == NULL) {
        return;
    }

    /* Since we're possibly forking the parent process, we need to inherit the
     * parent's image path in the forked process. */
    procdesc_set_image_path(new_proc, parent_proc->image_path, 0);

    T(PROC_FORK, "memcheck: FORK(tgid=%u, new_pid=%u) by %s[pid=%u] (tid=%u)\n",
      tgid, new_pid, parent_proc->image_path, parent_proc->pid, current_tid);
}

void
memcheck_clone(uint32_t tgid, uint32_t new_tid)
{
    ProcDesc* parent_proc;

    /* tgid may match new_tid, in which case the current process is the one
     * that creates the thread; otherwise, tgid identifies the process that
     * creates the thread. */
    if (new_tid == tgid) {
        parent_proc = get_current_process();
    } else {
        parent_proc = get_process_from_tid(tgid);
    }

    if (parent_proc == NULL) {
        ME("memcheck: CLONE(%u, %u) Unable to look up parent process. Current tid=%u",
           tgid, new_tid, current_tid);
        return;
    }

    if (parent_proc->pid != get_current_process()->pid) {
        ME("memcheck: CLONE(%u, %u): parent %s[pid=%u] is not the current process %s[pid=%u]",
           tgid, new_tid, parent_proc->image_path, parent_proc->pid,
           get_current_process()->image_path, get_current_process()->pid);
    }

    create_new_thread(parent_proc, new_tid);

    T(PROC_CLONE, "memcheck: CLONE(tgid=%u, new_tid=%u) by %s[pid=%u] (tid=%u)\n",
      tgid, new_tid, parent_proc->image_path, parent_proc->pid, current_tid);
}

void
memcheck_set_cmd_line(const char* cmd_arg, unsigned cmdlen)
{
    char parsed[4096];
    int n;

    ProcDesc* current_proc = get_current_process();
    if (current_proc == NULL) {
        ME("memcheck: CMDL(%s, %u): Unable to look up process for current tid=%3u",
           cmd_arg, cmdlen, current_tid);
        return;
    }

    /* The image path is the first argument in the command line. Note that due
     * to limitations of TRACE_XXX, cmdlen can never exceed CLIENT_PAGE_SIZE. */
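    /* Assumption: CLIENT_PAGE_SIZE does not exceed the size of the local
     * 'parsed' buffer above, so the memcpy below cannot overflow it. */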
    memcpy(parsed, cmd_arg, cmdlen);

    // Cut the first argument off the entire command line.
    for (n = 0; n < cmdlen; n++) {
        if (parsed[n] == ' ') {
            break;
        }
    }
    parsed[n] = '\0';

    // Save the process' image path into its descriptor.
    procdesc_set_image_path(current_proc, parsed,
                            PROC_FLAG_IMAGE_PATH_REPLACED);
    current_proc->flags |= PROC_FLAG_EXECUTING;

    /* At this point we need to discard the memory mappings inherited from
     * the parent process, since this process has become "independent" from
     * its parent. */
    mmrangemap_empty(&current_proc->mmrange_map);
    T(PROC_START, "memcheck: Executing process %s[pid=%u]\n",
      current_proc->image_path, current_proc->pid);
}

void
memcheck_exit(uint32_t exit_code)
{
    ProcDesc* proc;
    int leaks_reported = 0;
    MallocDescEx leaked_alloc;

    // Exiting thread descriptor.
    ThreadDesc* thread = get_current_thread();
    if (thread == NULL) {
        ME("memcheck: EXIT(%u): Unable to look up thread for current tid=%u",
           exit_code, current_tid);
        return;
    }
    proc = thread->process;

    // Since the current thread is exiting, we need to NULL its cached descriptor.
    current_thread = NULL;

    // Unlist the thread from its process' list, as well as from the global list.
    QLIST_REMOVE(thread, proc_entry);
    QLIST_REMOVE(thread, global_entry);
    threaddesc_free(thread);

    /* Let's see if this was the last thread in the process, which would
     * indicate process termination. */
    if (!QLIST_EMPTY(&proc->threads)) {
        return;
    }

    // The process is terminating. Report leaks and free resources.
    proc->flags |= PROC_FLAG_EXITING;

    /* Empty the allocation descriptor map for the exiting process,
     * reporting leaked blocks in the process. */
    while (!allocmap_pull_first(&proc->alloc_map, &leaked_alloc)) {
        /* We should "forgive" blocks that were inherited from the parent
         * process on fork, or were allocated while the process was in the
         * "transition" state. */
        if (!mallocdescex_is_inherited_on_fork(&leaked_alloc) &&
            !mallocdescex_is_transition_entry(&leaked_alloc)) {
            if (!leaks_reported) {
                // First leak detected. Print the report's header.
                T(CHECK_LEAK, "memcheck: Process %s[pid=%u] is exiting leaking allocated blocks:\n",
                  proc->image_path, proc->pid);
            }
            if (trace_flags & TRACE_CHECK_LEAK_ENABLED) {
                // Dump leaked block information.
                printf("   Leaked block %u:\n", leaks_reported + 1);
                memcheck_dump_malloc_desc(&leaked_alloc, 0, 0);
                if (leaked_alloc.call_stack != NULL) {
                    const int max_stack = 24;
                    if (max_stack >= leaked_alloc.call_stack_count) {
                        printf("      Call stack:\n");
                    } else {
                        printf("      Call stack (first %u of %u entries):\n",
                               max_stack, leaked_alloc.call_stack_count);
                    }
                    uint32_t stk;
                    for (stk = 0;
                         stk < leaked_alloc.call_stack_count && stk < max_stack;
                         stk++) {
                        const MMRangeDesc* rdesc =
                           procdesc_find_mapentry(proc,
                                                  leaked_alloc.call_stack[stk]);
                        if (rdesc != NULL) {
                            Elf_AddressInfo elff_info;
                            ELFF_HANDLE elff_handle = NULL;
                            uint32_t rel =
                                mmrangedesc_get_module_offset(rdesc,
                                                  leaked_alloc.call_stack[stk]);
                            printf("         Frame %u: PC=0x%08X (relative 0x%08X) in module %s\n",
                                   stk, leaked_alloc.call_stack[stk], rel,
                                   rdesc->path);
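                            /* Try to resolve the PC into a routine/file/line
                             * using the ELFF API. */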
                            if (memcheck_get_address_info(leaked_alloc.call_stack[stk],
                                                          rdesc, &elff_info,
                                                          &elff_handle) == 0) {
                                printf("            Routine %s @ %s/%s:%u\n",
                                       elff_info.routine_name,
                                       elff_info.dir_name,
                                       elff_info.file_name,
                                       elff_info.line_number);
                                elff_free_pc_address_info(elff_handle,
                                                          &elff_info);
                                elff_close(elff_handle);
                            }
                        } else {
                            printf("         Frame %u: PC=0x%08X in module <unknown>\n",
                                   stk, leaked_alloc.call_stack[stk]);
                        }
                    }
                }
            }
            leaks_reported++;
        }
    }

    if (leaks_reported) {
        T(CHECK_LEAK, "memcheck: Process %s[pid=%u] is leaking %u allocated blocks.\n",
          proc->image_path, proc->pid, leaks_reported);
    }

    T(PROC_EXIT, "memcheck: Exiting process %s[pid=%u] in thread %u. Memory leaks detected: %u\n",
      proc->image_path, proc->pid, current_tid, leaks_reported);

    /* Since the current process is exiting, we need to NULL its cached
     * descriptor and unlist it from the list of running processes. */
    current_process = NULL;
    QLIST_REMOVE(proc, global_entry);

    // Empty the process' memory mappings map.
    mmrangemap_empty(&proc->mmrange_map);
    if (proc->image_path != NULL) {
        qemu_free(proc->image_path);
    }
    qemu_free(proc);
}

void
memcheck_mmap_exepath(target_ulong vstart,
                      target_ulong vend,
                      target_ulong exec_offset,
                      const char* path)
{
    MMRangeDesc desc;
    MMRangeDesc replaced;
    RBTMapResult ins_res;

    ProcDesc* proc = get_current_process();
    if (proc == NULL) {
        ME("memcheck: MMAP(0x%08X, 0x%08X, 0x%08X, %s) Unable to look up current process. Current tid=%u",
           vstart, vend, exec_offset, path, current_tid);
        return;
    }

    /* First, unmap any overlapped section. */
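    /* Doing this first trims any existing mapping that overlaps [vstart, vend),
     * so stale or partially overlapping entries don't linger in the range map
     * once the new entry is inserted below. */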
    memcheck_unmap(vstart, vend);

    /* Add new mapping. */
    desc.map_start = vstart;
    desc.map_end = vend;
    desc.exec_offset = exec_offset;
    desc.path = qemu_malloc(strlen(path) + 1);
    if (desc.path == NULL) {
        ME("memcheck: MMAP(0x%08X, 0x%08X, 0x%08X, %s) Unable to allocate path for the entry.",
           vstart, vend, exec_offset, path);
        return;
    }
    strcpy(desc.path, path);

    ins_res = mmrangemap_insert(&proc->mmrange_map, &desc, &replaced);
    if (ins_res == RBT_MAP_RESULT_ERROR) {
        ME("memcheck: %s[pid=%u] unable to insert memory mapping entry: 0x%08X - 0x%08X",
           proc->image_path, proc->pid, vstart, vend);
        qemu_free(desc.path);
        return;
    }

    if (ins_res == RBT_MAP_RESULT_ENTRY_REPLACED) {
        MD("memcheck: %s[pid=%u] MMRANGE %s[0x%08X - 0x%08X] is replaced with %s[0x%08X - 0x%08X]",
           proc->image_path, proc->pid, replaced.path, replaced.map_start,
           replaced.map_end, desc.path, desc.map_start, desc.map_end);
        qemu_free(replaced.path);
    }

    T(PROC_MMAP, "memcheck: %s[pid=%u] %s is mapped: 0x%08X - 0x%08X + 0x%08X\n",
      proc->image_path, proc->pid, path, vstart, vend, exec_offset);
}

void
memcheck_unmap(target_ulong vstart, target_ulong vend)
{
    MMRangeDesc desc;
    ProcDesc* proc = get_current_process();
    if (proc == NULL) {
        ME("memcheck: UNMAP(0x%08X, 0x%08X) Unable to look up current process. Current tid=%u",
           vstart, vend, current_tid);
        return;
    }

    if (mmrangemap_pull(&proc->mmrange_map, vstart, vend, &desc)) {
        return;
    }

    if (desc.map_start >= vstart && desc.map_end <= vend) {
        /* The entire mapping has been deleted. */
        T(PROC_MMAP, "memcheck: %s[pid=%u] %s is unmapped: [0x%08X - 0x%08X + 0x%08X]\n",
          proc->image_path, proc->pid, desc.path, vstart, vend, desc.exec_offset);
        qemu_free(desc.path);
        return;
    }

    /* This can be the first stage of a "remap" request, where part of an
     * existing mapping has been unmapped. If that's the case, let's cut the
     * unmapped part from the block that we just pulled, and add whatever is
     * left back to the map. */
    T(PROC_MMAP, "memcheck: REMAP(0x%08X, 0x%08X + 0x%08X) -> (0x%08X, 0x%08X)\n",
       desc.map_start, desc.map_end, desc.exec_offset, vstart, vend);
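    /* When bytes are cut from the start of a mapping, exec_offset below is
     * advanced by the same amount, so module-relative addresses computed from
     * the remaining range stay correct. */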
    if (desc.map_start == vstart) {
        /* We cut a part from the beginning. Add the tail back. */
        desc.exec_offset += vend - desc.map_start;
        desc.map_start = vend;
        mmrangemap_insert(&proc->mmrange_map, &desc, NULL);
    } else if (desc.map_end == vend) {
        /* We cut a part from the tail. Add the beginning back. */
        desc.map_end = vstart;
        mmrangemap_insert(&proc->mmrange_map, &desc, NULL);
    } else {
        /* We cut a piece out of the middle. Add both remaining ends back. */
        MMRangeDesc tail;
        tail.map_start = vend;
        tail.map_end = desc.map_end;
        tail.exec_offset = vend - desc.map_start + desc.exec_offset;
        tail.path = qemu_malloc(strlen(desc.path) + 1);
        strcpy(tail.path, desc.path);
        mmrangemap_insert(&proc->mmrange_map, &tail, NULL);
        desc.map_end = vstart;
        mmrangemap_insert(&proc->mmrange_map, &desc, NULL);
    }
}
    800