/* Copyright (C) 2007-2010 The Android Open Source Project
**
** This software is licensed under the terms of the GNU General Public
** License version 2, as published by the Free Software Foundation, and
** may be copied, distributed, and modified under those terms.
**
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
** GNU General Public License for more details.
*/

/*
 * Contains implementation of routines that maintain a red-black tree of
 * memory blocks allocated by the guest system.
 */

/* This file should compile only if qemu is built with the memory checking
 * configuration turned on. */
#ifndef CONFIG_MEMCHECK
#error CONFIG_MEMCHECK is not defined.
#endif  // CONFIG_MEMCHECK

#include "memcheck_malloc_map.h"
#include "memcheck_util.h"
#include "memcheck_logging.h"

/* Global flag indicating whether or not __ld/__stx_mmu should be instrumented
 * to check for read / write access violations.
 * Defined in memcheck.c.
 */
extern int memcheck_instrument_mmu;

/* Allocation descriptor stored in the map. */
typedef struct AllocMapEntry {
    /* R-B tree entry. */
    RB_ENTRY(AllocMapEntry) rb_entry;

    /* Allocation descriptor for this entry. */
    MallocDescEx            desc;
} AllocMapEntry;

// =============================================================================
// Inlines
// =============================================================================

/* Gets address of the beginning of an allocation block for the given entry in
 * the map.
 * Param:
 *  adesc - Entry in the allocation descriptors map.
 * Return:
 *  Address of the beginning of an allocation block for the given entry in the
 *  map.
 */
static inline target_ulong
allocmapentry_alloc_begins(const AllocMapEntry* adesc)
{
    return adesc->desc.malloc_desc.ptr;
}

/* Gets address of the end of an allocation block for the given entry in
 * the map.
 * Param:
 *  adesc - Entry in the allocation descriptors map.
 * Return:
 *  Address of the end of an allocation block for the given entry in the map.
 */
static inline target_ulong
allocmapentry_alloc_ends(const AllocMapEntry* adesc)
{
    return mallocdesc_get_alloc_end(&adesc->desc.malloc_desc);
}

// =============================================================================
// R-B Tree implementation
// =============================================================================

/* Compare routine for the allocation descriptors map.
 * Param:
 *  d1 - First map entry to compare.
 *  d2 - Second map entry to compare.
 * Return:
 *  0 - Descriptors are equal. Note that descriptors are considered to be
 *      equal iff memory blocks they describe intersect in any part.
 *  1 - d1 is greater than d2.
 *  -1 - d1 is less than d2.
 */
static inline int
cmp_rb(AllocMapEntry* d1, AllocMapEntry* d2)
{
    const target_ulong start1 = allocmapentry_alloc_begins(d1);
    const target_ulong start2 = allocmapentry_alloc_begins(d2);

    if (start1 < start2) {
        return (allocmapentry_alloc_ends(d1) - 1) < start2 ? -1 : 0;
    }
    return (allocmapentry_alloc_ends(d2) - 1) < start1 ? 1 : 0;
}

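/* Illustration (hypothetical values, not part of the original code): with the
 * intersection-based comparator above, an entry whose block spans
 * [0x40001000, 0x40001020) compares equal to a query descriptor built for a
 * single byte at 0x4000101C, because the two ranges overlap:
 *
 *   AllocMapEntry query;
 *   query.desc.malloc_desc.ptr = 0x4000101C;
 *   query.desc.malloc_desc.requested_bytes = 1;
 *   query.desc.malloc_desc.prefix_size = 0;
 *   query.desc.malloc_desc.suffix_size = 0;
 *   // RB_FIND compares with cmp_rb, so it returns the entry whose block
 *   // contains the queried address, if such an entry exists.
 *   AllocMapEntry* found = AllocMap_RB_FIND(map, &query);
 *
 * This is what allows allocmap_find below to resolve any address inside an
 * allocated block back to that block's descriptor. */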
/* Expands RB macros here. */
RB_GENERATE(AllocMap, AllocMapEntry, rb_entry, cmp_rb);

// =============================================================================
// Static routines
// =============================================================================

/* Inserts new (or replaces existing) entry into allocation descriptors map.
 * See comments on allocmap_insert routine in the header file for details
 * about this routine.
 */
static RBTMapResult
allocmap_insert_desc(AllocMap* map,
                     AllocMapEntry* adesc,
                     MallocDescEx* replaced)
{
    AllocMapEntry* existing = AllocMap_RB_INSERT(map, adesc);
    if (existing == NULL) {
        return RBT_MAP_RESULT_ENTRY_INSERTED;
    }

    // Matching entry exists. Let's see if we need to replace it.
    if (replaced == NULL) {
        return RBT_MAP_RESULT_ENTRY_ALREADY_EXISTS;
    }

    /* Copy existing entry to the provided buffer and replace it
     * with the new one. */
    memcpy(replaced, &existing->desc, sizeof(MallocDescEx));
    AllocMap_RB_REMOVE(map, existing);
    qemu_free(existing);
    AllocMap_RB_INSERT(map, adesc);
    return RBT_MAP_RESULT_ENTRY_REPLACED;
}

/* Finds an entry in the allocation descriptors map that matches the given
 * address range.
 * Param:
 *  map - Allocation descriptors map where to search for an entry.
 *  address - Virtual address in the guest's user space to find matching
 *      entry for.
 *  block_size - Size (in bytes) of the address range to match. Since entries
 *      compare equal when their ranges intersect, a size of 1 finds the entry
 *      whose block contains the given address.
 * Return:
 *  Address of an allocation descriptors map entry that matches the given
 *  address, or NULL if no such entry has been found.
 */
static inline AllocMapEntry*
allocmap_find_entry(const AllocMap* map,
                    target_ulong address,
                    uint32_t block_size)
{
    AllocMapEntry adesc;
    adesc.desc.malloc_desc.ptr = address;
    adesc.desc.malloc_desc.requested_bytes = block_size;
    adesc.desc.malloc_desc.prefix_size = 0;
    adesc.desc.malloc_desc.suffix_size = 0;
    return AllocMap_RB_FIND((AllocMap*)map, &adesc);
}

// =============================================================================
// Map API
// =============================================================================

void
allocmap_init(AllocMap* map)
{
    RB_INIT(map);
}

RBTMapResult
allocmap_insert(AllocMap* map, const MallocDescEx* desc, MallocDescEx* replaced)
{
    RBTMapResult ret;

    // Allocate and initialize new map entry.
    AllocMapEntry* adesc = qemu_malloc(sizeof(AllocMapEntry));
    if (adesc == NULL) {
        ME("memcheck: Unable to allocate new AllocMapEntry on insert.");
        return RBT_MAP_RESULT_ERROR;
    }
    memcpy(&adesc->desc, desc, sizeof(MallocDescEx));

    // Insert new entry into the map.
    ret = allocmap_insert_desc(map, adesc, replaced);
    if (ret == RBT_MAP_RESULT_ENTRY_ALREADY_EXISTS ||
        ret == RBT_MAP_RESULT_ERROR) {
        /* Another descriptor already exists for this block, or an error
         * occurred. We have to free the new descriptor, as it wasn't inserted. */
        qemu_free(adesc);
    }
    return ret;
}

MallocDescEx*
allocmap_find(const AllocMap* map, target_ulong address, uint32_t block_size)
{
    AllocMapEntry* adesc = allocmap_find_entry(map, address, block_size);
    return adesc != NULL ? &adesc->desc : NULL;
}

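/* A hypothetical usage sketch (illustrative only, not part of the original
 * code): a caller tracking a guest allocation could register and resolve it
 * roughly as follows, assuming guest_ptr, size and map come from the caller:
 *
 *   MallocDescEx desc;
 *   memset(&desc, 0, sizeof(desc));
 *   desc.malloc_desc.ptr = guest_ptr;          // start of the guest block
 *   desc.malloc_desc.requested_bytes = size;   // bytes requested by the guest
 *   if (allocmap_insert(&map, &desc, NULL) == RBT_MAP_RESULT_ENTRY_INSERTED) {
 *       // Any address inside the block now resolves back to its descriptor.
 *       MallocDescEx* found = allocmap_find(&map, guest_ptr, 1);
 *   }
 */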
int
allocmap_pull(AllocMap* map, target_ulong address, MallocDescEx* pulled)
{
    AllocMapEntry* adesc = allocmap_find_entry(map, address, 1);
    if (adesc != NULL) {
        memcpy(pulled, &adesc->desc, sizeof(MallocDescEx));
        AllocMap_RB_REMOVE(map, adesc);
        qemu_free(adesc);
        return 0;
    } else {
        return -1;
    }
}

int
allocmap_pull_first(AllocMap* map, MallocDescEx* pulled)
{
    AllocMapEntry* first = RB_MIN(AllocMap, map);
    if (first != NULL) {
        memcpy(pulled, &first->desc, sizeof(MallocDescEx));
        AllocMap_RB_REMOVE(map, first);
        qemu_free(first);
        return 0;
    } else {
        return -1;
    }
}

int
allocmap_copy(AllocMap* to,
              const AllocMap* from,
              uint32_t set_flags,
              uint32_t clear_flags)
{
    AllocMapEntry* entry;
    RB_FOREACH(entry, AllocMap, (AllocMap*)from) {
        RBTMapResult ins_res;
        AllocMapEntry* new_entry =
                (AllocMapEntry*)qemu_malloc(sizeof(AllocMapEntry));
        if (new_entry == NULL) {
            ME("memcheck: Unable to allocate new AllocMapEntry on copy.");
            return -1;
        }
        memcpy(new_entry, entry, sizeof(AllocMapEntry));
        new_entry->desc.flags &= ~clear_flags;
        new_entry->desc.flags |= set_flags;
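        /* Deep-copy the saved call stack: each map owns its own copy of the
         * array, since routines that remove entries (e.g. allocmap_empty)
         * free call_stack for the descriptors they pull. */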
        if (entry->desc.call_stack_count) {
            new_entry->desc.call_stack =
               qemu_malloc(entry->desc.call_stack_count * sizeof(target_ulong));
            memcpy(new_entry->desc.call_stack, entry->desc.call_stack,
                   entry->desc.call_stack_count * sizeof(target_ulong));
        } else {
            new_entry->desc.call_stack = NULL;
        }
        new_entry->desc.call_stack_count = entry->desc.call_stack_count;
        ins_res = allocmap_insert_desc(to, new_entry, NULL);
        if (ins_res == RBT_MAP_RESULT_ENTRY_INSERTED) {
            if (memcheck_instrument_mmu) {
                // Invalidate TLB cache for inserted entry.
                invalidate_tlb_cache(new_entry->desc.malloc_desc.ptr,
                        mallocdesc_get_alloc_end(&new_entry->desc.malloc_desc));
            }
        } else {
            ME("memcheck: Unable to insert new map entry on copy. Insert returned %u",
               ins_res);
            if (new_entry->desc.call_stack != NULL) {
                qemu_free(new_entry->desc.call_stack);
            }
            qemu_free(new_entry);
            return -1;
        }
    }

    return 0;
}

int
allocmap_empty(AllocMap* map)
{
    MallocDescEx pulled;
    int removed = 0;

    while (!allocmap_pull_first(map, &pulled)) {
        removed++;
        if (pulled.call_stack != NULL) {
            qemu_free(pulled.call_stack);
        }
    }

    return removed;
}