/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * Linear memory allocation, tied to class loaders.
 */
#include "Dalvik.h"

#include <sys/mman.h>
#include <limits.h>
#include <errno.h>

//#define DISABLE_LINEAR_ALLOC

// Use ashmem to name the LinearAlloc section
#define USE_ASHMEM 1

#ifdef USE_ASHMEM
#include <cutils/ashmem.h>
#endif /* USE_ASHMEM */

/*
Overview

This is intended to be a simple, fast allocator for "write-once" storage.
The expectation is that this will hold small allocations that don't change,
such as parts of classes (vtables, fields, methods, interfaces).  Because
the lifetime of these items is tied to classes, which in turn are tied
to class loaders, we associate the storage with a ClassLoader object.

[ We don't yet support class unloading, and our ClassLoader implementation
is in flux, so for now we just have a single global region and the
"classLoader" argument is ignored. ]

By storing the data here, rather than on the system heap, we reduce heap
clutter, speed class loading, reduce the memory footprint (reduced heap
structure overhead), and most importantly we increase the number of pages
that remain shared between processes launched in "Zygote mode".

The 4 bytes preceding each block contain the block length.  This allows us
to support "free" and "realloc" calls in a limited way.  We don't free
storage once it has been allocated, but in some circumstances it could be
useful to erase storage to garbage values after a "free" or "realloc".
(Bad idea if we're trying to share pages.)  We need to align to 8-byte
boundaries for some architectures, so we have a 50-50 chance of getting
this for free in a given block.

A NULL value for the "classLoader" argument refers to the bootstrap class
loader, which is never unloaded (until the VM shuts down).

Because the memory is not expected to be updated, we can use mprotect to
guard the pages on debug builds.  Handy when tracking down corruption.
*/
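
/*
An illustrative sketch of the block layout described above (derived from
the description, not normative):

    | pad (0-7 bytes) | length word (4 bytes) | user data ...
                                              ^
                                              pointer returned to caller,
                                              aligned to BLOCK_ALIGN

The pad bytes appear only when needed to bring the user data up to the
next BLOCK_ALIGN boundary.
*/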

/* alignment for allocations; must be power of 2, and currently >= HEADER_EXTRA */
#define BLOCK_ALIGN         8

/* default length of memory segment (worst case is probably "dexopt") */
#define DEFAULT_MAX_LENGTH  (5*1024*1024)

/* leave enough space for a length word */
#define HEADER_EXTRA        4

/* overload the length word */
#define LENGTHFLAG_FREE    0x80000000
#define LENGTHFLAG_RW      0x40000000
#define LENGTHFLAG_MASK    (~(LENGTHFLAG_FREE|LENGTHFLAG_RW))
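
/*
 * Length word layout, as implied by the flags above:
 *
 *   bit 31      LENGTHFLAG_FREE   block has been passed to dvmLinearFree
 *   bit 30      LENGTHFLAG_RW     block's pages are currently writable
 *   bits 0-29   block length in bytes (extract with LENGTHFLAG_MASK)
 */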


/* fwd */
static void checkAllFree(Object* classLoader);


/*
 * Someday, retrieve the linear alloc struct associated with a particular
 * class loader.  For now, always use the bootstrap loader's instance.
 */
static inline LinearAllocHdr* getHeader(Object* classLoader)
{
    return gDvm.pBootLoaderAlloc;
}

/*
 * Convert a pointer to memory to a pointer to the block header (which is
 * currently just a length word).
 */
static inline u4* getBlockHeader(void* mem)
{
    return ((u4*) mem) - 1;
}

/*
 * Create a new linear allocation block.
 */
LinearAllocHdr* dvmLinearAllocCreate(Object* classLoader)
{
#ifdef DISABLE_LINEAR_ALLOC
    return (LinearAllocHdr*) 0x12345;
#endif
    LinearAllocHdr* pHdr;

    pHdr = (LinearAllocHdr*) malloc(sizeof(*pHdr));
    if (pHdr == NULL)
        return NULL;

    /*
     * "curOffset" points to the location of the next pre-block header,
     * which means we have to advance to the next BLOCK_ALIGN address and
     * back up.
     *
     * Note we leave the first page empty (see below), and start the
     * first entry on the second page at an offset that ensures the next
     * chunk of data will be properly aligned.
     */
    assert(BLOCK_ALIGN >= HEADER_EXTRA);
    pHdr->curOffset = pHdr->firstOffset =
        (BLOCK_ALIGN-HEADER_EXTRA) + SYSTEM_PAGE_SIZE;
    pHdr->mapLength = DEFAULT_MAX_LENGTH;

#ifdef USE_ASHMEM
    int fd;

    fd = ashmem_create_region("dalvik-LinearAlloc", DEFAULT_MAX_LENGTH);
    if (fd < 0) {
        LOGE("ashmem LinearAlloc failed: %s\n", strerror(errno));
        free(pHdr);
        return NULL;
    }

    pHdr->mapAddr = mmap(NULL, pHdr->mapLength, PROT_READ | PROT_WRITE,
        MAP_PRIVATE, fd, 0);
    if (pHdr->mapAddr == MAP_FAILED) {
        LOGE("LinearAlloc mmap(%d) failed: %s\n", pHdr->mapLength,
            strerror(errno));
        free(pHdr);
        close(fd);
        return NULL;
    }

    close(fd);
#else /*USE_ASHMEM*/
    // MAP_ANON is listed as "deprecated" on Linux,
    // but MAP_ANONYMOUS is not defined under Mac OS X.
    pHdr->mapAddr = mmap(NULL, pHdr->mapLength, PROT_READ | PROT_WRITE,
        MAP_PRIVATE | MAP_ANON, -1, 0);
    if (pHdr->mapAddr == MAP_FAILED) {
        LOGE("LinearAlloc mmap(%d) failed: %s\n", pHdr->mapLength,
            strerror(errno));
        free(pHdr);
        return NULL;
    }
#endif /*USE_ASHMEM*/

    /* region expected to begin on a page boundary */
    assert(((int) pHdr->mapAddr & (SYSTEM_PAGE_SIZE-1)) == 0);

    /* the system should initialize newly-mapped memory to zero */
    assert(*(u4*) (pHdr->mapAddr + pHdr->curOffset) == 0);

    /*
     * Disable access to all except starting page.  We will enable pages
     * as we use them.  This helps prevent bad pointers from working.  The
     * pages start out PROT_NONE, become read/write while we access them,
     * then go to read-only after we finish our changes.
     *
     * We have to make the first page readable because we have 4 pad bytes,
     * followed by 4 length bytes, giving an initial offset of 8.  The
     * generic code below assumes that there could have been a previous
     * allocation that wrote into those 4 pad bytes, therefore the page
     * must have been marked readable by the previous allocation.
     *
     * We insert an extra page in here to force a break in the memory map
     * so we can see ourselves more easily in "showmap".  Otherwise this
     * stuff blends into the neighboring pages.  [TODO: do we still need
     * the extra page now that we have ashmem?]
     */
    if (mprotect(pHdr->mapAddr, pHdr->mapLength, PROT_NONE) != 0) {
        LOGW("LinearAlloc init mprotect failed: %s\n", strerror(errno));
        munmap(pHdr->mapAddr, pHdr->mapLength);    /* don't leak the mapping */
        free(pHdr);
        return NULL;
    }
    if (mprotect(pHdr->mapAddr + SYSTEM_PAGE_SIZE, SYSTEM_PAGE_SIZE,
            ENFORCE_READ_ONLY ? PROT_READ : PROT_READ|PROT_WRITE) != 0)
    {
        LOGW("LinearAlloc init mprotect #2 failed: %s\n", strerror(errno));
        munmap(pHdr->mapAddr, pHdr->mapLength);
        free(pHdr);
        return NULL;
    }

    if (ENFORCE_READ_ONLY) {
        /* allocate the per-page ref count */
        int numPages = (pHdr->mapLength+SYSTEM_PAGE_SIZE-1) / SYSTEM_PAGE_SIZE;
        pHdr->writeRefCount = calloc(numPages, sizeof(short));
        if (pHdr->writeRefCount == NULL) {
            munmap(pHdr->mapAddr, pHdr->mapLength);
            free(pHdr);
            return NULL;
        }
    }

    dvmInitMutex(&pHdr->lock);

    LOGV("LinearAlloc: created region at %p-%p\n",
        pHdr->mapAddr, pHdr->mapAddr + pHdr->mapLength-1);

    return pHdr;
}

/*
 * Destroy a linear allocation area.
 *
 * We do a trivial "has everything been freed?" check before unmapping the
 * memory and freeing the LinearAllocHdr.
 */
void dvmLinearAllocDestroy(Object* classLoader)
{
#ifdef DISABLE_LINEAR_ALLOC
    return;
#endif
    LinearAllocHdr* pHdr = getHeader(classLoader);
    if (pHdr == NULL)
        return;

    checkAllFree(classLoader);

    //dvmLinearAllocDump(classLoader);

    if (gDvm.verboseShutdown) {
        LOGV("Unmapping linear allocator base=%p\n", pHdr->mapAddr);
        LOGD("LinearAlloc %p used %d of %d (%d%%)\n",
            classLoader, pHdr->curOffset, pHdr->mapLength,
            (pHdr->curOffset * 100) / pHdr->mapLength);
    }

    if (munmap(pHdr->mapAddr, pHdr->mapLength) != 0) {
        LOGW("LinearAlloc munmap(%p, %d) failed: %s\n",
            pHdr->mapAddr, pHdr->mapLength, strerror(errno));
    }
    if (ENFORCE_READ_ONLY)
        free(pHdr->writeRefCount);    /* allocated in dvmLinearAllocCreate */
    free(pHdr);
}

/*
 * Allocate "size" bytes of storage, associated with a particular class
 * loader.
 *
 * It's okay for size to be zero.
 *
 * We always leave "curOffset" pointing at the next place where we will
 * store the header that precedes the returned storage.
 *
 * This aborts the VM on failure, so it's not necessary to check for a
 * NULL return value.
 */
void* dvmLinearAlloc(Object* classLoader, size_t size)
{
    LinearAllocHdr* pHdr = getHeader(classLoader);
    int startOffset, nextOffset;
    int lastGoodOff, firstWriteOff, lastWriteOff;

#ifdef DISABLE_LINEAR_ALLOC
    return calloc(1, size);
#endif

    LOGVV("--- LinearAlloc(%p, %d)\n", classLoader, size);

    /*
     * What we'd like to do is just determine the new end-of-alloc size
     * and atomic-swap the updated value in.  The trouble is that the
     * first time we reach a new page, we need to call mprotect() to
     * make the page available, and we don't want to call mprotect() on
     * every allocation.  The problem scenario is:
     *  - thread A allocs across a page boundary, but gets preempted
     *    before mprotect() completes
     *  - thread B allocs within the new page, and doesn't call mprotect()
     */
    dvmLockMutex(&pHdr->lock);

    startOffset = pHdr->curOffset;
    assert(((startOffset + HEADER_EXTRA) & (BLOCK_ALIGN-1)) == 0);

    /*
     * Compute the new offset.  The old offset points at the address where
     * we will store the hidden block header, so we advance past that,
     * add the size of data they want, add another header's worth so we
     * know we have room for that, and round up to BLOCK_ALIGN.  That's
     * the next location where we'll put user data.  We then subtract the
     * chunk header size off so we're back to the header pointer.
     *
     * Examples:
     *   old=12 size=3 new=((12+(4*2)+3+7) & ~7)-4 = 24-4 --> 20
     *   old=12 size=5 new=((12+(4*2)+5+7) & ~7)-4 = 32-4 --> 28
     */
    nextOffset = ((startOffset + HEADER_EXTRA*2 + size + (BLOCK_ALIGN-1))
                    & ~(BLOCK_ALIGN-1)) - HEADER_EXTRA;
    LOGVV("--- old=%d size=%d new=%d\n", startOffset, size, nextOffset);

    if (nextOffset > pHdr->mapLength) {
        /*
         * We don't have to abort here.  We could fall back on the system
         * malloc(), and have our "free" call figure out what to do.  That
         * only works if the users of these functions actually free
         * everything they allocate.
         */
        LOGE("LinearAlloc exceeded capacity (%d), last=%d\n",
            pHdr->mapLength, (int) size);
        dvmAbort();
    }

    /*
     * Round up "size" to encompass the entire region, including the 0-7
     * pad bytes before the next chunk header.  This way we get maximum
     * utility out of "realloc", and when we're doing ENFORCE_READ_ONLY
     * stuff we always treat the full extent.
     */
    size = nextOffset - (startOffset + HEADER_EXTRA);
    LOGVV("--- (size now %d)\n", size);

    /*
     * See if we are starting on or have crossed into a new page.  If so,
     * call mprotect on the page(s) we're about to write to.  We have to
     * page-align the start address, but don't have to make the length a
     * SYSTEM_PAGE_SIZE multiple (but we do it anyway).
     *
     * Note that "startOffset" is not the last *allocated* byte, but rather
     * the offset of the first *unallocated* byte (which we are about to
     * write the chunk header to).  "nextOffset" is similar.
     *
     * If ENFORCE_READ_ONLY is enabled, we have to call mprotect even if
     * we've written to this page before, because it might be read-only.
     */
    lastGoodOff = (startOffset-1) & ~(SYSTEM_PAGE_SIZE-1);
    firstWriteOff = startOffset & ~(SYSTEM_PAGE_SIZE-1);
    lastWriteOff = (nextOffset-1) & ~(SYSTEM_PAGE_SIZE-1);
    LOGVV("---  lastGood=0x%04x firstWrite=0x%04x lastWrite=0x%04x\n",
        lastGoodOff, firstWriteOff, lastWriteOff);
    if (lastGoodOff != lastWriteOff || ENFORCE_READ_ONLY) {
        int cc, start, len;

        start = firstWriteOff;
        assert(start <= nextOffset);
        len = (lastWriteOff - firstWriteOff) + SYSTEM_PAGE_SIZE;

        LOGVV("---    calling mprotect(start=%d len=%d RW)\n", start, len);
        cc = mprotect(pHdr->mapAddr + start, len, PROT_READ | PROT_WRITE);
        if (cc != 0) {
            LOGE("LinearAlloc mprotect (+%d %d) failed: %s\n",
                start, len, strerror(errno));
            /* we're going to fail soon, might as well do it now */
            dvmAbort();
        }
    }

    /* update the ref counts on the now-writable pages */
    if (ENFORCE_READ_ONLY) {
        int i, start, end;

        start = firstWriteOff / SYSTEM_PAGE_SIZE;
        end = lastWriteOff / SYSTEM_PAGE_SIZE;

        LOGVV("---  marking pages %d-%d RW (alloc %d at %p)\n",
            start, end, size, pHdr->mapAddr + startOffset + HEADER_EXTRA);
        for (i = start; i <= end; i++)
            pHdr->writeRefCount[i]++;
    }

    /* stow the size in the header */
    if (ENFORCE_READ_ONLY)
        *(u4*)(pHdr->mapAddr + startOffset) = size | LENGTHFLAG_RW;
    else
        *(u4*)(pHdr->mapAddr + startOffset) = size;

    /*
     * Update data structure.
     */
    pHdr->curOffset = nextOffset;

    dvmUnlockMutex(&pHdr->lock);
    return pHdr->mapAddr + startOffset + HEADER_EXTRA;
}
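
/*
 * Typical call sequence, sketched for illustration only ("table" and
 * "count" are hypothetical caller variables):
 *
 *   Method* table = (Method*) dvmLinearAlloc(classLoader,
 *           count * sizeof(Method));
 *   ...fill in the table entries...
 *   if (ENFORCE_READ_ONLY)
 *       dvmLinearSetReadOnly(classLoader, table);
 */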

/*
 * Helper function, replaces strdup().
 */
char* dvmLinearStrdup(Object* classLoader, const char* str)
{
#ifdef DISABLE_LINEAR_ALLOC
    return strdup(str);
#endif
    int len = strlen(str);
    void* mem = dvmLinearAlloc(classLoader, len+1);
    memcpy(mem, str, len+1);
    if (ENFORCE_READ_ONLY)
        dvmLinearSetReadOnly(classLoader, mem);
    return (char*) mem;
}

/*
 * "Reallocate" a piece of memory.
 *
 * If the new size is <= the old size, we return the original pointer
 * without doing anything.
 *
 * If the new size is > the old size, we allocate new storage, copy the
 * old stuff over, and mark the old storage as free.
 */
void* dvmLinearRealloc(Object* classLoader, void* mem, size_t newSize)
{
#ifdef DISABLE_LINEAR_ALLOC
    return realloc(mem, newSize);
#endif
    LinearAllocHdr* pHdr = getHeader(classLoader);

    /* make sure we have the right region (and mem != NULL) */
    assert(mem != NULL);
    assert(mem >= (void*) pHdr->mapAddr &&
           mem < (void*) (pHdr->mapAddr + pHdr->curOffset));

    const u4* pLen = getBlockHeader(mem);
    u4 oldSize = *pLen & LENGTHFLAG_MASK;   /* mask off the flag bits */
    LOGV("--- LinearRealloc(%d) old=%d\n", newSize, oldSize);

    /* handle size reduction case */
    if (oldSize >= newSize) {
        if (ENFORCE_READ_ONLY)
            dvmLinearSetReadWrite(classLoader, mem);
        return mem;
    }

    void* newMem;

    newMem = dvmLinearAlloc(classLoader, newSize);
    assert(newMem != NULL);
    memcpy(newMem, mem, oldSize);
    dvmLinearFree(classLoader, mem);

    return newMem;
}
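
/*
 * Illustrative use (the "table" and "newCount" names are hypothetical):
 *
 *   table = (Method*) dvmLinearRealloc(classLoader, table,
 *           newCount * sizeof(Method));
 *
 * On growth the old block is marked free in place; only the returned
 * pointer remains valid.
 */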


/*
 * Update the read/write status of one or more pages.
 */
static void updatePages(Object* classLoader, void* mem, int direction)
{
    LinearAllocHdr* pHdr = getHeader(classLoader);
    dvmLockMutex(&pHdr->lock);

    /* make sure we have the right region */
    assert(mem >= (void*) pHdr->mapAddr &&
           mem < (void*) (pHdr->mapAddr + pHdr->curOffset));

    u4* pLen = getBlockHeader(mem);
    u4 len = *pLen & LENGTHFLAG_MASK;
    int firstPage, lastPage;

    firstPage = ((u1*)pLen - (u1*)pHdr->mapAddr) / SYSTEM_PAGE_SIZE;
    lastPage = ((u1*)mem - (u1*)pHdr->mapAddr + (len-1)) / SYSTEM_PAGE_SIZE;
    LOGVV("--- updating pages %d-%d (%d)\n", firstPage, lastPage, direction);

    int i, cc;

    /*
     * Update individual pages.  We could do some sort of "lazy update" to
     * combine mprotect calls, but that's almost certainly more trouble
     * than it's worth.
     */
    for (i = firstPage; i <= lastPage; i++) {
        if (direction < 0) {
            /*
             * Trying to mark read-only.
             */
            if (i == firstPage) {
                if ((*pLen & LENGTHFLAG_RW) == 0) {
                    LOGW("Double RO on %p\n", mem);
                    dvmAbort();
                } else
                    *pLen &= ~LENGTHFLAG_RW;
            }

            if (pHdr->writeRefCount[i] == 0) {
                LOGE("Can't make page %d any less writable\n", i);
                dvmAbort();
            }
            pHdr->writeRefCount[i]--;
            if (pHdr->writeRefCount[i] == 0) {
                LOGVV("---  prot page %d RO\n", i);
                cc = mprotect(pHdr->mapAddr + SYSTEM_PAGE_SIZE * i,
                        SYSTEM_PAGE_SIZE, PROT_READ);
                assert(cc == 0);
            }
        } else {
            /*
             * Trying to mark writable.
             */
            if (pHdr->writeRefCount[i] >= 32767) {
                LOGE("Can't make page %d any more writable\n", i);
                dvmAbort();
            }
            if (pHdr->writeRefCount[i] == 0) {
                LOGVV("---  prot page %d RW\n", i);
                cc = mprotect(pHdr->mapAddr + SYSTEM_PAGE_SIZE * i,
                        SYSTEM_PAGE_SIZE, PROT_READ | PROT_WRITE);
                assert(cc == 0);
            }
            pHdr->writeRefCount[i]++;

            if (i == firstPage) {
                if ((*pLen & LENGTHFLAG_RW) != 0) {
                    LOGW("Double RW on %p\n", mem);
                    dvmAbort();
                } else
                    *pLen |= LENGTHFLAG_RW;
            }
        }
    }

    dvmUnlockMutex(&pHdr->lock);
}
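
/*
 * Illustrative page-state sequence under ENFORCE_READ_ONLY (an assumed
 * example with two blocks, A and B, sharing one page):
 *
 *   alloc A            writeRefCount[page] 0 -> 1, page mapped RW
 *   alloc B            writeRefCount[page] 1 -> 2
 *   setReadOnly(A)     writeRefCount[page] 2 -> 1, page stays RW
 *   setReadOnly(B)     writeRefCount[page] 1 -> 0, mprotect(PROT_READ)
 */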

/*
 * Try to mark the pages in which a chunk of memory lives as read-only.
 * Whether or not the pages actually change state depends on how many
 * others are trying to access the same pages.
 *
 * Only call here if ENFORCE_READ_ONLY is true.
 */
void dvmLinearSetReadOnly(Object* classLoader, void* mem)
{
#ifdef DISABLE_LINEAR_ALLOC
    return;
#endif
    updatePages(classLoader, mem, -1);
}

/*
 * Make the pages on which "mem" sits read-write.
 *
 * This covers the header as well as the data itself.  (We could add a
 * "header-only" mode for dvmLinearFree.)
 *
 * Only call here if ENFORCE_READ_ONLY is true.
 */
void dvmLinearSetReadWrite(Object* classLoader, void* mem)
{
#ifdef DISABLE_LINEAR_ALLOC
    return;
#endif
    updatePages(classLoader, mem, 1);
}

/*
 * Mark an allocation as free.
 */
void dvmLinearFree(Object* classLoader, void* mem)
{
#ifdef DISABLE_LINEAR_ALLOC
    free(mem);
    return;
#endif
    if (mem == NULL)
        return;

    LinearAllocHdr* pHdr = getHeader(classLoader);

    /* make sure we have the right region */
    assert(mem >= (void*) pHdr->mapAddr &&
           mem < (void*) (pHdr->mapAddr + pHdr->curOffset));

    if (ENFORCE_READ_ONLY)
        dvmLinearSetReadWrite(classLoader, mem);

    u4* pLen = getBlockHeader(mem);
    *pLen |= LENGTHFLAG_FREE;

    if (ENFORCE_READ_ONLY)
        dvmLinearSetReadOnly(classLoader, mem);
}

/*
 * For debugging, dump the contents of a linear alloc area.
 *
 * We grab the lock so that the header contents and list output are
 * consistent.
 */
void dvmLinearAllocDump(Object* classLoader)
{
#ifdef DISABLE_LINEAR_ALLOC
    return;
#endif
    LinearAllocHdr* pHdr = getHeader(classLoader);

    dvmLockMutex(&pHdr->lock);

    LOGI("LinearAlloc classLoader=%p\n", classLoader);
    LOGI("  mapAddr=%p mapLength=%d firstOffset=%d\n",
        pHdr->mapAddr, pHdr->mapLength, pHdr->firstOffset);
    LOGI("  curOffset=%d\n", pHdr->curOffset);

    int off = pHdr->firstOffset;
    u4 rawLen, fullLen;

    while (off < pHdr->curOffset) {
        rawLen = *(u4*) (pHdr->mapAddr + off);
        fullLen = ((HEADER_EXTRA*2 + (rawLen & LENGTHFLAG_MASK))
                    & ~(BLOCK_ALIGN-1));

        LOGI("  %p (%3d): %clen=%d%s\n", pHdr->mapAddr + off + HEADER_EXTRA,
            (int) ((off + HEADER_EXTRA) / SYSTEM_PAGE_SIZE),
            (rawLen & LENGTHFLAG_FREE) != 0 ? '*' : ' ',
            rawLen & LENGTHFLAG_MASK,
            (rawLen & LENGTHFLAG_RW) != 0 ? " [RW]" : "");

        off += fullLen;
    }

    if (ENFORCE_READ_ONLY) {
        LOGI("writeRefCount map:\n");

        int numPages = (pHdr->mapLength+SYSTEM_PAGE_SIZE-1) / SYSTEM_PAGE_SIZE;
        int zstart = 0;
        int i;

        for (i = 0; i < numPages; i++) {
            int count = pHdr->writeRefCount[i];

            if (count != 0) {
                if (zstart < i-1)
                    printf(" %d-%d: zero\n", zstart, i-1);
                else if (zstart == i-1)
                    printf(" %d: zero\n", zstart);
                zstart = i+1;
                printf(" %d: %d\n", i, count);
            }
        }
        if (zstart < i)
            printf(" %d-%d: zero\n", zstart, i-1);
    }

    LOGD("LinearAlloc %p using %d of %d (%d%%)\n",
        classLoader, pHdr->curOffset, pHdr->mapLength,
        (pHdr->curOffset * 100) / pHdr->mapLength);

    dvmUnlockMutex(&pHdr->lock);
}

/*
 * Verify that all blocks are freed.
 *
 * This should only be done as we're shutting down, but there could be a
 * daemon thread that's still trying to do something, so we grab the locks.
 */
static void checkAllFree(Object* classLoader)
{
#ifdef DISABLE_LINEAR_ALLOC
    return;
#endif
    LinearAllocHdr* pHdr = getHeader(classLoader);

    dvmLockMutex(&pHdr->lock);

    int off = pHdr->firstOffset;
    u4 rawLen, fullLen;

    while (off < pHdr->curOffset) {
        rawLen = *(u4*) (pHdr->mapAddr + off);
        fullLen = ((HEADER_EXTRA*2 + (rawLen & LENGTHFLAG_MASK))
                    & ~(BLOCK_ALIGN-1));

        if ((rawLen & LENGTHFLAG_FREE) == 0) {
            LOGW("LinearAlloc %p not freed: %p len=%d\n", classLoader,
                pHdr->mapAddr + off + HEADER_EXTRA, rawLen & LENGTHFLAG_MASK);
        }

        off += fullLen;
    }

    dvmUnlockMutex(&pHdr->lock);
}

/*
 * Determine if [start, start+length) is contained in the in-use area of
 * a single LinearAlloc.  The full set of linear allocators is scanned.
 *
 * [ Since we currently only have one region, this is pretty simple.  In
 * the future we'll need to traverse a table of class loaders. ]
 */
bool dvmLinearAllocContains(const void* start, size_t length)
{
    LinearAllocHdr* pHdr = getHeader(NULL);

    if (pHdr == NULL)
        return false;

    return (char*) start >= pHdr->mapAddr &&
           ((char*)start + length) <= (pHdr->mapAddr + pHdr->curOffset);
}
    708