/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "Dalvik.h"
#include "CompilerInternals.h"

static ArenaMemBlock *arenaHead, *currentArena;
static int numArenaBlocks;

/* Allocate the initial memory block for arena-based allocation */
bool dvmCompilerHeapInit(void)
{
    assert(arenaHead == NULL);
    arenaHead =
        (ArenaMemBlock *) malloc(sizeof(ArenaMemBlock) + ARENA_DEFAULT_SIZE);
    if (arenaHead == NULL) {
        LOGE("No memory left to create compiler heap memory\n");
        return false;
    }
    arenaHead->blockSize = ARENA_DEFAULT_SIZE;
    currentArena = arenaHead;
    currentArena->bytesAllocated = 0;
    currentArena->next = NULL;
    numArenaBlocks = 1;

    return true;
}

/* Arena-based malloc for compilation tasks */
void * dvmCompilerNew(size_t size, bool zero)
{
    size = (size + 3) & ~3;
retry:
    /* Normal case - space is available in the current page */
    if (size + currentArena->bytesAllocated <= currentArena->blockSize) {
        void *ptr;
        ptr = &currentArena->ptr[currentArena->bytesAllocated];
        currentArena->bytesAllocated += size;
        if (zero) {
            memset(ptr, 0, size);
        }
        return ptr;
    } else {
        /*
         * See if there are previously allocated arena blocks before the last
         * reset
         */
        if (currentArena->next) {
            currentArena = currentArena->next;
            goto retry;
        }

        size_t blockSize = (size < ARENA_DEFAULT_SIZE) ?
            ARENA_DEFAULT_SIZE : size;
        /* Time to allocate a new arena */
        ArenaMemBlock *newArena = (ArenaMemBlock *)
            malloc(sizeof(ArenaMemBlock) + blockSize);
        if (newArena == NULL) {
            LOGE("Arena allocation failure");
            dvmAbort();
        }
        newArena->blockSize = blockSize;
        newArena->bytesAllocated = 0;
        newArena->next = NULL;
        currentArena->next = newArena;
        currentArena = newArena;
        numArenaBlocks++;
        if (numArenaBlocks > 10)
            LOGI("Total arena pages for JIT: %d", numArenaBlocks);
        goto retry;
    }
    return NULL;
}

/* Reclaim all the arena blocks allocated so far */
void dvmCompilerArenaReset(void)
{
    ArenaMemBlock *block;

    for (block = arenaHead; block; block = block->next) {
        block->bytesAllocated = 0;
    }
    currentArena = arenaHead;
}
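
/*
 * Illustrative usage sketch (not part of this file; the MIR allocation and
 * the call sites are assumptions): a compilation task allocates all of its
 * transient data structures through dvmCompilerNew() and releases them in
 * bulk once the trace is done, e.g.
 *
 *     MIR *mir = (MIR *) dvmCompilerNew(sizeof(MIR), true);
 *     ...build and translate the trace...
 *     dvmCompilerArenaReset();   // recycle every block for the next trace
 *
 * Individual allocations are never freed; memory is only reclaimed by
 * resetting the whole arena.
 */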

/* Growable List initialization */
void dvmInitGrowableList(GrowableList *gList, size_t initLength)
{
    gList->numAllocated = initLength;
    gList->numUsed = 0;
    gList->elemList = (void **) dvmCompilerNew(sizeof(void *) * initLength,
                                               true);
}

/* Expand the capacity of a growable list */
static void expandGrowableList(GrowableList *gList)
{
    int newLength = gList->numAllocated;
    if (newLength < 128) {
        newLength <<= 1;
    } else {
        newLength += 128;
    }
    void *newArray = dvmCompilerNew(sizeof(void *) * newLength, true);
    memcpy(newArray, gList->elemList, sizeof(void *) * gList->numAllocated);
    gList->numAllocated = newLength;
    gList->elemList = newArray;
}

/* Insert a new element into the growable list */
void dvmInsertGrowableList(GrowableList *gList, void *elem)
{
    assert(gList->numAllocated != 0);
    if (gList->numUsed == gList->numAllocated) {
        expandGrowableList(gList);
    }
    gList->elemList[gList->numUsed++] = elem;
}
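
/*
 * Illustrative usage sketch (assumed caller code, not part of this file):
 *
 *     GrowableList blockList;
 *     dvmInitGrowableList(&blockList, 4);
 *     dvmInsertGrowableList(&blockList, (void *) bb);
 *     for (size_t idx = 0; idx < blockList.numUsed; idx++) {
 *         BasicBlock *curBB = (BasicBlock *) blockList.elemList[idx];
 *         ...
 *     }
 *
 * Since the backing array comes from dvmCompilerNew(), the list is only
 * valid until the next dvmCompilerArenaReset().
 */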

/* Debug Utility - dump a compilation unit */
void dvmCompilerDumpCompilationUnit(CompilationUnit *cUnit)
{
    int i;
    BasicBlock *bb;
    char *blockTypeNames[] = {
        "Normal Chaining Cell",
        "Hot Chaining Cell",
        "Singleton Chaining Cell",
        "Predicted Chaining Cell",
        "Backward Branch",
        "Chaining Cell Gap",
        "N/A",
        "Method Entry Block",
        "Trace Entry Block",
        "Code Block",
        "Trace Exit Block",
        "Method Exit Block",
        "PC Reconstruction",
        "Exception Handling",
    };

    LOGD("Compiling %s %s", cUnit->method->clazz->descriptor,
         cUnit->method->name);
    LOGD("%d insns", dvmGetMethodInsnsSize(cUnit->method));
    LOGD("%d blocks in total", cUnit->numBlocks);

    for (i = 0; i < cUnit->numBlocks; i++) {
        bb = cUnit->blockList[i];
        LOGD("Block %d (%s) (insn %04x - %04x%s)\n",
             bb->id,
             blockTypeNames[bb->blockType],
             bb->startOffset,
             bb->lastMIRInsn ? bb->lastMIRInsn->offset : bb->startOffset,
             bb->lastMIRInsn ? "" : " empty");
        if (bb->taken) {
            LOGD("  Taken branch: block %d (%04x)\n",
                 bb->taken->id, bb->taken->startOffset);
        }
        if (bb->fallThrough) {
            LOGD("  Fallthrough : block %d (%04x)\n",
                 bb->fallThrough->id, bb->fallThrough->startOffset);
        }
    }
}

/*
 * dvmHashForeach callback.
 */
static int dumpMethodStats(void *compilerMethodStats, void *totalMethodStats)
{
    CompilerMethodStats *methodStats =
        (CompilerMethodStats *) compilerMethodStats;
    CompilerMethodStats *totalStats =
        (CompilerMethodStats *) totalMethodStats;

    totalStats->dalvikSize += methodStats->dalvikSize;
    totalStats->compiledDalvikSize += methodStats->compiledDalvikSize;
    totalStats->nativeSize += methodStats->nativeSize;

    /* Enable the following when fine-tuning the JIT performance */
#if 0
    int limit = (methodStats->dalvikSize >> 2) * 3;

    /* If over 3/4 of the Dalvik code is compiled, print something */
    if (methodStats->compiledDalvikSize >= limit) {
        LOGD("Method stats: %s%s, %d/%d (compiled/total Dalvik), %d (native)",
             methodStats->method->clazz->descriptor,
             methodStats->method->name,
             methodStats->compiledDalvikSize,
             methodStats->dalvikSize,
             methodStats->nativeSize);
    }
#endif
    return 0;
}

/*
 * Dump the current stats of the compiler, including the number of bytes used
 * in the code cache, arena size, work queue length, and various JIT stats.
 */
void dvmCompilerDumpStats(void)
{
    CompilerMethodStats totalMethodStats;

    memset(&totalMethodStats, 0, sizeof(CompilerMethodStats));
    LOGD("%d compilations using %d + %d bytes",
         gDvmJit.numCompilations,
         gDvmJit.templateSize,
         gDvmJit.codeCacheByteUsed - gDvmJit.templateSize);
    LOGD("Compiler arena uses %d blocks (%d bytes each)",
         numArenaBlocks, ARENA_DEFAULT_SIZE);
    LOGD("Compiler work queue length is %d/%d", gDvmJit.compilerQueueLength,
         gDvmJit.compilerMaxQueued);
    dvmJitStats();
    dvmCompilerArchDump();
    if (gDvmJit.methodStatsTable) {
        dvmHashForeach(gDvmJit.methodStatsTable, dumpMethodStats,
                       &totalMethodStats);
        LOGD("Code size stats: %d/%d (compiled/total Dalvik), %d (native)",
             totalMethodStats.compiledDalvikSize,
             totalMethodStats.dalvikSize,
             totalMethodStats.nativeSize);
    }
}

/*
 * Allocate a bit vector with enough space to hold at least the specified
 * number of bits.
 *
 * NOTE: this is the sister implementation of dvmAllocBitVector. In this
 * version memory is allocated from the compiler arena.
 */
BitVector* dvmCompilerAllocBitVector(int startBits, bool expandable)
{
    BitVector* bv;
    int count;

    assert(sizeof(bv->storage[0]) == 4);        /* assuming 32-bit units */
    assert(startBits >= 0);

    bv = (BitVector*) dvmCompilerNew(sizeof(BitVector), false);

    count = (startBits + 31) >> 5;

    bv->storageSize = count;
    bv->expandable = expandable;
    bv->storage = (u4*) dvmCompilerNew(count * sizeof(u4), true);
    return bv;
}
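
/*
 * Sizing note: storage is allocated in 32-bit words, so the word count is
 * ceil(startBits / 32), computed above as (startBits + 31) >> 5. For
 * example, startBits = 40 yields (40 + 31) >> 5 = 2 words, i.e. 64 bits.
 */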

/*
 * Mark the specified bit as "set".
 *
 * Returns "false" if the bit is outside the range of the vector and we're
 * not allowed to expand.
 *
 * NOTE: this is the sister implementation of dvmSetBit. In this version
 * memory is allocated from the compiler arena.
 */
bool dvmCompilerSetBit(BitVector *pBits, int num)
{
    assert(num >= 0);
    if (num >= pBits->storageSize * (int)sizeof(u4) * 8) {
        if (!pBits->expandable)
            return false;

        /* Round up to enough whole words to hold bits [0..num] */
        int newSize = (num + 1 + 31) >> 5;
        assert(newSize > pBits->storageSize);
        u4 *newStorage = dvmCompilerNew(newSize * sizeof(u4), false);
        memcpy(newStorage, pBits->storage, pBits->storageSize * sizeof(u4));
        memset(&newStorage[pBits->storageSize], 0,
               (newSize - pBits->storageSize) * sizeof(u4));
        pBits->storage = newStorage;
        pBits->storageSize = newSize;
    }

    pBits->storage[num >> 5] |= 1 << (num & 0x1f);
    return true;
}

void dvmDebugBitVector(char *msg, const BitVector *bv, int length)
{
    int i;

    LOGE("%s", msg);
    for (i = 0; i < length; i++) {
        if (dvmIsBitSet(bv, i)) {
            LOGE("Bit %d is set", i);
        }
    }
}

void dvmCompilerAbort(CompilationUnit *cUnit)
{
    LOGE("Jit: aborting trace compilation, reverting to interpreter");
    /* Force a traceback in debug builds */
    assert(0);
    /*
     * Abort translation and force to interpret-only for this trace
     * Matching setjmp in compiler thread work loop in Compiler.c.
     */
    longjmp(*cUnit->bailPtr, 1);
}
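
/*
 * Illustrative sketch of the matching bail-out site (the real code lives in
 * the compiler thread work loop in Compiler.c; the local variable names here
 * are assumptions):
 *
 *     jmp_buf jmpBuf;
 *     cUnit.bailPtr = &jmpBuf;
 *     if (setjmp(jmpBuf) == 0) {
 *         ...translate the trace...
 *     } else {
 *         // dvmCompilerAbort() longjmp'ed back; leave this trace to the
 *         // interpreter
 *     }
 */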