/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * This file contains codegen and support common to all supported
 * ARM variants.  It is included by:
 *
 *        Codegen-$(TARGET_ARCH_VARIANT).c
 *
 * which combines this common code with specific support found in the
 * applicable directory below this one.
 */

#include "compiler/Loop.h"

/* Array holding the entry offset of each template relative to the first one */
static intptr_t templateEntryOffsets[TEMPLATE_LAST_MARK];

/* Track exercised opcodes */
static int opcodeCoverage[256];

static void setMemRefType(ArmLIR *lir, bool isLoad, int memType)
{
    u8 *maskPtr;
    u8 mask;
    assert(EncodingMap[lir->opCode].flags & (IS_LOAD | IS_STORE));
    if (isLoad) {
        maskPtr = &lir->useMask;
        mask = ENCODE_MEM_USE;
    } else {
        maskPtr = &lir->defMask;
        mask = ENCODE_MEM_DEF;
    }
    /* Clear out the memref flags */
    *maskPtr &= ~mask;
    /* ..and then add back the one we need */
    switch (memType) {
        case kLiteral:
            assert(isLoad);
            *maskPtr |= (ENCODE_LITERAL | ENCODE_LITPOOL_REF);
            break;
        case kDalvikReg:
            *maskPtr |= (ENCODE_DALVIK_REG | ENCODE_FRAME_REF);
            break;
        case kHeapRef:
            *maskPtr |= ENCODE_HEAP_REF;
            break;
        default:
            LOGE("Jit: invalid memref kind - %d", memType);
            assert(0);  // Bail on a debug build; assume the worst case in the field
            *maskPtr |= ENCODE_ALL;
    }
}

/*
 * Mark load/store instructions that access Dalvik registers through rFP +
 * offset.
 */
static void annotateDalvikRegAccess(ArmLIR *lir, int regId, bool isLoad)
{
    setMemRefType(lir, isLoad, kDalvikReg);

    /*
     * Store the Dalvik register id in aliasInfo.  Mark the MSB if it is a
     * 64-bit access.
     */
    lir->aliasInfo = regId;
    if (DOUBLEREG(lir->operands[0])) {
        lir->aliasInfo |= 0x80000000;
    }
}

/*
 * Decode the register id and mark the corresponding bit(s).
 */
static inline void setupRegMask(u8 *mask, int reg)
{
    u8 seed;
    int shift;
    int regId = reg & 0x1f;

    /*
     * Each double register is equal to a pair of single-precision FP registers
     */
    seed = DOUBLEREG(reg) ? 3 : 1;
    /* FP register starts at bit position 16 */
    shift = FPREG(reg) ? kFPReg0 : 0;
    /* Expand the double register id into single offset */
    shift += regId;
    *mask |= seed << shift;
}
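
/*
 * Illustrative note (not from the original source): assuming DOUBLEREG() and
 * FPREG() test the register encoding as the comments above suggest, calling
 * setupRegMask(&mask, reg) for a double-precision register whose low five
 * bits are 4 computes shift = kFPReg0 + 4 with seed = 3, i.e. it marks two
 * adjacent bits -- the pair of single-precision registers backing the double.
 * A core register such as r4 instead sets the single bit at position 4.
 */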

/*
 * Set up the proper fields in the resource mask
 */
static void setupResourceMasks(ArmLIR *lir)
{
    int opCode = lir->opCode;
    int flags;

    if (opCode <= 0) {
        lir->useMask = lir->defMask = 0;
        return;
    }

    flags = EncodingMap[lir->opCode].flags;

    /* Set up the mask for resources that are updated */
    if (flags & (IS_LOAD | IS_STORE)) {
        /* Default to heap - will catch specialized classes later */
        setMemRefType(lir, flags & IS_LOAD, kHeapRef);
    }

    if (flags & IS_BRANCH) {
        lir->defMask |= ENCODE_REG_PC;
        lir->useMask |= ENCODE_REG_PC;
    }

    if (flags & REG_DEF0) {
        setupRegMask(&lir->defMask, lir->operands[0]);
    }

    if (flags & REG_DEF1) {
        setupRegMask(&lir->defMask, lir->operands[1]);
    }

    if (flags & REG_DEF_SP) {
        lir->defMask |= ENCODE_REG_SP;
    }

    if (flags & REG_DEF_LR) {
        lir->defMask |= ENCODE_REG_LR;
    }

    if (flags & REG_DEF_LIST0) {
        lir->defMask |= ENCODE_REG_LIST(lir->operands[0]);
    }

    if (flags & REG_DEF_LIST1) {
        lir->defMask |= ENCODE_REG_LIST(lir->operands[1]);
    }

    if (flags & SETS_CCODES) {
        lir->defMask |= ENCODE_CCODE;
    }

    /* Conservatively treat the IT block */
    if (flags & IS_IT) {
        lir->defMask = ENCODE_ALL;
    }

    /* Set up the mask for resources that are used */
    if (flags & IS_BRANCH) {
        lir->useMask |= ENCODE_REG_PC;
    }

    if (flags & (REG_USE0 | REG_USE1 | REG_USE2 | REG_USE3)) {
        int i;

        for (i = 0; i < 4; i++) {
            if (flags & (1 << (kRegUse0 + i))) {
                setupRegMask(&lir->useMask, lir->operands[i]);
            }
        }
    }

    if (flags & REG_USE_PC) {
        lir->useMask |= ENCODE_REG_PC;
    }

    if (flags & REG_USE_SP) {
        lir->useMask |= ENCODE_REG_SP;
    }

    if (flags & REG_USE_LIST0) {
        lir->useMask |= ENCODE_REG_LIST(lir->operands[0]);
    }

    if (flags & REG_USE_LIST1) {
        lir->useMask |= ENCODE_REG_LIST(lir->operands[1]);
    }

    if (flags & USES_CCODES) {
        lir->useMask |= ENCODE_CCODE;
    }
}
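
/*
 * Illustrative note (not from the original source): for a hypothetical load
 * opcode whose EncodingMap entry carries (IS_LOAD | REG_DEF0 | REG_USE1),
 * setupResourceMasks() marks the generic heap memref bits in useMask via
 * setMemRefType(), adds the base register (operands[1]) to useMask, and adds
 * the destination register (operands[0]) to defMask.  If the load actually
 * accesses a Dalvik register, annotateDalvikRegAccess() later narrows the
 * memref classification to kDalvikReg.
 */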

/*
 * The following are building blocks to construct low-level IRs with 0 - 4
 * operands.
 */
static ArmLIR *newLIR0(CompilationUnit *cUnit, ArmOpCode opCode)
{
    ArmLIR *insn = dvmCompilerNew(sizeof(ArmLIR), true);
    assert(isPseudoOpCode(opCode) || (EncodingMap[opCode].flags & NO_OPERAND));
    insn->opCode = opCode;
    setupResourceMasks(insn);
    dvmCompilerAppendLIR(cUnit, (LIR *) insn);
    return insn;
}

static ArmLIR *newLIR1(CompilationUnit *cUnit, ArmOpCode opCode,
                       int dest)
{
    ArmLIR *insn = dvmCompilerNew(sizeof(ArmLIR), true);
    assert(isPseudoOpCode(opCode) || (EncodingMap[opCode].flags & IS_UNARY_OP));
    insn->opCode = opCode;
    insn->operands[0] = dest;
    setupResourceMasks(insn);
    dvmCompilerAppendLIR(cUnit, (LIR *) insn);
    return insn;
}

static ArmLIR *newLIR2(CompilationUnit *cUnit, ArmOpCode opCode,
                       int dest, int src1)
{
    ArmLIR *insn = dvmCompilerNew(sizeof(ArmLIR), true);
    assert(isPseudoOpCode(opCode) ||
           (EncodingMap[opCode].flags & IS_BINARY_OP));
    insn->opCode = opCode;
    insn->operands[0] = dest;
    insn->operands[1] = src1;
    setupResourceMasks(insn);
    dvmCompilerAppendLIR(cUnit, (LIR *) insn);
    return insn;
}

static ArmLIR *newLIR3(CompilationUnit *cUnit, ArmOpCode opCode,
                       int dest, int src1, int src2)
{
    ArmLIR *insn = dvmCompilerNew(sizeof(ArmLIR), true);
    if (!(EncodingMap[opCode].flags & IS_TERTIARY_OP)) {
        LOGE("Bad LIR3: %s[%d]", EncodingMap[opCode].name, opCode);
    }
    assert(isPseudoOpCode(opCode) ||
           (EncodingMap[opCode].flags & IS_TERTIARY_OP));
    insn->opCode = opCode;
    insn->operands[0] = dest;
    insn->operands[1] = src1;
    insn->operands[2] = src2;
    setupResourceMasks(insn);
    dvmCompilerAppendLIR(cUnit, (LIR *) insn);
    return insn;
}

static ArmLIR *newLIR4(CompilationUnit *cUnit, ArmOpCode opCode,
                       int dest, int src1, int src2, int info)
{
    ArmLIR *insn = dvmCompilerNew(sizeof(ArmLIR), true);
    assert(isPseudoOpCode(opCode) ||
           (EncodingMap[opCode].flags & IS_QUAD_OP));
    insn->opCode = opCode;
    insn->operands[0] = dest;
    insn->operands[1] = src1;
    insn->operands[2] = src2;
    insn->operands[3] = info;
    setupResourceMasks(insn);
    dvmCompilerAppendLIR(cUnit, (LIR *) insn);
    return insn;
}

/*
 * If the next instruction is a move-result or move-result-object, return the
 * target Dalvik register location and convert the following instruction to a
 * nop (the wide variant is handled by inlinedTargetWide below).  Otherwise,
 * return the location of the Dalvik return value.  Used to optimize method
 * inlining.
 */
static RegLocation inlinedTarget(CompilationUnit *cUnit, MIR *mir,
                                 bool fpHint)
{
    if (mir->next &&
        ((mir->next->dalvikInsn.opCode == OP_MOVE_RESULT) ||
         (mir->next->dalvikInsn.opCode == OP_MOVE_RESULT_OBJECT))) {
        mir->next->dalvikInsn.opCode = OP_NOP;
        return dvmCompilerGetDest(cUnit, mir->next, 0);
    } else {
        RegLocation res = LOC_DALVIK_RETURN_VAL;
        res.fp = fpHint;
        return res;
    }
}
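
/*
 * Illustrative note (not from the original source): this targets the common
 * Dalvik bytecode pattern
 *
 *     invoke-<kind> {..}      # call to an inlinable method
 *     move-result vA
 *
 * When the callee is inlined, the move-result is turned into a nop and the
 * inlined code writes its result directly into vA's location instead of
 * going through the return-value slot.
 */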

/*
 * Search the existing constants in the literal pool for an exact or close
 * match within the specified delta (which must be >= 0).
 */
static ArmLIR *scanLiteralPool(CompilationUnit *cUnit, int value,
                               unsigned int delta)
{
    LIR *dataTarget = cUnit->wordList;
    while (dataTarget) {
        /* The unsigned compare checks 0 <= value - constant <= delta in one test */
        if (((unsigned) (value - ((ArmLIR *) dataTarget)->operands[0])) <=
            delta)
            return (ArmLIR *) dataTarget;
        dataTarget = dataTarget->next;
    }
    return NULL;
}

/*
 * The following are building blocks to insert constants into the pool or
 * instruction streams.
 */

/* Add a 32-bit constant either in the constant pool or mixed with code */
static ArmLIR *addWordData(CompilationUnit *cUnit, int value, bool inPlace)
{
    /* Add the constant to the literal pool */
    if (!inPlace) {
        ArmLIR *newValue = dvmCompilerNew(sizeof(ArmLIR), true);
        newValue->operands[0] = value;
        newValue->generic.next = cUnit->wordList;
        cUnit->wordList = (LIR *) newValue;
        return newValue;
    } else {
        /* Add the constant in the middle of the code stream */
        newLIR1(cUnit, kArm16BitData, (value & 0xffff));
        newLIR1(cUnit, kArm16BitData, (value >> 16));
    }
    return NULL;
}

static RegLocation inlinedTargetWide(CompilationUnit *cUnit, MIR *mir,
                                     bool fpHint)
{
    if (mir->next &&
        (mir->next->dalvikInsn.opCode == OP_MOVE_RESULT_WIDE)) {
        mir->next->dalvikInsn.opCode = OP_NOP;
        return dvmCompilerGetDestWide(cUnit, mir->next, 0, 1);
    } else {
        RegLocation res = LOC_DALVIK_RETURN_VAL_WIDE;
        res.fp = fpHint;
        return res;
    }
}

/*
 * Generate a kArmPseudoBarrier marker to indicate the boundary of special
 * blocks.
 */
static void genBarrier(CompilationUnit *cUnit)
{
    ArmLIR *barrier = newLIR0(cUnit, kArmPseudoBarrier);
    /* Mark all resources as being clobbered */
    barrier->defMask = -1;
}

/* Create the PC reconstruction slot if not already done */
extern ArmLIR *genCheckCommon(CompilationUnit *cUnit, int dOffset,
                              ArmLIR *branch,
                              ArmLIR *pcrLabel)
{
    /* Forget all def info (because we might roll back here); see bug #2367397 */
    dvmCompilerResetDefTracking(cUnit);

    /* Set up the place holder to reconstruct this Dalvik PC */
    if (pcrLabel == NULL) {
        int dPC = (int) (cUnit->method->insns + dOffset);
        pcrLabel = dvmCompilerNew(sizeof(ArmLIR), true);
        pcrLabel->opCode = kArmPseudoPCReconstructionCell;
        pcrLabel->operands[0] = dPC;
        pcrLabel->operands[1] = dOffset;
        /* Insert the place holder into the growable list */
        dvmInsertGrowableList(&cUnit->pcReconstructionList, pcrLabel);
    }
    /* Branch to the PC reconstruction code */
    branch->generic.target = (LIR *) pcrLabel;
    return pcrLabel;
}
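
/*
 * Usage sketch (illustrative, not part of the original file): check helpers
 * elsewhere in the codegen typically emit a conditional branch and route it
 * through genCheckCommon so the exception path can rebuild the Dalvik PC
 * before control leaves the compiled trace, e.g.
 *
 *     ArmLIR *branch = ...;   // hypothetical compare-and-branch on the check
 *     pcrLabel = genCheckCommon(cUnit, mir->offset, branch, pcrLabel);
 *
 * The branch variable and mir->offset above are assumptions for illustration.
 */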