/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * This file contains codegen and support common to all supported
 * MIPS variants.  It is included by:
 *
 *        Codegen-$(TARGET_ARCH_VARIANT).c
 *
 * which combines this common code with specific support found in the
 * applicable directory below this one.
 */

#include "compiler/Loop.h"

/* Array holding the entry offset of each template relative to the first one */
static intptr_t templateEntryOffsets[TEMPLATE_LAST_MARK];

/* Track exercised opcodes */
static int opcodeCoverage[256];

static void setMemRefType(MipsLIR *lir, bool isLoad, int memType)
{
    /* MIPSTODO simplify setMemRefType() */
    u8 *maskPtr;
    u8 mask = ENCODE_MEM;
    assert(EncodingMap[lir->opcode].flags & (IS_LOAD | IS_STORE));

    if (isLoad) {
        maskPtr = &lir->useMask;
    } else {
        maskPtr = &lir->defMask;
    }
    /* Clear out the memref flags... */
    *maskPtr &= ~mask;
    /* ...and then add back the one we need */
    switch (memType) {
        case kLiteral:
            assert(isLoad);
            *maskPtr |= ENCODE_LITERAL;
            break;
        case kDalvikReg:
            *maskPtr |= ENCODE_DALVIK_REG;
            break;
        case kHeapRef:
            *maskPtr |= ENCODE_HEAP_REF;
            break;
        case kMustNotAlias:
            /* Currently only loads can be marked as kMustNotAlias */
            assert(!(EncodingMap[lir->opcode].flags & IS_STORE));
            *maskPtr |= ENCODE_MUST_NOT_ALIAS;
            break;
        default:
            ALOGE("Jit: invalid memref kind - %d", memType);
            assert(0);  // Bail on debug builds; set the worst case in the field
            *maskPtr |= ENCODE_ALL;
    }
}

/*
 * Mark load/store instructions that access Dalvik registers through rFP +
 * offset.
 */
static void annotateDalvikRegAccess(MipsLIR *lir, int regId, bool isLoad)
{
    /* MIPSTODO simplify annotateDalvikRegAccess() */
    setMemRefType(lir, isLoad, kDalvikReg);

    /*
     * Store the Dalvik register id in aliasInfo, and set the MSB if it is
     * a 64-bit access.
     */
    lir->aliasInfo = regId;
    if (DOUBLEREG(lir->operands[0])) {
        lir->aliasInfo |= 0x80000000;
    }
}
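/*
 * Illustrative sketch (not part of the original file): how the two
 * helpers above combine when emitting a store to a Dalvik virtual
 * register.  The opcode, register names, and offset below are
 * assumptions chosen for illustration only.
 */
#if 0   /* example only -- not compiled */
static void exampleAnnotateVregStore(CompilationUnit *cUnit)
{
    /* Hypothetical "sw a0, 12(rFP)" spilling vreg 3 (offset 3 * 4) */
    MipsLIR *store = newLIR3(cUnit, kMipsSw, r_A0, 3 * 4, rFP);

    /*
     * Mark it as a Dalvik register store: setupResourceMasks() defaulted
     * the memref to kHeapRef, and annotateDalvikRegAccess() narrows it to
     * ENCODE_DALVIK_REG while recording vreg 3 in aliasInfo so the
     * scheduler can disambiguate accesses to distinct vregs.
     */
    annotateDalvikRegAccess(store, 3, false /* isLoad */);
}
#endif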
/*
 * Decode the register id
 */
static inline u8 getRegMaskCommon(int reg)
{
    u8 seed;
    int shift;
    int regId = reg & 0x1f;

    /*
     * Each double register is equal to a pair of single-precision FP
     * registers
     */
    if (!DOUBLEREG(reg)) {
        seed = 1;
    } else {
        assert((regId & 1) == 0); /* double registers must be even */
        seed = 3;
    }

    if (FPREG(reg)) {
        assert(regId < 16); /* only 16 fp regs */
        shift = kFPReg0;
    } else if (EXTRAREG(reg)) {
        assert(regId < 3); /* only 3 extra regs */
        shift = kFPRegEnd;
    } else {
        shift = 0;
    }

    /* Expand the register id into a single bit offset */
    shift += regId;
    return (seed << shift);
}

/* External version of getRegMaskCommon */
u8 dvmGetRegResourceMask(int reg)
{
    return getRegMaskCommon(reg);
}

/*
 * Mark the corresponding bit(s).
 */
static inline void setupRegMask(u8 *mask, int reg)
{
    *mask |= getRegMaskCommon(reg);
}
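/*
 * Illustrative sketch (not part of the original file): the bit layout
 * getRegMaskCommon() produces.  The register names and the FP_DOUBLE
 * flag below are assumptions for illustration; the exact bit positions
 * depend on kFPReg0/kFPRegEnd.
 */
#if 0   /* example only -- not compiled */
static void exampleRegMaskShape(void)
{
    /* A core register occupies a single low-order bit: 1 << regId */
    u8 core = dvmGetRegResourceMask(r_V0);

    /* A single-precision FP register sits above the core bits */
    u8 single = dvmGetRegResourceMask(r_F4);           /* 1 << (kFPReg0 + 4) */

    /* A double covers an even/odd FP pair, so seed 3 yields two bits */
    u8 dbl = dvmGetRegResourceMask(r_F4 | FP_DOUBLE);  /* hypothetical flag */
    assert(dbl == (single | (single << 1)));
}
#endif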
/*
 * Set up the proper fields in the resource mask
 */
static void setupResourceMasks(MipsLIR *lir)
{
    /* MIPSTODO simplify setupResourceMasks() */
    int opcode = lir->opcode;
    int flags;

    if (opcode <= 0) {
        lir->useMask = lir->defMask = 0;
        return;
    }

    flags = EncodingMap[lir->opcode].flags;

    /* Set up the mask for resources that are updated */
    if (flags & (IS_LOAD | IS_STORE)) {
        /* Default to heap - will catch specialized classes later */
        setMemRefType(lir, flags & IS_LOAD, kHeapRef);
    }

    /*
     * Conservatively assume that a branch here will call out to a function
     * that in turn will trash everything.
     */
    if (flags & IS_BRANCH) {
        lir->defMask = lir->useMask = ENCODE_ALL;
        return;
    }

    if (flags & REG_DEF0) {
        setupRegMask(&lir->defMask, lir->operands[0]);
    }

    if (flags & REG_DEF1) {
        setupRegMask(&lir->defMask, lir->operands[1]);
    }

    if (flags & REG_DEF_SP) {
        lir->defMask |= ENCODE_REG_SP;
    }

    if (flags & REG_DEF_LR) {
        lir->defMask |= ENCODE_REG_LR;
    }

    if (flags & REG_DEF_LIST0) {
        lir->defMask |= ENCODE_REG_LIST(lir->operands[0]);
    }

    if (flags & REG_DEF_LIST1) {
        lir->defMask |= ENCODE_REG_LIST(lir->operands[1]);
    }

    if (flags & SETS_CCODES) {
        lir->defMask |= ENCODE_CCODE;
    }

    /* Conservatively treat the IT block */
    if (flags & IS_IT) {
        lir->defMask = ENCODE_ALL;
    }

    if (flags & (REG_USE0 | REG_USE1 | REG_USE2 | REG_USE3)) {
        int i;

        for (i = 0; i < 4; i++) {
            if (flags & (1 << (kRegUse0 + i))) {
                setupRegMask(&lir->useMask, lir->operands[i]);
            }
        }
    }

    if (flags & REG_USE_PC) {
        lir->useMask |= ENCODE_REG_PC;
    }

    if (flags & REG_USE_SP) {
        lir->useMask |= ENCODE_REG_SP;
    }

    if (flags & REG_USE_LIST0) {
        lir->useMask |= ENCODE_REG_LIST(lir->operands[0]);
    }

    if (flags & REG_USE_LIST1) {
        lir->useMask |= ENCODE_REG_LIST(lir->operands[1]);
    }

    if (flags & USES_CCODES) {
        lir->useMask |= ENCODE_CCODE;
    }
}

/*
 * Set up the accurate resource mask for branch instructions
 */
static void relaxBranchMasks(MipsLIR *lir)
{
    int flags = EncodingMap[lir->opcode].flags;

    /* Make sure only branch instructions are passed here */
    assert(flags & IS_BRANCH);

    lir->defMask |= ENCODE_REG_PC;
    lir->useMask |= ENCODE_REG_PC;

    if (flags & REG_DEF_LR) {
        lir->defMask |= ENCODE_REG_LR;
    }

    if (flags & (REG_USE0 | REG_USE1 | REG_USE2 | REG_USE3)) {
        int i;

        for (i = 0; i < 4; i++) {
            if (flags & (1 << (kRegUse0 + i))) {
                setupRegMask(&lir->useMask, lir->operands[i]);
            }
        }
    }

    if (flags & USES_CCODES) {
        lir->useMask |= ENCODE_CCODE;
    }
}
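/*
 * Illustrative sketch (not part of the original file): the lifecycle of
 * a branch's resource masks.  setupResourceMasks() (run by the newLIR*
 * builders below) pessimistically marks every branch as touching all
 * resources; for a branch that merely punts to the interpreter,
 * relaxBranchMasks() records the accurate dependencies -- the PC plus
 * the branch's actual register uses.  The opcode here is an assumption
 * chosen for illustration.
 */
#if 0   /* example only -- not compiled */
static void exampleBranchMasks(CompilationUnit *cUnit, MipsLIR *target)
{
    /* Hypothetical "beqz a0, target"; newLIR2() runs setupResourceMasks() */
    MipsLIR *branch = newLIR2(cUnit, kMipsBeqz, r_A0, 0 /* patched later */);
    branch->generic.target = (LIR *) target;
    assert(branch->defMask == ENCODE_ALL);  /* conservative default */

    relaxBranchMasks(branch);               /* accurate mask: PC + real uses */
    assert(branch->useMask & ENCODE_REG_PC);
}
#endif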
/*
 * The following are building blocks to construct low-level IRs with 0 - 4
 * operands.
 */
static MipsLIR *newLIR0(CompilationUnit *cUnit, MipsOpCode opcode)
{
    MipsLIR *insn = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
    assert(isPseudoOpCode(opcode) || (EncodingMap[opcode].flags & NO_OPERAND));
    insn->opcode = opcode;
    setupResourceMasks(insn);
    dvmCompilerAppendLIR(cUnit, (LIR *) insn);
    return insn;
}

static MipsLIR *newLIR1(CompilationUnit *cUnit, MipsOpCode opcode,
                        int dest)
{
    MipsLIR *insn = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
    assert(isPseudoOpCode(opcode) || (EncodingMap[opcode].flags & IS_UNARY_OP));
    insn->opcode = opcode;
    insn->operands[0] = dest;
    setupResourceMasks(insn);
    dvmCompilerAppendLIR(cUnit, (LIR *) insn);
    return insn;
}

static MipsLIR *newLIR2(CompilationUnit *cUnit, MipsOpCode opcode,
                        int dest, int src1)
{
    MipsLIR *insn = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
    assert(isPseudoOpCode(opcode) ||
           (EncodingMap[opcode].flags & IS_BINARY_OP));
    insn->opcode = opcode;
    insn->operands[0] = dest;
    insn->operands[1] = src1;
    setupResourceMasks(insn);
    dvmCompilerAppendLIR(cUnit, (LIR *) insn);
    return insn;
}

static MipsLIR *newLIR3(CompilationUnit *cUnit, MipsOpCode opcode,
                        int dest, int src1, int src2)
{
    MipsLIR *insn = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
    if (!(EncodingMap[opcode].flags & IS_TERTIARY_OP)) {
        ALOGE("Bad LIR3: %s[%d]", EncodingMap[opcode].name, opcode);
    }
    assert(isPseudoOpCode(opcode) ||
           (EncodingMap[opcode].flags & IS_TERTIARY_OP));
    insn->opcode = opcode;
    insn->operands[0] = dest;
    insn->operands[1] = src1;
    insn->operands[2] = src2;
    setupResourceMasks(insn);
    dvmCompilerAppendLIR(cUnit, (LIR *) insn);
    return insn;
}

static MipsLIR *newLIR4(CompilationUnit *cUnit, MipsOpCode opcode,
                        int dest, int src1, int src2, int info)
{
    MipsLIR *insn = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
    assert(isPseudoOpCode(opcode) ||
           (EncodingMap[opcode].flags & IS_QUAD_OP));
    insn->opcode = opcode;
    insn->operands[0] = dest;
    insn->operands[1] = src1;
    insn->operands[2] = src2;
    insn->operands[3] = info;
    setupResourceMasks(insn);
    dvmCompilerAppendLIR(cUnit, (LIR *) insn);
    return insn;
}

/*
 * If the next instruction is a move-result or move-result-object, return
 * the target Dalvik sReg[s] and convert the move into a nop.  Otherwise,
 * return a location describing the Dalvik return value.  Used to optimize
 * method inlining.
 */
static RegLocation inlinedTarget(CompilationUnit *cUnit, MIR *mir,
                                 bool fpHint)
{
    if (mir->next &&
        ((mir->next->dalvikInsn.opcode == OP_MOVE_RESULT) ||
         (mir->next->dalvikInsn.opcode == OP_MOVE_RESULT_OBJECT))) {
        mir->next->dalvikInsn.opcode = OP_NOP;
        return dvmCompilerGetDest(cUnit, mir->next, 0);
    } else {
        RegLocation res = LOC_DALVIK_RETURN_VAL;
        res.fp = fpHint;
        return res;
    }
}
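/*
 * Illustrative sketch (not part of the original file): the newLIR*
 * builders in use.  Opcodes and registers are assumptions chosen for
 * illustration; each builder checks the operand count against
 * EncodingMap, fills in the resource masks, and appends the new
 * instruction to cUnit's LIR stream in program order.
 */
#if 0   /* example only -- not compiled */
static void exampleEmitSequence(CompilationUnit *cUnit)
{
    newLIR3(cUnit, kMipsAddu, r_V0, r_A0, r_A1);   /* addu v0, a0, a1 */
    newLIR2(cUnit, kMipsMove, r_A0, r_V0);         /* move a0, v0 */
    newLIR0(cUnit, kMipsNop);                      /* nop: NO_OPERAND opcode */
}
#endif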
/*
 * The following are building blocks to insert constants into the pool or
 * instruction streams.
 */

/* Add a 32-bit constant either in the constant pool or mixed with code */
static MipsLIR *addWordData(CompilationUnit *cUnit, LIR **constantListP,
                            int value)
{
    /* Add the constant to the literal pool */
    if (constantListP) {
        MipsLIR *newValue = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
        newValue->operands[0] = value;
        newValue->generic.next = *constantListP;
        *constantListP = (LIR *) newValue;
        return newValue;
    } else {
        /* Add the constant in the middle of the code stream */
        newLIR1(cUnit, kMips32BitData, value);
    }
    return NULL;
}

static RegLocation inlinedTargetWide(CompilationUnit *cUnit, MIR *mir,
                                     bool fpHint)
{
    if (mir->next &&
        (mir->next->dalvikInsn.opcode == OP_MOVE_RESULT_WIDE)) {
        mir->next->dalvikInsn.opcode = OP_NOP;
        return dvmCompilerGetDestWide(cUnit, mir->next, 0, 1);
    } else {
        RegLocation res = LOC_DALVIK_RETURN_VAL_WIDE;
        res.fp = fpHint;
        return res;
    }
}

/*
 * Generate a kMipsPseudoBarrier marker to indicate the boundary of special
 * blocks.
 */
static void genBarrier(CompilationUnit *cUnit)
{
    MipsLIR *barrier = newLIR0(cUnit, kMipsPseudoBarrier);
    /* Mark all resources as being clobbered */
    barrier->defMask = -1;
}

/* Create the PC reconstruction slot if not already done */
extern MipsLIR *genCheckCommon(CompilationUnit *cUnit, int dOffset,
                               MipsLIR *branch,
                               MipsLIR *pcrLabel)
{
    /* Forget all def info (because we might roll back here).  Bug #2367397 */
    dvmCompilerResetDefTracking(cUnit);

    /* Set up the placeholder to reconstruct this Dalvik PC */
    if (pcrLabel == NULL) {
        int dPC = (int) (cUnit->method->insns + dOffset);
        pcrLabel = (MipsLIR *) dvmCompilerNew(sizeof(MipsLIR), true);
        pcrLabel->opcode = kMipsPseudoPCReconstructionCell;
        pcrLabel->operands[0] = dPC;
        pcrLabel->operands[1] = dOffset;
        /* Insert the placeholder into the growable list */
        dvmInsertGrowableList(&cUnit->pcReconstructionList,
                              (intptr_t) pcrLabel);
    }
    /* Branch to the PC reconstruction code */
    branch->generic.target = (LIR *) pcrLabel;

    /* Clear the conservative flags for branches that punt to the interpreter */
    relaxBranchMasks(branch);

    return pcrLabel;
}
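/*
 * Illustrative sketch (not part of the original file): how a null check
 * might route through genCheckCommon().  The opcode and helper name are
 * assumptions; the pattern is a compare-and-branch whose target is the
 * (possibly shared) PC reconstruction cell, so a failing check rebuilds
 * the Dalvik PC and bails to the interpreter.
 */
#if 0   /* example only -- not compiled */
static MipsLIR *exampleNullCheck(CompilationUnit *cUnit, int mReg,
                                 int dOffset, MipsLIR *pcrLabel)
{
    /* Hypothetical "beqz mReg, <pc reconstruction>" */
    MipsLIR *branch = newLIR2(cUnit, kMipsBeqz, mReg, 0 /* patched */);
    return genCheckCommon(cUnit, dOffset, branch, pcrLabel);
}
#endif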