/*
 * Copyright (C) 2009 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/*
 * This file contains codegen and support common to all supported
 * ARM variants.  It is included by:
 *
 *        Codegen-$(TARGET_ARCH_VARIANT).c
 *
 * which combines this common code with specific support found in the
 * applicable directory below this one.
 */

#include "compiler/Loop.h"

/* Array holding the entry offset of each template relative to the first one */
static intptr_t templateEntryOffsets[TEMPLATE_LAST_MARK];

/* Track exercised opcodes */
static int opcodeCoverage[kNumPackedOpcodes];

static void setMemRefType(ArmLIR *lir, bool isLoad, int memType)
{
    u8 *maskPtr;
    u8 mask = ENCODE_MEM;
    assert(EncodingMap[lir->opcode].flags & (IS_LOAD | IS_STORE));
    if (isLoad) {
        maskPtr = &lir->useMask;
    } else {
        maskPtr = &lir->defMask;
    }
    /* Clear out the memref flags */
    *maskPtr &= ~mask;
    /* ...and then add back the one we need */
    switch(memType) {
        case kLiteral:
            assert(isLoad);
            *maskPtr |= ENCODE_LITERAL;
            break;
        case kDalvikReg:
            *maskPtr |= ENCODE_DALVIK_REG;
            break;
        case kHeapRef:
            *maskPtr |= ENCODE_HEAP_REF;
            break;
        case kMustNotAlias:
            /* Currently only loads can be marked as kMustNotAlias */
            assert(!(EncodingMap[lir->opcode].flags & IS_STORE));
            *maskPtr |= ENCODE_MUST_NOT_ALIAS;
            break;
        default:
            ALOGE("Jit: invalid memref kind - %d", memType);
            assert(0);  // Bail on a debug build; assume the worst case in the field
            *maskPtr |= ENCODE_ALL;
    }
}
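
/*
 * Illustrative example (loadLIR is a hypothetical load instruction, not
 * from this file): after
 *     setMemRefType(loadLIR, true, kLiteral);
 * the generic ENCODE_MEM bits in loadLIR->useMask are cleared and
 * ENCODE_LITERAL is set, so the scheduler treats the load as aliasing
 * only other literal-pool accesses.
 */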

/*
 * Mark load/store instructions that access Dalvik registers through r5FP +
 * offset.
 */
static void annotateDalvikRegAccess(ArmLIR *lir, int regId, bool isLoad)
{
    setMemRefType(lir, isLoad, kDalvikReg);

    /*
     * Store the Dalvik register id in aliasInfo. Mark the MSB if it is a
     * 64-bit access.
     */
    lir->aliasInfo = regId;
    if (DOUBLEREG(lir->operands[0])) {
        lir->aliasInfo |= 0x80000000;
    }
}
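
/*
 * Worked example (illustrative; assumes the caller passes the low vreg
 * id of the wide pair): after a 64-bit load of v4/v5 into a double
 * register is annotated with
 *     annotateDalvikRegAccess(lir, 4, true);
 * lir->aliasInfo holds 0x80000004 - the register id in the low bits and
 * the wide-access flag in the MSB.
 */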

/*
 * Decode the register id into the corresponding resource mask bit(s).
 */
static inline u8 getRegMaskCommon(int reg)
{
    u8 seed;
    int shift;
    int regId = reg & 0x1f;

    /*
     * Each double-precision register occupies a pair of single-precision
     * FP registers.
     */
    seed = DOUBLEREG(reg) ? 3 : 1;
    /* FP registers start at bit position 16 */
    shift = FPREG(reg) ? kFPReg0 : 0;
    /* Expand the double register id into a single-register offset */
    shift += regId;
    return (seed << shift);
}
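
/*
 * Examples (illustrative, assuming kFPReg0 == 16 and the register
 * encodings used by this codegen):
 *     core register r3             -> seed 1, shift 3  -> bit 3
 *     single FP register fr5       -> seed 1, shift 21 -> bit 21
 *     double register over fr6/fr7 -> seed 3, shift 22 -> bits 22-23,
 *     i.e. the two single-precision registers it overlaps.
 */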

/* External version of getRegMaskCommon */
u8 dvmGetRegResourceMask(int reg)
{
    return getRegMaskCommon(reg);
}

/*
 * Mark the corresponding bit(s).
 */
static inline void setupRegMask(u8 *mask, int reg)
{
    *mask |= getRegMaskCommon(reg);
}

/*
 * Set up the proper fields in the resource mask
 */
static void setupResourceMasks(ArmLIR *lir)
{
    int opcode = lir->opcode;
    int flags;

    if (opcode <= 0) {
        lir->useMask = lir->defMask = 0;
        return;
    }

    flags = EncodingMap[lir->opcode].flags;

    /* Set up the mask for resources that are updated */
    if (flags & (IS_LOAD | IS_STORE)) {
        /* Default to heap - will catch specialized classes later */
        setMemRefType(lir, flags & IS_LOAD, kHeapRef);
    }

    /*
     * Conservatively assume that this branch will call out to a function
     * that in turn will trash everything.
     */
    if (flags & IS_BRANCH) {
        lir->defMask = lir->useMask = ENCODE_ALL;
        return;
    }

    if (flags & REG_DEF0) {
        setupRegMask(&lir->defMask, lir->operands[0]);
    }

    if (flags & REG_DEF1) {
        setupRegMask(&lir->defMask, lir->operands[1]);
    }

    if (flags & REG_DEF_SP) {
        lir->defMask |= ENCODE_REG_SP;
    }

    if (flags & REG_DEF_LR) {
        lir->defMask |= ENCODE_REG_LR;
    }

    if (flags & REG_DEF_LIST0) {
        lir->defMask |= ENCODE_REG_LIST(lir->operands[0]);
    }

    if (flags & REG_DEF_LIST1) {
        lir->defMask |= ENCODE_REG_LIST(lir->operands[1]);
    }

    if (flags & SETS_CCODES) {
        lir->defMask |= ENCODE_CCODE;
    }

    /* Conservatively treat the IT block */
    if (flags & IS_IT) {
        lir->defMask = ENCODE_ALL;
    }

    if (flags & (REG_USE0 | REG_USE1 | REG_USE2 | REG_USE3)) {
        int i;

        for (i = 0; i < 4; i++) {
            if (flags & (1 << (kRegUse0 + i))) {
                setupRegMask(&lir->useMask, lir->operands[i]);
            }
        }
    }

    if (flags & REG_USE_PC) {
        lir->useMask |= ENCODE_REG_PC;
    }

    if (flags & REG_USE_SP) {
        lir->useMask |= ENCODE_REG_SP;
    }

    if (flags & REG_USE_LIST0) {
        lir->useMask |= ENCODE_REG_LIST(lir->operands[0]);
    }

    if (flags & REG_USE_LIST1) {
        lir->useMask |= ENCODE_REG_LIST(lir->operands[1]);
    }

    if (flags & USES_CCODES) {
        lir->useMask |= ENCODE_CCODE;
    }

    /*
     * Fixup for kThumbPush/lr and kThumbPop/pc: in the Thumb encodings,
     * bit 8 of the register list selects LR (push) or PC (pop), and that
     * bit surfaces here as r8 in the operand mask. Rewrite it to the
     * register it really names.
     */
    if (opcode == kThumbPush || opcode == kThumbPop) {
        u8 r8Mask = getRegMaskCommon(r8);
        if ((opcode == kThumbPush) && (lir->useMask & r8Mask)) {
            lir->useMask &= ~r8Mask;
            lir->useMask |= ENCODE_REG_LR;
        } else if ((opcode == kThumbPop) && (lir->defMask & r8Mask)) {
            lir->defMask &= ~r8Mask;
            lir->defMask |= ENCODE_REG_PC;
        }
    }
}

/*
 * Set up accurate resource masks for branch instructions.
 */
static void relaxBranchMasks(ArmLIR *lir)
{
    int flags = EncodingMap[lir->opcode].flags;

    /* Make sure only branch instructions are passed here */
    assert(flags & IS_BRANCH);

    lir->useMask = lir->defMask = ENCODE_REG_PC;

    if (flags & REG_DEF_LR) {
        lir->defMask |= ENCODE_REG_LR;
    }

    if (flags & (REG_USE0 | REG_USE1 | REG_USE2 | REG_USE3)) {
        int i;

        for (i = 0; i < 4; i++) {
            if (flags & (1 << (kRegUse0 + i))) {
                setupRegMask(&lir->useMask, lir->operands[i]);
            }
        }
    }

    if (flags & USES_CCODES) {
        lir->useMask |= ENCODE_CCODE;
    }
}

/*
 * The following are building blocks to construct low-level IRs with 0 - 4
 * operands.
 */
static ArmLIR *newLIR0(CompilationUnit *cUnit, ArmOpcode opcode)
{
    ArmLIR *insn = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
    assert(isPseudoOpcode(opcode) || (EncodingMap[opcode].flags & NO_OPERAND));
    insn->opcode = opcode;
    setupResourceMasks(insn);
    dvmCompilerAppendLIR(cUnit, (LIR *) insn);
    return insn;
}

static ArmLIR *newLIR1(CompilationUnit *cUnit, ArmOpcode opcode,
                           int dest)
{
    ArmLIR *insn = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
    assert(isPseudoOpcode(opcode) || (EncodingMap[opcode].flags & IS_UNARY_OP));
    insn->opcode = opcode;
    insn->operands[0] = dest;
    setupResourceMasks(insn);
    dvmCompilerAppendLIR(cUnit, (LIR *) insn);
    return insn;
}

static ArmLIR *newLIR2(CompilationUnit *cUnit, ArmOpcode opcode,
                           int dest, int src1)
{
    ArmLIR *insn = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
    assert(isPseudoOpcode(opcode) ||
           (EncodingMap[opcode].flags & IS_BINARY_OP));
    insn->opcode = opcode;
    insn->operands[0] = dest;
    insn->operands[1] = src1;
    setupResourceMasks(insn);
    dvmCompilerAppendLIR(cUnit, (LIR *) insn);
    return insn;
}

static ArmLIR *newLIR3(CompilationUnit *cUnit, ArmOpcode opcode,
                           int dest, int src1, int src2)
{
    ArmLIR *insn = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
    if (!(EncodingMap[opcode].flags & IS_TERTIARY_OP)) {
        ALOGE("Bad LIR3: %s[%d]", EncodingMap[opcode].name, opcode);
    }
    assert(isPseudoOpcode(opcode) ||
           (EncodingMap[opcode].flags & IS_TERTIARY_OP));
    insn->opcode = opcode;
    insn->operands[0] = dest;
    insn->operands[1] = src1;
    insn->operands[2] = src2;
    setupResourceMasks(insn);
    dvmCompilerAppendLIR(cUnit, (LIR *) insn);
    return insn;
}

#if defined(_ARMV7_A) || defined(_ARMV7_A_NEON)
static ArmLIR *newLIR4(CompilationUnit *cUnit, ArmOpcode opcode,
                           int dest, int src1, int src2, int info)
{
    ArmLIR *insn = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
    assert(isPseudoOpcode(opcode) ||
           (EncodingMap[opcode].flags & IS_QUAD_OP));
    insn->opcode = opcode;
    insn->operands[0] = dest;
    insn->operands[1] = src1;
    insn->operands[2] = src2;
    insn->operands[3] = info;
    setupResourceMasks(insn);
    dvmCompilerAppendLIR(cUnit, (LIR *) insn);
    return insn;
}
#endif
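
/*
 * Usage sketch (hypothetical, not from this file): a three-address add
 * and a conditional branch could be emitted as
 *     newLIR3(cUnit, kThumbAddRRR, r0, r1, r2);
 *     ArmLIR *branch = newLIR2(cUnit, kThumbBCond, 0, kArmCondEq);
 * with branch->generic.target filled in later, once the target label is
 * known.  The opcode names here are assumed to follow the EncodingMap
 * conventions used elsewhere in this codegen.
 */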

/*
 * If the next instruction is a move-result or move-result-object,
 * return the target Dalvik register location and convert that
 * move-result to a nop.  Otherwise, return the location of the Dalvik
 * return value.  Used to optimize method inlining.
 */
static RegLocation inlinedTarget(CompilationUnit *cUnit, MIR *mir,
                                  bool fpHint)
{
    if (mir->next &&
        ((mir->next->dalvikInsn.opcode == OP_MOVE_RESULT) ||
         (mir->next->dalvikInsn.opcode == OP_MOVE_RESULT_OBJECT))) {
        mir->next->dalvikInsn.opcode = OP_NOP;
        return dvmCompilerGetDest(cUnit, mir->next, 0);
    } else {
        RegLocation res = LOC_DALVIK_RETURN_VAL;
        res.fp = fpHint;
        return res;
    }
}
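
/*
 * Example (illustrative): when inlining a callee for
 *     invoke-static {v1}, LFoo;.bar:(I)I
 *     move-result v3
 * the move-result is rewritten to a nop and the inlined code writes its
 * result directly into v3's location rather than the return-value slot.
 */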

/*
 * Search the existing constants in the literal pool for an exact or close
 * match within the specified delta (greater than or equal to 0).
 */
static ArmLIR *scanLiteralPool(LIR *dataTarget, int value, unsigned int delta)
{
    while (dataTarget) {
        if (((unsigned) (value - ((ArmLIR *) dataTarget)->operands[0])) <=
            delta)
            return (ArmLIR *) dataTarget;
        dataTarget = dataTarget->next;
    }
    return NULL;
}
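
/*
 * Example (illustrative): scanLiteralPool(list, 0x1004, 0) demands an
 * exact match, while a delta of 0xff also accepts any pooled constant
 * in [0x1004 - 0xff, 0x1004] that the caller can cheaply adjust after
 * loading.
 */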

/* Search the existing constants in the literal pool for an exact wide match */
ArmLIR* scanLiteralPoolWide(LIR* dataTarget, int valLo, int valHi)
{
    bool lowMatch = false;
    ArmLIR* lowTarget = NULL;
    while (dataTarget) {
        if (lowMatch && (((ArmLIR *) dataTarget)->operands[0] == valHi)) {
            return lowTarget;
        }
        lowMatch = false;
        if (((ArmLIR *) dataTarget)->operands[0] == valLo) {
            lowMatch = true;
            lowTarget = (ArmLIR *) dataTarget;
        }
        dataTarget = dataTarget->next;
    }
    return NULL;
}

/*
 * The following are building blocks to insert constants into the pool or
 * instruction streams.
 */

/* Add a 32-bit constant either to the literal pool or mixed with code */
static ArmLIR *addWordData(CompilationUnit *cUnit, LIR **constantListP,
                           int value)
{
    /* Add the constant to the literal pool */
    if (constantListP) {
        ArmLIR *newValue = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
        newValue->operands[0] = value;
        newValue->generic.next = *constantListP;
        *constantListP = (LIR *) newValue;
        return newValue;
    } else {
        /* Add the constant in the middle of the code stream */
        newLIR1(cUnit, kArm16BitData, (value & 0xffff));
        newLIR1(cUnit, kArm16BitData, (value >> 16));
    }
    return NULL;
}
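
/*
 * Note: when emitted inline, the constant is split into two
 * kArm16BitData halves with the low half first, which is the layout a
 * 32-bit little-endian load of the pair reconstructs.
 */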

/* Add a 64-bit constant to the literal pool or mixed with code */
ArmLIR* addWideData(CompilationUnit* cUnit, LIR** constantListP,
                    int valLo, int valHi)
{
    addWordData(cUnit, constantListP, valHi);
    return addWordData(cUnit, constantListP, valLo);
}
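
/*
 * Ordering note: valHi is prepended first and valLo second, so the list
 * ends up with the valLo node immediately followed by the valHi node -
 * exactly the adjacency that scanLiteralPoolWide's walk relies on.
 */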

static RegLocation inlinedTargetWide(CompilationUnit *cUnit, MIR *mir,
                                      bool fpHint)
{
    if (mir->next &&
        (mir->next->dalvikInsn.opcode == OP_MOVE_RESULT_WIDE)) {
        mir->next->dalvikInsn.opcode = OP_NOP;
        return dvmCompilerGetDestWide(cUnit, mir->next, 0, 1);
    } else {
        RegLocation res = LOC_DALVIK_RETURN_VAL_WIDE;
        res.fp = fpHint;
        return res;
    }
}

/*
 * Generate a kArmPseudoBarrier marker to indicate the boundary of special
 * blocks.
 */
static void genBarrier(CompilationUnit *cUnit)
{
    ArmLIR *barrier = newLIR0(cUnit, kArmPseudoBarrier);
    /* Mark all resources as being clobbered */
    barrier->defMask = -1;
}

/* Create the PC reconstruction slot if not already done */
static ArmLIR *genCheckCommon(CompilationUnit *cUnit, int dOffset,
                              ArmLIR *branch,
                              ArmLIR *pcrLabel)
{
    /* Forget all def info (we might roll back here; see bug #2367397) */
    dvmCompilerResetDefTracking(cUnit);

    /* Set up the placeholder to reconstruct this Dalvik PC */
    if (pcrLabel == NULL) {
        int dPC = (int) (cUnit->method->insns + dOffset);
        pcrLabel = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
        pcrLabel->opcode = kArmPseudoPCReconstructionCell;
        pcrLabel->operands[0] = dPC;
        pcrLabel->operands[1] = dOffset;
        /* Insert the placeholder into the growable list */
        dvmInsertGrowableList(&cUnit->pcReconstructionList,
                              (intptr_t) pcrLabel);
    }
    /* Branch to the PC reconstruction code */
    branch->generic.target = (LIR *) pcrLabel;

    /* Clear the conservative flags for branches that punt to the interpreter */
    relaxBranchMasks(branch);

    return pcrLabel;
}
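
/*
 * Usage sketch (hypothetical caller, not from this file): a null check
 * might compare a register against 0 and route the taken path here:
 *     ArmLIR *branch = genCmpImmBranch(cUnit, kArmCondEq, mReg, 0);
 *     genCheckCommon(cUnit, mir->offset, branch, pcrLabel);
 * genCmpImmBranch is assumed to emit a compare-and-branch whose target
 * genCheckCommon then points at the PC-reconstruction cell.
 */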
    475