      1 /*
      2  * Copyright (C) 2011 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 #include "arm_lir.h"
     18 #include "codegen_arm.h"
     19 #include "dex/quick/mir_to_lir-inl.h"
     20 
     21 namespace art {
     22 
     23 /* This file contains codegen for the Thumb ISA. */
     24 
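         /*
          * Determine whether value can be encoded as a Thumb2 single precision
          * floating point immediate, i.e. an 8-bit value abcdefgh expanded to the
          * bit pattern a:NOT(b):bbbbb:cdefgh followed by 19 zero bits.  Returns
          * the encoded 8-bit value, or -1 if value has no such encoding.  For
          * example, +2.0f (0x40000000) encodes to 0, which LoadFPConstantValue
          * below relies on.
          */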
     25 static int EncodeImmSingle(int value) {
     26   int res;
     27   int bit_a =  (value & 0x80000000) >> 31;
     28   int not_bit_b = (value & 0x40000000) >> 30;
     29   int bit_b =  (value & 0x20000000) >> 29;
     30   int b_smear =  (value & 0x3e000000) >> 25;
     31   int slice =   (value & 0x01f80000) >> 19;
     32   int zeroes =  (value & 0x0007ffff);
     33   if (zeroes != 0)
     34     return -1;
     35   if (bit_b) {
     36     if ((not_bit_b != 0) || (b_smear != 0x1f))
     37       return -1;
     38   } else {
     39     if ((not_bit_b != 1) || (b_smear != 0x0))
     40       return -1;
     41   }
     42   res = (bit_a << 7) | (bit_b << 6) | slice;
     43   return res;
     44 }
     45 
     46 /*
      47  * Determine whether value can be encoded as a Thumb2 double precision
      48  * floating point immediate.  If not, return -1; otherwise return the encoded 8-bit value.
     49  */
     50 static int EncodeImmDouble(int64_t value) {
     51   int res;
     52   int bit_a = (value & 0x8000000000000000ll) >> 63;
     53   int not_bit_b = (value & 0x4000000000000000ll) >> 62;
     54   int bit_b = (value & 0x2000000000000000ll) >> 61;
     55   int b_smear = (value & 0x3fc0000000000000ll) >> 54;
     56   int slice =  (value & 0x003f000000000000ll) >> 48;
     57   uint64_t zeroes = (value & 0x0000ffffffffffffll);
     58   if (zeroes != 0)
     59     return -1;
     60   if (bit_b) {
     61     if ((not_bit_b != 0) || (b_smear != 0xff))
     62       return -1;
     63   } else {
     64     if ((not_bit_b != 1) || (b_smear != 0x0))
     65       return -1;
     66   }
     67   res = (bit_a << 7) | (bit_b << 6) | slice;
     68   return res;
     69 }
     70 
     71 LIR* ArmMir2Lir::LoadFPConstantValue(int r_dest, int value) {
     72   DCHECK(ARM_SINGLEREG(r_dest));
     73   if (value == 0) {
     74     // TODO: we need better info about the target CPU.  a vector exclusive or
      75     //       would probably be better here if we could rely on its existence.
     76     // Load an immediate +2.0 (which encodes to 0)
     77     NewLIR2(kThumb2Vmovs_IMM8, r_dest, 0);
     78     // +0.0 = +2.0 - +2.0
     79     return NewLIR3(kThumb2Vsubs, r_dest, r_dest, r_dest);
     80   } else {
     81     int encoded_imm = EncodeImmSingle(value);
     82     if (encoded_imm >= 0) {
     83       return NewLIR2(kThumb2Vmovs_IMM8, r_dest, encoded_imm);
     84     }
     85   }
     86   LIR* data_target = ScanLiteralPool(literal_list_, value, 0);
     87   if (data_target == NULL) {
     88     data_target = AddWordData(&literal_list_, value);
     89   }
     90   LIR* load_pc_rel = RawLIR(current_dalvik_offset_, kThumb2Vldrs,
     91                           r_dest, r15pc, 0, 0, 0, data_target);
     92   SetMemRefType(load_pc_rel, true, kLiteral);
     93   load_pc_rel->alias_info = reinterpret_cast<uintptr_t>(data_target);
     94   AppendLIR(load_pc_rel);
     95   return load_pc_rel;
     96 }
     97 
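         /*
          * Count the number of leading zero bits in val using a branching binary
          * search; returns 32 when val is 0.
          */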
     98 static int LeadingZeros(uint32_t val) {
     99   uint32_t alt;
    100   int n;
    101   int count;
    102 
    103   count = 16;
    104   n = 32;
    105   do {
    106     alt = val >> count;
    107     if (alt != 0) {
    108       n = n - count;
    109       val = alt;
    110     }
    111     count >>= 1;
    112   } while (count);
    113   return n - val;
    114 }
    115 
    116 /*
    117  * Determine whether value can be encoded as a Thumb2 modified
    118  * immediate.  If not, return -1.  If so, return i:imm3:a:bcdefgh form.
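          * For example, 0x00ab00ab encodes to 0x1ab and 0xff000000 to 0x47f.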
    119  */
    120 int ArmMir2Lir::ModifiedImmediate(uint32_t value) {
    121   int z_leading;
    122   int z_trailing;
    123   uint32_t b0 = value & 0xff;
    124 
    125   /* Note: case of value==0 must use 0:000:0:0000000 encoding */
    126   if (value <= 0xFF)
    127     return b0;  // 0:000:a:bcdefgh
    128   if (value == ((b0 << 16) | b0))
    129     return (0x1 << 8) | b0; /* 0:001:a:bcdefgh */
    130   if (value == ((b0 << 24) | (b0 << 16) | (b0 << 8) | b0))
    131     return (0x3 << 8) | b0; /* 0:011:a:bcdefgh */
    132   b0 = (value >> 8) & 0xff;
    133   if (value == ((b0 << 24) | (b0 << 8)))
    134     return (0x2 << 8) | b0; /* 0:010:a:bcdefgh */
    135   /* Can we do it with rotation? */
    136   z_leading = LeadingZeros(value);
    137   z_trailing = 32 - LeadingZeros(~value & (value - 1));
    138   /* A run of eight or fewer active bits? */
    139   if ((z_leading + z_trailing) < 24)
    140     return -1;  /* No - bail */
    141   /* left-justify the constant, discarding msb (known to be 1) */
    142   value <<= z_leading + 1;
    143   /* Create bcdefgh */
    144   value >>= 25;
    145   /* Put it all together */
    146   return value | ((0x8 + z_leading) << 7); /* [01000..11111]:bcdefgh */
    147 }
    148 
    149 bool ArmMir2Lir::InexpensiveConstantInt(int32_t value) {
    150   return (ModifiedImmediate(value) >= 0) || (ModifiedImmediate(~value) >= 0);
    151 }
    152 
    153 bool ArmMir2Lir::InexpensiveConstantFloat(int32_t value) {
    154   return EncodeImmSingle(value) >= 0;
    155 }
    156 
    157 bool ArmMir2Lir::InexpensiveConstantLong(int64_t value) {
    158   return InexpensiveConstantInt(High32Bits(value)) && InexpensiveConstantInt(Low32Bits(value));
    159 }
    160 
    161 bool ArmMir2Lir::InexpensiveConstantDouble(int64_t value) {
    162   return EncodeImmDouble(value) >= 0;
    163 }
    164 
    165 /*
     166  * Load an immediate using a shortcut if possible; otherwise
     167  * grab it from the per-translation literal pool.
    168  *
     169  * No additional register clobbering operation is performed. Use this version when
    170  * 1) r_dest is freshly returned from AllocTemp or
    171  * 2) The codegen is under fixed register usage
    172  */
    173 LIR* ArmMir2Lir::LoadConstantNoClobber(int r_dest, int value) {
    174   LIR* res;
    175   int mod_imm;
    176 
    177   if (ARM_FPREG(r_dest)) {
    178     return LoadFPConstantValue(r_dest, value);
    179   }
    180 
    181   /* See if the value can be constructed cheaply */
    182   if (ARM_LOWREG(r_dest) && (value >= 0) && (value <= 255)) {
    183     return NewLIR2(kThumbMovImm, r_dest, value);
    184   }
    185   /* Check Modified immediate special cases */
    186   mod_imm = ModifiedImmediate(value);
    187   if (mod_imm >= 0) {
    188     res = NewLIR2(kThumb2MovImmShift, r_dest, mod_imm);
    189     return res;
    190   }
    191   mod_imm = ModifiedImmediate(~value);
    192   if (mod_imm >= 0) {
    193     res = NewLIR2(kThumb2MvnImm12, r_dest, mod_imm);
    194     return res;
    195   }
    196   /* 16-bit immediate? */
    197   if ((value & 0xffff) == value) {
    198     res = NewLIR2(kThumb2MovImm16, r_dest, value);
    199     return res;
    200   }
    201   /* Do a low/high pair */
    202   res = NewLIR2(kThumb2MovImm16, r_dest, Low16Bits(value));
    203   NewLIR2(kThumb2MovImm16H, r_dest, High16Bits(value));
    204   return res;
    205 }
    206 
    207 LIR* ArmMir2Lir::OpUnconditionalBranch(LIR* target) {
     208   LIR* res = NewLIR1(kThumbBUncond, 0 /* offset to be patched during assembly */);
    209   res->target = target;
    210   return res;
    211 }
    212 
    213 LIR* ArmMir2Lir::OpCondBranch(ConditionCode cc, LIR* target) {
    214   LIR* branch = NewLIR2(kThumb2BCond, 0 /* offset to be patched */,
    215                         ArmConditionEncoding(cc));
    216   branch->target = target;
    217   return branch;
    218 }
    219 
    220 LIR* ArmMir2Lir::OpReg(OpKind op, int r_dest_src) {
    221   ArmOpcode opcode = kThumbBkpt;
    222   switch (op) {
    223     case kOpBlx:
    224       opcode = kThumbBlxR;
    225       break;
    226     default:
    227       LOG(FATAL) << "Bad opcode " << op;
    228   }
    229   return NewLIR1(opcode, r_dest_src);
    230 }
    231 
    232 LIR* ArmMir2Lir::OpRegRegShift(OpKind op, int r_dest_src1, int r_src2,
    233                                int shift) {
    234   bool thumb_form = ((shift == 0) && ARM_LOWREG(r_dest_src1) && ARM_LOWREG(r_src2));
    235   ArmOpcode opcode = kThumbBkpt;
    236   switch (op) {
    237     case kOpAdc:
    238       opcode = (thumb_form) ? kThumbAdcRR : kThumb2AdcRRR;
    239       break;
    240     case kOpAnd:
    241       opcode = (thumb_form) ? kThumbAndRR : kThumb2AndRRR;
    242       break;
    243     case kOpBic:
    244       opcode = (thumb_form) ? kThumbBicRR : kThumb2BicRRR;
    245       break;
    246     case kOpCmn:
    247       DCHECK_EQ(shift, 0);
    248       opcode = (thumb_form) ? kThumbCmnRR : kThumb2CmnRR;
    249       break;
    250     case kOpCmp:
    251       if (thumb_form)
    252         opcode = kThumbCmpRR;
    253       else if ((shift == 0) && !ARM_LOWREG(r_dest_src1) && !ARM_LOWREG(r_src2))
    254         opcode = kThumbCmpHH;
    255       else if ((shift == 0) && ARM_LOWREG(r_dest_src1))
    256         opcode = kThumbCmpLH;
    257       else if (shift == 0)
    258         opcode = kThumbCmpHL;
    259       else
    260         opcode = kThumb2CmpRR;
    261       break;
    262     case kOpXor:
    263       opcode = (thumb_form) ? kThumbEorRR : kThumb2EorRRR;
    264       break;
    265     case kOpMov:
    266       DCHECK_EQ(shift, 0);
    267       if (ARM_LOWREG(r_dest_src1) && ARM_LOWREG(r_src2))
    268         opcode = kThumbMovRR;
    269       else if (!ARM_LOWREG(r_dest_src1) && !ARM_LOWREG(r_src2))
    270         opcode = kThumbMovRR_H2H;
    271       else if (ARM_LOWREG(r_dest_src1))
    272         opcode = kThumbMovRR_H2L;
    273       else
    274         opcode = kThumbMovRR_L2H;
    275       break;
    276     case kOpMul:
    277       DCHECK_EQ(shift, 0);
    278       opcode = (thumb_form) ? kThumbMul : kThumb2MulRRR;
    279       break;
    280     case kOpMvn:
    281       opcode = (thumb_form) ? kThumbMvn : kThumb2MnvRR;
    282       break;
    283     case kOpNeg:
    284       DCHECK_EQ(shift, 0);
    285       opcode = (thumb_form) ? kThumbNeg : kThumb2NegRR;
    286       break;
    287     case kOpOr:
    288       opcode = (thumb_form) ? kThumbOrr : kThumb2OrrRRR;
    289       break;
    290     case kOpSbc:
    291       opcode = (thumb_form) ? kThumbSbc : kThumb2SbcRRR;
    292       break;
    293     case kOpTst:
    294       opcode = (thumb_form) ? kThumbTst : kThumb2TstRR;
    295       break;
    296     case kOpLsl:
    297       DCHECK_EQ(shift, 0);
    298       opcode = (thumb_form) ? kThumbLslRR : kThumb2LslRRR;
    299       break;
    300     case kOpLsr:
    301       DCHECK_EQ(shift, 0);
    302       opcode = (thumb_form) ? kThumbLsrRR : kThumb2LsrRRR;
    303       break;
    304     case kOpAsr:
    305       DCHECK_EQ(shift, 0);
    306       opcode = (thumb_form) ? kThumbAsrRR : kThumb2AsrRRR;
    307       break;
    308     case kOpRor:
    309       DCHECK_EQ(shift, 0);
    310       opcode = (thumb_form) ? kThumbRorRR : kThumb2RorRRR;
    311       break;
    312     case kOpAdd:
    313       opcode = (thumb_form) ? kThumbAddRRR : kThumb2AddRRR;
    314       break;
    315     case kOpSub:
    316       opcode = (thumb_form) ? kThumbSubRRR : kThumb2SubRRR;
    317       break;
    318     case kOp2Byte:
    319       DCHECK_EQ(shift, 0);
    320       return NewLIR4(kThumb2Sbfx, r_dest_src1, r_src2, 0, 8);
    321     case kOp2Short:
    322       DCHECK_EQ(shift, 0);
    323       return NewLIR4(kThumb2Sbfx, r_dest_src1, r_src2, 0, 16);
    324     case kOp2Char:
    325       DCHECK_EQ(shift, 0);
    326       return NewLIR4(kThumb2Ubfx, r_dest_src1, r_src2, 0, 16);
    327     default:
    328       LOG(FATAL) << "Bad opcode: " << op;
    329       break;
    330   }
    331   DCHECK_GE(static_cast<int>(opcode), 0);
    332   if (EncodingMap[opcode].flags & IS_BINARY_OP) {
    333     return NewLIR2(opcode, r_dest_src1, r_src2);
    334   } else if (EncodingMap[opcode].flags & IS_TERTIARY_OP) {
    335     if (EncodingMap[opcode].field_loc[2].kind == kFmtShift) {
    336       return NewLIR3(opcode, r_dest_src1, r_src2, shift);
    337     } else {
    338       return NewLIR3(opcode, r_dest_src1, r_dest_src1, r_src2);
    339     }
    340   } else if (EncodingMap[opcode].flags & IS_QUAD_OP) {
    341     return NewLIR4(opcode, r_dest_src1, r_dest_src1, r_src2, shift);
    342   } else {
    343     LOG(FATAL) << "Unexpected encoding operand count";
    344     return NULL;
    345   }
    346 }
    347 
    348 LIR* ArmMir2Lir::OpRegReg(OpKind op, int r_dest_src1, int r_src2) {
    349   return OpRegRegShift(op, r_dest_src1, r_src2, 0);
    350 }
    351 
    352 LIR* ArmMir2Lir::OpRegRegRegShift(OpKind op, int r_dest, int r_src1,
    353                                   int r_src2, int shift) {
    354   ArmOpcode opcode = kThumbBkpt;
    355   bool thumb_form = (shift == 0) && ARM_LOWREG(r_dest) && ARM_LOWREG(r_src1) &&
    356       ARM_LOWREG(r_src2);
    357   switch (op) {
    358     case kOpAdd:
    359       opcode = (thumb_form) ? kThumbAddRRR : kThumb2AddRRR;
    360       break;
    361     case kOpSub:
    362       opcode = (thumb_form) ? kThumbSubRRR : kThumb2SubRRR;
    363       break;
    364     case kOpRsub:
    365       opcode = kThumb2RsubRRR;
    366       break;
    367     case kOpAdc:
    368       opcode = kThumb2AdcRRR;
    369       break;
    370     case kOpAnd:
    371       opcode = kThumb2AndRRR;
    372       break;
    373     case kOpBic:
    374       opcode = kThumb2BicRRR;
    375       break;
    376     case kOpXor:
    377       opcode = kThumb2EorRRR;
    378       break;
    379     case kOpMul:
    380       DCHECK_EQ(shift, 0);
    381       opcode = kThumb2MulRRR;
    382       break;
    383     case kOpOr:
    384       opcode = kThumb2OrrRRR;
    385       break;
    386     case kOpSbc:
    387       opcode = kThumb2SbcRRR;
    388       break;
    389     case kOpLsl:
    390       DCHECK_EQ(shift, 0);
    391       opcode = kThumb2LslRRR;
    392       break;
    393     case kOpLsr:
    394       DCHECK_EQ(shift, 0);
    395       opcode = kThumb2LsrRRR;
    396       break;
    397     case kOpAsr:
    398       DCHECK_EQ(shift, 0);
    399       opcode = kThumb2AsrRRR;
    400       break;
    401     case kOpRor:
    402       DCHECK_EQ(shift, 0);
    403       opcode = kThumb2RorRRR;
    404       break;
    405     default:
    406       LOG(FATAL) << "Bad opcode: " << op;
    407       break;
    408   }
    409   DCHECK_GE(static_cast<int>(opcode), 0);
    410   if (EncodingMap[opcode].flags & IS_QUAD_OP) {
    411     return NewLIR4(opcode, r_dest, r_src1, r_src2, shift);
    412   } else {
    413     DCHECK(EncodingMap[opcode].flags & IS_TERTIARY_OP);
    414     return NewLIR3(opcode, r_dest, r_src1, r_src2);
    415   }
    416 }
    417 
    418 LIR* ArmMir2Lir::OpRegRegReg(OpKind op, int r_dest, int r_src1, int r_src2) {
    419   return OpRegRegRegShift(op, r_dest, r_src1, r_src2, 0);
    420 }
    421 
    422 LIR* ArmMir2Lir::OpRegRegImm(OpKind op, int r_dest, int r_src1, int value) {
    423   LIR* res;
    424   bool neg = (value < 0);
    425   int abs_value = (neg) ? -value : value;
    426   ArmOpcode opcode = kThumbBkpt;
    427   ArmOpcode alt_opcode = kThumbBkpt;
    428   bool all_low_regs = (ARM_LOWREG(r_dest) && ARM_LOWREG(r_src1));
    429   int mod_imm = ModifiedImmediate(value);
    430   int mod_imm_neg = ModifiedImmediate(-value);
    431 
    432   switch (op) {
    433     case kOpLsl:
    434       if (all_low_regs)
    435         return NewLIR3(kThumbLslRRI5, r_dest, r_src1, value);
    436       else
    437         return NewLIR3(kThumb2LslRRI5, r_dest, r_src1, value);
    438     case kOpLsr:
    439       if (all_low_regs)
    440         return NewLIR3(kThumbLsrRRI5, r_dest, r_src1, value);
    441       else
    442         return NewLIR3(kThumb2LsrRRI5, r_dest, r_src1, value);
    443     case kOpAsr:
    444       if (all_low_regs)
    445         return NewLIR3(kThumbAsrRRI5, r_dest, r_src1, value);
    446       else
    447         return NewLIR3(kThumb2AsrRRI5, r_dest, r_src1, value);
    448     case kOpRor:
    449       return NewLIR3(kThumb2RorRRI5, r_dest, r_src1, value);
    450     case kOpAdd:
    451       if (ARM_LOWREG(r_dest) && (r_src1 == r13sp) &&
    452         (value <= 1020) && ((value & 0x3) == 0)) {
    453         return NewLIR3(kThumbAddSpRel, r_dest, r_src1, value >> 2);
    454       } else if (ARM_LOWREG(r_dest) && (r_src1 == r15pc) &&
    455           (value <= 1020) && ((value & 0x3) == 0)) {
    456         return NewLIR3(kThumbAddPcRel, r_dest, r_src1, value >> 2);
    457       }
    458       // Note: intentional fallthrough
    459     case kOpSub:
    460       if (all_low_regs && ((abs_value & 0x7) == abs_value)) {
    461         if (op == kOpAdd)
    462           opcode = (neg) ? kThumbSubRRI3 : kThumbAddRRI3;
    463         else
    464           opcode = (neg) ? kThumbAddRRI3 : kThumbSubRRI3;
    465         return NewLIR3(opcode, r_dest, r_src1, abs_value);
    466       } else if ((abs_value & 0xff) == abs_value) {
    467         if (op == kOpAdd)
    468           opcode = (neg) ? kThumb2SubRRI12 : kThumb2AddRRI12;
    469         else
    470           opcode = (neg) ? kThumb2AddRRI12 : kThumb2SubRRI12;
    471         return NewLIR3(opcode, r_dest, r_src1, abs_value);
    472       }
    473       if (mod_imm_neg >= 0) {
    474         op = (op == kOpAdd) ? kOpSub : kOpAdd;
    475         mod_imm = mod_imm_neg;
    476       }
    477       if (op == kOpSub) {
    478         opcode = kThumb2SubRRI8;
    479         alt_opcode = kThumb2SubRRR;
    480       } else {
    481         opcode = kThumb2AddRRI8;
    482         alt_opcode = kThumb2AddRRR;
    483       }
    484       break;
    485     case kOpRsub:
    486       opcode = kThumb2RsubRRI8;
    487       alt_opcode = kThumb2RsubRRR;
    488       break;
    489     case kOpAdc:
    490       opcode = kThumb2AdcRRI8;
    491       alt_opcode = kThumb2AdcRRR;
    492       break;
    493     case kOpSbc:
    494       opcode = kThumb2SbcRRI8;
    495       alt_opcode = kThumb2SbcRRR;
    496       break;
    497     case kOpOr:
    498       opcode = kThumb2OrrRRI8;
    499       alt_opcode = kThumb2OrrRRR;
    500       break;
    501     case kOpAnd:
    502       opcode = kThumb2AndRRI8;
    503       alt_opcode = kThumb2AndRRR;
    504       break;
    505     case kOpXor:
    506       opcode = kThumb2EorRRI8;
    507       alt_opcode = kThumb2EorRRR;
    508       break;
    509     case kOpMul:
    510       // TUNING: power of 2, shift & add
    511       mod_imm = -1;
    512       alt_opcode = kThumb2MulRRR;
    513       break;
     514     case kOpCmp: {
     515       if (mod_imm >= 0) {
     516         res = NewLIR2(kThumb2CmpRI12, r_src1, mod_imm);
     517       } else {
     518         int r_tmp = AllocTemp();
     519         res = LoadConstant(r_tmp, value);
     520         OpRegReg(kOpCmp, r_src1, r_tmp);
     521         FreeTemp(r_tmp);
     522       }
     523       return res;
     524     }
    527     default:
    528       LOG(FATAL) << "Bad opcode: " << op;
    529   }
    530 
    531   if (mod_imm >= 0) {
    532     return NewLIR3(opcode, r_dest, r_src1, mod_imm);
    533   } else {
    534     int r_scratch = AllocTemp();
    535     LoadConstant(r_scratch, value);
    536     if (EncodingMap[alt_opcode].flags & IS_QUAD_OP)
    537       res = NewLIR4(alt_opcode, r_dest, r_src1, r_scratch, 0);
    538     else
    539       res = NewLIR3(alt_opcode, r_dest, r_src1, r_scratch);
    540     FreeTemp(r_scratch);
    541     return res;
    542   }
    543 }
    544 
    545 /* Handle Thumb-only variants here - otherwise punt to OpRegRegImm */
    546 LIR* ArmMir2Lir::OpRegImm(OpKind op, int r_dest_src1, int value) {
    547   bool neg = (value < 0);
    548   int abs_value = (neg) ? -value : value;
    549   bool short_form = (((abs_value & 0xff) == abs_value) && ARM_LOWREG(r_dest_src1));
    550   ArmOpcode opcode = kThumbBkpt;
    551   switch (op) {
    552     case kOpAdd:
    553       if (!neg && (r_dest_src1 == r13sp) && (value <= 508)) { /* sp */
    554         DCHECK_EQ((value & 0x3), 0);
    555         return NewLIR1(kThumbAddSpI7, value >> 2);
    556       } else if (short_form) {
    557         opcode = (neg) ? kThumbSubRI8 : kThumbAddRI8;
    558       }
    559       break;
    560     case kOpSub:
    561       if (!neg && (r_dest_src1 == r13sp) && (value <= 508)) { /* sp */
    562         DCHECK_EQ((value & 0x3), 0);
    563         return NewLIR1(kThumbSubSpI7, value >> 2);
    564       } else if (short_form) {
    565         opcode = (neg) ? kThumbAddRI8 : kThumbSubRI8;
    566       }
    567       break;
     568     case kOpCmp:
     569       if (!neg && short_form) {
     570         opcode = kThumbCmpRI8;
     571       } else {
     572         /* Negative or wide immediates are handled by OpRegRegImm below */
     573         short_form = false;
     574       }
     575       break;
    578     default:
     579       /* Punt to OpRegRegImm - bad cases will be caught there */
    580       short_form = false;
    581       break;
    582   }
    583   if (short_form) {
    584     return NewLIR2(opcode, r_dest_src1, abs_value);
    585   } else {
    586     return OpRegRegImm(op, r_dest_src1, r_dest_src1, value);
    587   }
    588 }
    589 
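         /*
          * Load a 64-bit constant into a core register pair or a double precision
          * FP register, falling back to the literal pool when the value has no
          * cheap encoding.
          */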
    590 LIR* ArmMir2Lir::LoadConstantWide(int r_dest_lo, int r_dest_hi, int64_t value) {
    591   LIR* res = NULL;
    592   int32_t val_lo = Low32Bits(value);
    593   int32_t val_hi = High32Bits(value);
    594   int target_reg = S2d(r_dest_lo, r_dest_hi);
    595   if (ARM_FPREG(r_dest_lo)) {
    596     if ((val_lo == 0) && (val_hi == 0)) {
    597       // TODO: we need better info about the target CPU.  a vector exclusive or
     598       //       would probably be better here if we could rely on its existence.
    599       // Load an immediate +2.0 (which encodes to 0)
    600       NewLIR2(kThumb2Vmovd_IMM8, target_reg, 0);
    601       // +0.0 = +2.0 - +2.0
    602       res = NewLIR3(kThumb2Vsubd, target_reg, target_reg, target_reg);
    603     } else {
    604       int encoded_imm = EncodeImmDouble(value);
    605       if (encoded_imm >= 0) {
    606         res = NewLIR2(kThumb2Vmovd_IMM8, target_reg, encoded_imm);
    607       }
    608     }
    609   } else {
     610     if (InexpensiveConstantInt(val_lo) && InexpensiveConstantInt(val_hi)) {
    611       res = LoadConstantNoClobber(r_dest_lo, val_lo);
    612       LoadConstantNoClobber(r_dest_hi, val_hi);
    613     }
    614   }
    615   if (res == NULL) {
    616     // No short form - load from the literal pool.
    617     LIR* data_target = ScanLiteralPoolWide(literal_list_, val_lo, val_hi);
    618     if (data_target == NULL) {
    619       data_target = AddWideData(&literal_list_, val_lo, val_hi);
    620     }
    621     if (ARM_FPREG(r_dest_lo)) {
    622       res = RawLIR(current_dalvik_offset_, kThumb2Vldrd,
    623                    target_reg, r15pc, 0, 0, 0, data_target);
    624     } else {
    625       res = RawLIR(current_dalvik_offset_, kThumb2LdrdPcRel8,
    626                    r_dest_lo, r_dest_hi, r15pc, 0, 0, data_target);
    627     }
    628     SetMemRefType(res, true, kLiteral);
    629     res->alias_info = reinterpret_cast<uintptr_t>(data_target);
    630     AppendLIR(res);
    631   }
    632   return res;
    633 }
    634 
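         /*
          * Pack a shift type and amount into the single operand form used by the
          * kFmtShift encodings: bits [6:2] hold the amount, bits [1:0] the type.
          */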
    635 int ArmMir2Lir::EncodeShift(int code, int amount) {
    636   return ((amount & 0x1f) << 2) | code;
    637 }
    638 
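         /* Load a value of the given size from rBase + (r_index << scale) into r_dest. */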
    639 LIR* ArmMir2Lir::LoadBaseIndexed(int rBase, int r_index, int r_dest,
    640                                  int scale, OpSize size) {
    641   bool all_low_regs = ARM_LOWREG(rBase) && ARM_LOWREG(r_index) && ARM_LOWREG(r_dest);
    642   LIR* load;
    643   ArmOpcode opcode = kThumbBkpt;
    644   bool thumb_form = (all_low_regs && (scale == 0));
    645   int reg_ptr;
    646 
    647   if (ARM_FPREG(r_dest)) {
    648     if (ARM_SINGLEREG(r_dest)) {
    649       DCHECK((size == kWord) || (size == kSingle));
    650       opcode = kThumb2Vldrs;
    651       size = kSingle;
    652     } else {
    653       DCHECK(ARM_DOUBLEREG(r_dest));
    654       DCHECK((size == kLong) || (size == kDouble));
    655       DCHECK_EQ((r_dest & 0x1), 0);
    656       opcode = kThumb2Vldrd;
    657       size = kDouble;
    658     }
    659   } else {
    660     if (size == kSingle)
    661       size = kWord;
    662   }
    663 
    664   switch (size) {
    665     case kDouble:  // fall-through
    666     case kSingle:
    667       reg_ptr = AllocTemp();
    668       if (scale) {
    669         NewLIR4(kThumb2AddRRR, reg_ptr, rBase, r_index,
    670                 EncodeShift(kArmLsl, scale));
    671       } else {
    672         OpRegRegReg(kOpAdd, reg_ptr, rBase, r_index);
    673       }
    674       load = NewLIR3(opcode, r_dest, reg_ptr, 0);
    675       FreeTemp(reg_ptr);
    676       return load;
    677     case kWord:
    678       opcode = (thumb_form) ? kThumbLdrRRR : kThumb2LdrRRR;
    679       break;
    680     case kUnsignedHalf:
    681       opcode = (thumb_form) ? kThumbLdrhRRR : kThumb2LdrhRRR;
    682       break;
    683     case kSignedHalf:
    684       opcode = (thumb_form) ? kThumbLdrshRRR : kThumb2LdrshRRR;
    685       break;
    686     case kUnsignedByte:
    687       opcode = (thumb_form) ? kThumbLdrbRRR : kThumb2LdrbRRR;
    688       break;
    689     case kSignedByte:
    690       opcode = (thumb_form) ? kThumbLdrsbRRR : kThumb2LdrsbRRR;
    691       break;
    692     default:
    693       LOG(FATAL) << "Bad size: " << size;
    694   }
    695   if (thumb_form)
    696     load = NewLIR3(opcode, r_dest, rBase, r_index);
    697   else
    698     load = NewLIR4(opcode, r_dest, rBase, r_index, scale);
    699 
    700   return load;
    701 }
    702 
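         /* Store a value of the given size from r_src to rBase + (r_index << scale). */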
    703 LIR* ArmMir2Lir::StoreBaseIndexed(int rBase, int r_index, int r_src,
    704                                   int scale, OpSize size) {
    705   bool all_low_regs = ARM_LOWREG(rBase) && ARM_LOWREG(r_index) && ARM_LOWREG(r_src);
    706   LIR* store = NULL;
    707   ArmOpcode opcode = kThumbBkpt;
    708   bool thumb_form = (all_low_regs && (scale == 0));
    709   int reg_ptr;
    710 
    711   if (ARM_FPREG(r_src)) {
    712     if (ARM_SINGLEREG(r_src)) {
    713       DCHECK((size == kWord) || (size == kSingle));
    714       opcode = kThumb2Vstrs;
    715       size = kSingle;
    716     } else {
    717       DCHECK(ARM_DOUBLEREG(r_src));
    718       DCHECK((size == kLong) || (size == kDouble));
    719       DCHECK_EQ((r_src & 0x1), 0);
    720       opcode = kThumb2Vstrd;
    721       size = kDouble;
    722     }
    723   } else {
    724     if (size == kSingle)
    725       size = kWord;
    726   }
    727 
    728   switch (size) {
    729     case kDouble:  // fall-through
    730     case kSingle:
    731       reg_ptr = AllocTemp();
    732       if (scale) {
    733         NewLIR4(kThumb2AddRRR, reg_ptr, rBase, r_index,
    734                 EncodeShift(kArmLsl, scale));
    735       } else {
    736         OpRegRegReg(kOpAdd, reg_ptr, rBase, r_index);
    737       }
    738       store = NewLIR3(opcode, r_src, reg_ptr, 0);
    739       FreeTemp(reg_ptr);
    740       return store;
    741     case kWord:
    742       opcode = (thumb_form) ? kThumbStrRRR : kThumb2StrRRR;
    743       break;
    744     case kUnsignedHalf:
    745     case kSignedHalf:
    746       opcode = (thumb_form) ? kThumbStrhRRR : kThumb2StrhRRR;
    747       break;
    748     case kUnsignedByte:
    749     case kSignedByte:
    750       opcode = (thumb_form) ? kThumbStrbRRR : kThumb2StrbRRR;
    751       break;
    752     default:
    753       LOG(FATAL) << "Bad size: " << size;
    754   }
    755   if (thumb_form)
    756     store = NewLIR3(opcode, r_src, rBase, r_index);
    757   else
    758     store = NewLIR4(opcode, r_src, rBase, r_index, scale);
    759 
    760   return store;
    761 }
    762 
     763 /*
     764  * Load value from base + displacement.  Note: no null check is performed
     765  * here; callers must null-check the base register themselves if needed.
     766  */
    768 LIR* ArmMir2Lir::LoadBaseDispBody(int rBase, int displacement, int r_dest,
    769                                   int r_dest_hi, OpSize size, int s_reg) {
    770   LIR* load = NULL;
    771   ArmOpcode opcode = kThumbBkpt;
    772   bool short_form = false;
    773   bool thumb2Form = (displacement < 4092 && displacement >= 0);
    774   bool all_low_regs = (ARM_LOWREG(rBase) && ARM_LOWREG(r_dest));
    775   int encoded_disp = displacement;
    776   bool is64bit = false;
    777   bool already_generated = false;
    778   switch (size) {
    779     case kDouble:
    780     case kLong:
    781       is64bit = true;
    782       if (ARM_FPREG(r_dest)) {
    783         if (ARM_SINGLEREG(r_dest)) {
    784           DCHECK(ARM_FPREG(r_dest_hi));
    785           r_dest = S2d(r_dest, r_dest_hi);
    786         }
    787         opcode = kThumb2Vldrd;
    788         if (displacement <= 1020) {
    789           short_form = true;
    790           encoded_disp >>= 2;
    791         }
    792         break;
    793       } else {
    794         if (displacement <= 1020) {
    795           load = NewLIR4(kThumb2LdrdI8, r_dest, r_dest_hi, rBase, displacement >> 2);
    796         } else {
    797           load = LoadBaseDispBody(rBase, displacement, r_dest,
    798                                  -1, kWord, s_reg);
    799           LoadBaseDispBody(rBase, displacement + 4, r_dest_hi,
    800                            -1, kWord, INVALID_SREG);
    801         }
    802         already_generated = true;
    803       }
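               // Note: intentional fallthrough - already_generated is set, so the
               // kSingle/kWord code below will not emit a second load.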
    804     case kSingle:
    805     case kWord:
    806       if (ARM_FPREG(r_dest)) {
    807         opcode = kThumb2Vldrs;
    808         if (displacement <= 1020) {
    809           short_form = true;
    810           encoded_disp >>= 2;
    811         }
    812         break;
    813       }
    814       if (ARM_LOWREG(r_dest) && (rBase == r15pc) &&
    815           (displacement <= 1020) && (displacement >= 0)) {
    816         short_form = true;
    817         encoded_disp >>= 2;
    818         opcode = kThumbLdrPcRel;
    819       } else if (ARM_LOWREG(r_dest) && (rBase == r13sp) &&
    820           (displacement <= 1020) && (displacement >= 0)) {
    821         short_form = true;
    822         encoded_disp >>= 2;
    823         opcode = kThumbLdrSpRel;
    824       } else if (all_low_regs && displacement < 128 && displacement >= 0) {
    825         DCHECK_EQ((displacement & 0x3), 0);
    826         short_form = true;
    827         encoded_disp >>= 2;
    828         opcode = kThumbLdrRRI5;
    829       } else if (thumb2Form) {
    830         short_form = true;
    831         opcode = kThumb2LdrRRI12;
    832       }
    833       break;
    834     case kUnsignedHalf:
    835       if (all_low_regs && displacement < 64 && displacement >= 0) {
    836         DCHECK_EQ((displacement & 0x1), 0);
    837         short_form = true;
    838         encoded_disp >>= 1;
    839         opcode = kThumbLdrhRRI5;
    840       } else if (displacement < 4092 && displacement >= 0) {
    841         short_form = true;
    842         opcode = kThumb2LdrhRRI12;
    843       }
    844       break;
    845     case kSignedHalf:
    846       if (thumb2Form) {
    847         short_form = true;
    848         opcode = kThumb2LdrshRRI12;
    849       }
    850       break;
    851     case kUnsignedByte:
    852       if (all_low_regs && displacement < 32 && displacement >= 0) {
    853         short_form = true;
    854         opcode = kThumbLdrbRRI5;
    855       } else if (thumb2Form) {
    856         short_form = true;
    857         opcode = kThumb2LdrbRRI12;
    858       }
    859       break;
    860     case kSignedByte:
    861       if (thumb2Form) {
    862         short_form = true;
    863         opcode = kThumb2LdrsbRRI12;
    864       }
    865       break;
    866     default:
    867       LOG(FATAL) << "Bad size: " << size;
    868   }
    869 
    870   if (!already_generated) {
    871     if (short_form) {
    872       load = NewLIR3(opcode, r_dest, rBase, encoded_disp);
    873     } else {
    874       int reg_offset = AllocTemp();
    875       LoadConstant(reg_offset, encoded_disp);
    876       load = LoadBaseIndexed(rBase, reg_offset, r_dest, 0, size);
    877       FreeTemp(reg_offset);
    878     }
    879   }
    880 
    881   // TODO: in future may need to differentiate Dalvik accesses w/ spills
    882   if (rBase == rARM_SP) {
    883     AnnotateDalvikRegAccess(load, displacement >> 2, true /* is_load */, is64bit);
    884   }
    885   return load;
    886 }
    887 
    888 LIR* ArmMir2Lir::LoadBaseDisp(int rBase, int displacement, int r_dest,
    889                               OpSize size, int s_reg) {
    890   return LoadBaseDispBody(rBase, displacement, r_dest, -1, size, s_reg);
    891 }
    892 
    893 LIR* ArmMir2Lir::LoadBaseDispWide(int rBase, int displacement, int r_dest_lo,
    894                                   int r_dest_hi, int s_reg) {
    895   return LoadBaseDispBody(rBase, displacement, r_dest_lo, r_dest_hi, kLong, s_reg);
    896 }
    897 
    898 
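         /* Store a value of the given size to rBase + displacement. */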
    899 LIR* ArmMir2Lir::StoreBaseDispBody(int rBase, int displacement,
    900                                    int r_src, int r_src_hi, OpSize size) {
    901   LIR* store = NULL;
    902   ArmOpcode opcode = kThumbBkpt;
    903   bool short_form = false;
    904   bool thumb2Form = (displacement < 4092 && displacement >= 0);
    905   bool all_low_regs = (ARM_LOWREG(rBase) && ARM_LOWREG(r_src));
    906   int encoded_disp = displacement;
    907   bool is64bit = false;
    908   bool already_generated = false;
    909   switch (size) {
    910     case kLong:
    911     case kDouble:
    912       is64bit = true;
    913       if (!ARM_FPREG(r_src)) {
    914         if (displacement <= 1020) {
    915           store = NewLIR4(kThumb2StrdI8, r_src, r_src_hi, rBase, displacement >> 2);
    916         } else {
    917           store = StoreBaseDispBody(rBase, displacement, r_src, -1, kWord);
    918           StoreBaseDispBody(rBase, displacement + 4, r_src_hi, -1, kWord);
    919         }
    920         already_generated = true;
    921       } else {
    922         if (ARM_SINGLEREG(r_src)) {
    923           DCHECK(ARM_FPREG(r_src_hi));
    924           r_src = S2d(r_src, r_src_hi);
    925         }
    926         opcode = kThumb2Vstrd;
    927         if (displacement <= 1020) {
    928           short_form = true;
    929           encoded_disp >>= 2;
    930         }
    931       }
    932       break;
    933     case kSingle:
    934     case kWord:
    935       if (ARM_FPREG(r_src)) {
    936         DCHECK(ARM_SINGLEREG(r_src));
    937         opcode = kThumb2Vstrs;
    938         if (displacement <= 1020) {
    939           short_form = true;
    940           encoded_disp >>= 2;
    941         }
    942         break;
    943       }
    944       if (ARM_LOWREG(r_src) && (rBase == r13sp) &&
    945           (displacement <= 1020) && (displacement >= 0)) {
    946         short_form = true;
    947         encoded_disp >>= 2;
    948         opcode = kThumbStrSpRel;
    949       } else if (all_low_regs && displacement < 128 && displacement >= 0) {
    950         DCHECK_EQ((displacement & 0x3), 0);
    951         short_form = true;
    952         encoded_disp >>= 2;
    953         opcode = kThumbStrRRI5;
    954       } else if (thumb2Form) {
    955         short_form = true;
    956         opcode = kThumb2StrRRI12;
    957       }
    958       break;
    959     case kUnsignedHalf:
    960     case kSignedHalf:
    961       if (all_low_regs && displacement < 64 && displacement >= 0) {
    962         DCHECK_EQ((displacement & 0x1), 0);
    963         short_form = true;
    964         encoded_disp >>= 1;
    965         opcode = kThumbStrhRRI5;
    966       } else if (thumb2Form) {
    967         short_form = true;
    968         opcode = kThumb2StrhRRI12;
    969       }
    970       break;
    971     case kUnsignedByte:
    972     case kSignedByte:
    973       if (all_low_regs && displacement < 32 && displacement >= 0) {
    974         short_form = true;
    975         opcode = kThumbStrbRRI5;
    976       } else if (thumb2Form) {
    977         short_form = true;
    978         opcode = kThumb2StrbRRI12;
    979       }
    980       break;
    981     default:
    982       LOG(FATAL) << "Bad size: " << size;
    983   }
    984   if (!already_generated) {
    985     if (short_form) {
    986       store = NewLIR3(opcode, r_src, rBase, encoded_disp);
    987     } else {
    988       int r_scratch = AllocTemp();
    989       LoadConstant(r_scratch, encoded_disp);
    990       store = StoreBaseIndexed(rBase, r_scratch, r_src, 0, size);
    991       FreeTemp(r_scratch);
    992     }
    993   }
    994 
    995   // TODO: In future, may need to differentiate Dalvik & spill accesses
    996   if (rBase == rARM_SP) {
    997     AnnotateDalvikRegAccess(store, displacement >> 2, false /* is_load */, is64bit);
    998   }
    999   return store;
   1000 }
   1001 
   1002 LIR* ArmMir2Lir::StoreBaseDisp(int rBase, int displacement, int r_src,
   1003                                OpSize size) {
   1004   return StoreBaseDispBody(rBase, displacement, r_src, -1, size);
   1005 }
   1006 
   1007 LIR* ArmMir2Lir::StoreBaseDispWide(int rBase, int displacement,
   1008                                    int r_src_lo, int r_src_hi) {
   1009   return StoreBaseDispBody(rBase, displacement, r_src_lo, r_src_hi, kLong);
   1010 }
   1011 
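         /*
          * Copy between floating point registers, or between a core register and a
          * single precision FP register (vmovs/vmovd, fmsr, fmrs).
          */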
   1012 LIR* ArmMir2Lir::OpFpRegCopy(int r_dest, int r_src) {
   1013   int opcode;
   1014   DCHECK_EQ(ARM_DOUBLEREG(r_dest), ARM_DOUBLEREG(r_src));
   1015   if (ARM_DOUBLEREG(r_dest)) {
   1016     opcode = kThumb2Vmovd;
   1017   } else {
   1018     if (ARM_SINGLEREG(r_dest)) {
   1019       opcode = ARM_SINGLEREG(r_src) ? kThumb2Vmovs : kThumb2Fmsr;
   1020     } else {
   1021       DCHECK(ARM_SINGLEREG(r_src));
   1022       opcode = kThumb2Fmrs;
   1023     }
   1024   }
   1025   LIR* res = RawLIR(current_dalvik_offset_, opcode, r_dest, r_src);
   1026   if (!(cu_->disable_opt & (1 << kSafeOptimizations)) && r_dest == r_src) {
   1027     res->flags.is_nop = true;
   1028   }
   1029   return res;
   1030 }
   1031 
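         /*
          * The remaining routines are unreachable on Arm; they exist only to
          * satisfy the shared Mir2Lir interface and fail hard if ever called.
          */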
   1032 LIR* ArmMir2Lir::OpThreadMem(OpKind op, ThreadOffset thread_offset) {
   1033   LOG(FATAL) << "Unexpected use of OpThreadMem for Arm";
   1034   return NULL;
   1035 }
   1036 
   1037 LIR* ArmMir2Lir::OpMem(OpKind op, int rBase, int disp) {
   1038   LOG(FATAL) << "Unexpected use of OpMem for Arm";
   1039   return NULL;
   1040 }
   1041 
   1042 LIR* ArmMir2Lir::StoreBaseIndexedDisp(int rBase, int r_index, int scale,
   1043                                       int displacement, int r_src, int r_src_hi, OpSize size,
   1044                                       int s_reg) {
   1045   LOG(FATAL) << "Unexpected use of StoreBaseIndexedDisp for Arm";
   1046   return NULL;
   1047 }
   1048 
   1049 LIR* ArmMir2Lir::OpRegMem(OpKind op, int r_dest, int rBase, int offset) {
   1050   LOG(FATAL) << "Unexpected use of OpRegMem for Arm";
   1051   return NULL;
   1052 }
   1053 
   1054 LIR* ArmMir2Lir::LoadBaseIndexedDisp(int rBase, int r_index, int scale,
   1055                                      int displacement, int r_dest, int r_dest_hi, OpSize size,
   1056                                      int s_reg) {
   1057   LOG(FATAL) << "Unexpected use of LoadBaseIndexedDisp for Arm";
   1058   return NULL;
   1059 }
   1060 
   1061 }  // namespace art
   1062