/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "assembler_arm.h"

#include "base/bit_utils.h"
#include "base/logging.h"
#include "entrypoints/quick/quick_entrypoints.h"
#include "offsets.h"
#include "thread.h"

namespace art {
namespace arm {

const char* kRegisterNames[] = {
  "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
  "fp", "ip", "sp", "lr", "pc"
};

const char* kConditionNames[] = {
  "EQ", "NE", "CS", "CC", "MI", "PL", "VS", "VC", "HI", "LS", "GE", "LT", "GT",
  "LE", "AL",
};

std::ostream& operator<<(std::ostream& os, const Register& rhs) {
  if (rhs >= R0 && rhs <= PC) {
    os << kRegisterNames[rhs];
  } else {
    os << "Register[" << static_cast<int>(rhs) << "]";
  }
  return os;
}


std::ostream& operator<<(std::ostream& os, const SRegister& rhs) {
  if (rhs >= S0 && rhs < kNumberOfSRegisters) {
    os << "s" << static_cast<int>(rhs);
  } else {
    os << "SRegister[" << static_cast<int>(rhs) << "]";
  }
  return os;
}


std::ostream& operator<<(std::ostream& os, const DRegister& rhs) {
  if (rhs >= D0 && rhs < kNumberOfDRegisters) {
    os << "d" << static_cast<int>(rhs);
  } else {
    os << "DRegister[" << static_cast<int>(rhs) << "]";
  }
  return os;
}

std::ostream& operator<<(std::ostream& os, const Condition& rhs) {
  if (rhs >= EQ && rhs <= AL) {
    os << kConditionNames[rhs];
  } else {
    os << "Condition[" << static_cast<int>(rhs) << "]";
  }
  return os;
}

ShifterOperand::ShifterOperand(uint32_t immed)
    : type_(kImmediate), rm_(kNoRegister), rs_(kNoRegister),
      is_rotate_(false), is_shift_(false), shift_(kNoShift), rotate_(0), immed_(immed) {
  CHECK(immed < (1u << 12) || ArmAssembler::ModifiedImmediate(immed) != kInvalidModifiedImmediate);
}


uint32_t ShifterOperand::encodingArm() const {
  CHECK(is_valid());
  switch (type_) {
    case kImmediate:
      if (is_rotate_) {
        return (rotate_ << kRotateShift) | (immed_ << kImmed8Shift);
      } else {
        return immed_;
      }
    case kRegister:
      if (is_shift_) {
        uint32_t shift_type;
        switch (shift_) {
          case arm::Shift::ROR:
            shift_type = static_cast<uint32_t>(shift_);
            CHECK_NE(immed_, 0U);
            break;
          case arm::Shift::RRX:
            shift_type = static_cast<uint32_t>(arm::Shift::ROR);  // Same encoding as ROR.
            CHECK_EQ(immed_, 0U);
            break;
          default:
            shift_type = static_cast<uint32_t>(shift_);
        }
        // Shifted immediate or register.
        if (rs_ == kNoRegister) {
          // Immediate shift.
          return immed_ << kShiftImmShift |
                 shift_type << kShiftShift |
                 static_cast<uint32_t>(rm_);
        } else {
          // Register shift.
          return static_cast<uint32_t>(rs_) << kShiftRegisterShift |
                 shift_type << kShiftShift | (1 << 4) |
                 static_cast<uint32_t>(rm_);
        }
      } else {
        // Simple register.
        return static_cast<uint32_t>(rm_);
      }
    default:
      // Can't get here.
      LOG(FATAL) << "Invalid shifter operand for ARM";
      return 0;
  }
}
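
// For reference, the ARM data-processing operand-2 field layouts produced
// above (a sketch of the bit positions from the ARM Architecture Reference
// Manual; the kXxxShift constants are assumed to match these positions):
//
//   rotated immediate:      [11:8] rotate_imm, [7:0] imm8
//                           value = imm8 ROR (2 * rotate_imm)
//   immediate-shifted reg:  [11:7] shift_imm, [6:5] type, [4] = 0, [3:0] Rm
//   register-shifted reg:   [11:8] Rs, [7] = 0, [6:5] type, [4] = 1, [3:0] Rm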

uint32_t ShifterOperand::encodingThumb() const {
  switch (type_) {
    case kImmediate:
      return immed_;
    case kRegister:
      if (is_shift_) {
        // Shifted immediate or register.
        if (rs_ == kNoRegister) {
          // Immediate shift.
          if (shift_ == RRX) {
            // RRX is encoded as an ROR with imm 0.
            return ROR << 4 | static_cast<uint32_t>(rm_);
          } else {
            uint32_t imm3 = immed_ >> 2;
            uint32_t imm2 = immed_ & 3U /* 0b11 */;

            return imm3 << 12 | imm2 << 6 | shift_ << 4 |
                static_cast<uint32_t>(rm_);
          }
        } else {
          LOG(FATAL) << "No register-shifted register instruction available in thumb";
          return 0;
        }
      } else {
        // Simple register.
        return static_cast<uint32_t>(rm_);
      }
    default:
      // Can't get here.
      LOG(FATAL) << "Invalid shifter operand for thumb";
      UNREACHABLE();
  }
}
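
// A worked example of the Thumb-2 shifted-register operand above
// (illustrative): for "r4, LSL #5", immed_ = 5, so imm3 = 0b001 lands in bits
// [14:12] and imm2 = 0b01 in bits [7:6]; with the LSL type bits (0b00) at
// [5:4] and Rm = 4 at [3:0], encodingThumb() yields 0x1044.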

uint32_t Address::encodingArm() const {
  CHECK(IsAbsoluteUint<12>(offset_));
  uint32_t encoding;
  if (is_immed_offset_) {
    if (offset_ < 0) {
      encoding = (am_ ^ (1 << kUShift)) | -offset_;  // Flip U to adjust sign.
    } else {
      encoding = am_ | offset_;
    }
  } else {
    uint32_t shift = shift_;
    if (shift == RRX) {
      CHECK_EQ(offset_, 0);
      shift = ROR;
    }
    encoding = am_ | static_cast<uint32_t>(rm_) | shift << 5 | offset_ << 7 | B25;
  }
  encoding |= static_cast<uint32_t>(rn_) << kRnShift;
  return encoding;
}


uint32_t Address::encodingThumb(bool is_32bit) const {
  uint32_t encoding = 0;
  if (is_immed_offset_) {
    encoding = static_cast<uint32_t>(rn_) << 16;
    // Check for the T3/T4 encoding.
    // PUW must be Offset (0b110) for T3.
    // Convert ARM PU0W to PUW.
    // The mode is in ARM encoding format, which is:
    // |P|U|0|W|
    // We need this in thumb2 mode:
    // |P|U|W|

    uint32_t am = am_;
    int32_t offset = offset_;
    if (offset < 0) {
      am ^= 1 << kUShift;
      offset = -offset;
    }
    if (offset_ < 0 || (offset >= 0 && offset < 256 &&
        am_ != Mode::Offset)) {
      // T4 encoding.
      uint32_t PUW = am >> 21;   // Move down to bottom of word.
      PUW = (PUW >> 1) | (PUW & 1);   // Bits 3, 2 and 0.
      // If P is 0 then W must be 1 (different from ARM).
      if ((PUW & 4U /* 0b100 */) == 0) {
        PUW |= 1U /* 0b1 */;
      }
      encoding |= B11 | PUW << 8 | offset;
    } else {
      // T3 encoding (also sets op1 to 0b01).
      encoding |= B23 | offset_;
    }
  } else {
    // Register offset, possibly shifted.
    // Need to choose between encoding T1 (16 bit) or T2.
    // Only Offset mode is supported.  Shift must be LSL and the count
    // is only 2 bits.
    CHECK_EQ(shift_, LSL);
    CHECK_LE(offset_, 4);
    CHECK_EQ(am_, Offset);
    bool is_t2 = is_32bit;
    if (ArmAssembler::IsHighRegister(rn_) || ArmAssembler::IsHighRegister(rm_)) {
      is_t2 = true;
    } else if (offset_ != 0) {
      is_t2 = true;
    }
    if (is_t2) {
      encoding = static_cast<uint32_t>(rn_) << 16 | static_cast<uint32_t>(rm_) |
          offset_ << 4;
    } else {
      encoding = static_cast<uint32_t>(rn_) << 3 | static_cast<uint32_t>(rm_) << 6;
    }
  }
  return encoding;
}

// This is very similar to the ARM encoding, except the offset is 10 bits.
uint32_t Address::encodingThumbLdrdStrd() const {
  DCHECK(IsImmediate());
  uint32_t encoding;
  uint32_t am = am_;
  // If P is 0 then W must be 1 (different from ARM).
  uint32_t PU1W = am_ >> 21;   // Move down to bottom of word.
  if ((PU1W & 8U /* 0b1000 */) == 0) {
    am |= 1 << 21;      // Set W bit.
  }
  if (offset_ < 0) {
    int32_t off = -offset_;
    CHECK_LT(off, 1024);
    CHECK_EQ((off & 3 /* 0b11 */), 0);    // Must be multiple of 4.
    encoding = (am ^ (1 << kUShift)) | off >> 2;  // Flip U to adjust sign.
  } else {
    CHECK_LT(offset_, 1024);
    CHECK_EQ((offset_ & 3 /* 0b11 */), 0);    // Must be multiple of 4.
    encoding = am | offset_ >> 2;
  }
  encoding |= static_cast<uint32_t>(rn_) << 16;
  return encoding;
}

// Encoding for ARM addressing mode 3.
uint32_t Address::encoding3() const {
  const uint32_t offset_mask = (1 << 12) - 1;
  uint32_t encoding = encodingArm();
  uint32_t offset = encoding & offset_mask;
  CHECK_LT(offset, 256u);
  return (encoding & ~offset_mask) | ((offset & 0xf0) << 4) | (offset & 0xf);
}
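
// A worked example of the mode-3 split above (illustrative): an 8-bit offset
// of 0xAB comes back from encodingArm() in the low bits and is re-packed as
// imm4H:imm4L, i.e. ((0xAB & 0xf0) << 4) | (0xAB & 0xf) == 0xA0B, putting the
// high nibble in instruction bits [11:8] and the low nibble in bits [3:0].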

// Encoding for vfp load/store addressing.
uint32_t Address::vencoding() const {
  CHECK(IsAbsoluteUint<10>(offset_));  // In the range -1020 to +1020.
  CHECK_ALIGNED(offset_, 2);  // Only 2-byte alignment is checked here, but the encoding below assumes a multiple of 4.

  const uint32_t offset_mask = (1 << 12) - 1;
  uint32_t encoding = encodingArm();
  uint32_t offset = encoding & offset_mask;
  CHECK((am_ == Offset) || (am_ == NegOffset));
  uint32_t vencoding_value = (encoding & (0xf << kRnShift)) | (offset >> 2);
  if (am_ == Offset) {
    vencoding_value |= 1 << 23;
  }
  return vencoding_value;
}
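
// For reference (illustrative): the VFP encoding stores the offset as
// imm8 = |offset| / 4 in the low byte, with the U bit (bit 23) set for a
// positive offset. E.g. an offset of +256 encodes as imm8 = 64 with U = 1.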


bool Address::CanHoldLoadOffsetArm(LoadOperandType type, int offset) {
  switch (type) {
    case kLoadSignedByte:
    case kLoadSignedHalfword:
    case kLoadUnsignedHalfword:
    case kLoadWordPair:
      return IsAbsoluteUint<8>(offset);  // Addressing mode 3.
    case kLoadUnsignedByte:
    case kLoadWord:
      return IsAbsoluteUint<12>(offset);  // Addressing mode 2.
    case kLoadSWord:
    case kLoadDWord:
      return IsAbsoluteUint<10>(offset);  // VFP addressing mode.
    default:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
}


bool Address::CanHoldStoreOffsetArm(StoreOperandType type, int offset) {
  switch (type) {
    case kStoreHalfword:
    case kStoreWordPair:
      return IsAbsoluteUint<8>(offset);  // Addressing mode 3.
    case kStoreByte:
    case kStoreWord:
      return IsAbsoluteUint<12>(offset);  // Addressing mode 2.
    case kStoreSWord:
    case kStoreDWord:
      return IsAbsoluteUint<10>(offset);  // VFP addressing mode.
    default:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
}

bool Address::CanHoldLoadOffsetThumb(LoadOperandType type, int offset) {
  switch (type) {
    case kLoadSignedByte:
    case kLoadSignedHalfword:
    case kLoadUnsignedHalfword:
    case kLoadUnsignedByte:
    case kLoadWord:
      return IsAbsoluteUint<12>(offset);
    case kLoadSWord:
    case kLoadDWord:
      return IsAbsoluteUint<10>(offset);  // VFP addressing mode.
    case kLoadWordPair:
      return IsAbsoluteUint<10>(offset);
    default:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
}


bool Address::CanHoldStoreOffsetThumb(StoreOperandType type, int offset) {
  switch (type) {
    case kStoreHalfword:
    case kStoreByte:
    case kStoreWord:
      return IsAbsoluteUint<12>(offset);
    case kStoreSWord:
    case kStoreDWord:
      return IsAbsoluteUint<10>(offset);  // VFP addressing mode.
    case kStoreWordPair:
      return IsAbsoluteUint<10>(offset);
    default:
      LOG(FATAL) << "UNREACHABLE";
      UNREACHABLE();
  }
}

void ArmAssembler::Pad(uint32_t bytes) {
  AssemblerBuffer::EnsureCapacity ensured(&buffer_);
  for (uint32_t i = 0; i < bytes; ++i) {
    buffer_.Emit<uint8_t>(0);
  }
}

static dwarf::Reg DWARFReg(Register reg) {
  return dwarf::Reg::ArmCore(static_cast<int>(reg));
}

static dwarf::Reg DWARFReg(SRegister reg) {
  return dwarf::Reg::ArmFp(static_cast<int>(reg));
}

constexpr size_t kFramePointerSize = kArmPointerSize;

void ArmAssembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
                              const std::vector<ManagedRegister>& callee_save_regs,
                              const ManagedRegisterEntrySpills& entry_spills) {
  CHECK_EQ(buffer_.Size(), 0U);  // Nothing emitted yet.
  CHECK_ALIGNED(frame_size, kStackAlignment);
  CHECK_EQ(R0, method_reg.AsArm().AsCoreRegister());

  // Push callee saves and link register.
  RegList core_spill_mask = 1 << LR;
  uint32_t fp_spill_mask = 0;
  for (const ManagedRegister& reg : callee_save_regs) {
    if (reg.AsArm().IsCoreRegister()) {
      core_spill_mask |= 1 << reg.AsArm().AsCoreRegister();
    } else {
      fp_spill_mask |= 1 << reg.AsArm().AsSRegister();
    }
  }
  PushList(core_spill_mask);
  cfi_.AdjustCFAOffset(POPCOUNT(core_spill_mask) * kFramePointerSize);
  cfi_.RelOffsetForMany(DWARFReg(Register(0)), 0, core_spill_mask, kFramePointerSize);
  if (fp_spill_mask != 0) {
    vpushs(SRegister(CTZ(fp_spill_mask)), POPCOUNT(fp_spill_mask));
    cfi_.AdjustCFAOffset(POPCOUNT(fp_spill_mask) * kFramePointerSize);
    cfi_.RelOffsetForMany(DWARFReg(SRegister(0)), 0, fp_spill_mask, kFramePointerSize);
  }

  // Increase frame to required size.
  int pushed_values = POPCOUNT(core_spill_mask) + POPCOUNT(fp_spill_mask);
  CHECK_GT(frame_size, pushed_values * kFramePointerSize);  // Must at least have space for Method*.
  IncreaseFrameSize(frame_size - pushed_values * kFramePointerSize);  // Handles CFI as well.

  // Write out Method*.
  StoreToOffset(kStoreWord, R0, SP, 0);

  // Write out entry spills.
  int32_t offset = frame_size + kFramePointerSize;
  for (size_t i = 0; i < entry_spills.size(); ++i) {
    ArmManagedRegister reg = entry_spills.at(i).AsArm();
    if (reg.IsNoRegister()) {
      // Only increment stack offset.
      ManagedRegisterSpill spill = entry_spills.at(i);
      offset += spill.getSize();
    } else if (reg.IsCoreRegister()) {
      StoreToOffset(kStoreWord, reg.AsCoreRegister(), SP, offset);
      offset += 4;
    } else if (reg.IsSRegister()) {
      StoreSToOffset(reg.AsSRegister(), SP, offset);
      offset += 4;
    } else if (reg.IsDRegister()) {
      StoreDToOffset(reg.AsDRegister(), SP, offset);
      offset += 8;
    }
  }
}
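
// A sketch of the frame laid out by BuildFrame above (illustrative; the exact
// slots depend on the spill masks and frame_size):
//
//   higher addresses
//   | entry spills           |  <- SP + frame_size + kFramePointerSize onwards
//   +------------------------+  <- SP + frame_size
//   | LR + callee-save cores |  <- PushList(core_spill_mask)
//   | callee-save S-regs     |  <- vpushs(...)
//   | locals / padding       |  <- IncreaseFrameSize(...)
//   | Method*                |  <- SP + 0
//   +------------------------+
//   lower addresses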

void ArmAssembler::RemoveFrame(size_t frame_size,
                               const std::vector<ManagedRegister>& callee_save_regs) {
  CHECK_ALIGNED(frame_size, kStackAlignment);
  cfi_.RememberState();

  // Compute callee saves to pop and PC.
  RegList core_spill_mask = 1 << PC;
  uint32_t fp_spill_mask = 0;
  for (const ManagedRegister& reg : callee_save_regs) {
    if (reg.AsArm().IsCoreRegister()) {
      core_spill_mask |= 1 << reg.AsArm().AsCoreRegister();
    } else {
      fp_spill_mask |= 1 << reg.AsArm().AsSRegister();
    }
  }

  // Decrease frame to start of callee saves.
  int pop_values = POPCOUNT(core_spill_mask) + POPCOUNT(fp_spill_mask);
  CHECK_GT(frame_size, pop_values * kFramePointerSize);
  DecreaseFrameSize(frame_size - (pop_values * kFramePointerSize));  // Handles CFI as well.

  if (fp_spill_mask != 0) {
    vpops(SRegister(CTZ(fp_spill_mask)), POPCOUNT(fp_spill_mask));
    cfi_.AdjustCFAOffset(-kFramePointerSize * POPCOUNT(fp_spill_mask));
    cfi_.RestoreMany(DWARFReg(SRegister(0)), fp_spill_mask);
  }

  // Pop callee saves and PC.
  PopList(core_spill_mask);

  // The CFI should be restored for any code that follows the exit block.
  cfi_.RestoreState();
  cfi_.DefCFAOffset(frame_size);
}

void ArmAssembler::IncreaseFrameSize(size_t adjust) {
  AddConstant(SP, -adjust);
  cfi_.AdjustCFAOffset(adjust);
}

void ArmAssembler::DecreaseFrameSize(size_t adjust) {
  AddConstant(SP, adjust);
  cfi_.AdjustCFAOffset(-adjust);
}

void ArmAssembler::Store(FrameOffset dest, ManagedRegister msrc, size_t size) {
  ArmManagedRegister src = msrc.AsArm();
  if (src.IsNoRegister()) {
    CHECK_EQ(0u, size);
  } else if (src.IsCoreRegister()) {
    CHECK_EQ(4u, size);
    StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
  } else if (src.IsRegisterPair()) {
    CHECK_EQ(8u, size);
    StoreToOffset(kStoreWord, src.AsRegisterPairLow(), SP, dest.Int32Value());
    StoreToOffset(kStoreWord, src.AsRegisterPairHigh(),
                  SP, dest.Int32Value() + 4);
  } else if (src.IsSRegister()) {
    StoreSToOffset(src.AsSRegister(), SP, dest.Int32Value());
  } else {
    CHECK(src.IsDRegister()) << src;
    StoreDToOffset(src.AsDRegister(), SP, dest.Int32Value());
  }
}

void ArmAssembler::StoreRef(FrameOffset dest, ManagedRegister msrc) {
  ArmManagedRegister src = msrc.AsArm();
  CHECK(src.IsCoreRegister()) << src;
  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
}

void ArmAssembler::StoreRawPtr(FrameOffset dest, ManagedRegister msrc) {
  ArmManagedRegister src = msrc.AsArm();
  CHECK(src.IsCoreRegister()) << src;
  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
}

void ArmAssembler::StoreSpanning(FrameOffset dest, ManagedRegister msrc,
                                 FrameOffset in_off, ManagedRegister mscratch) {
  ArmManagedRegister src = msrc.AsArm();
  ArmManagedRegister scratch = mscratch.AsArm();
  StoreToOffset(kStoreWord, src.AsCoreRegister(), SP, dest.Int32Value());
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, in_off.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
}

void ArmAssembler::CopyRef(FrameOffset dest, FrameOffset src,
                           ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
}

void ArmAssembler::LoadRef(ManagedRegister mdest, ManagedRegister base, MemberOffset offs,
                           bool poison_reference) {
  ArmManagedRegister dst = mdest.AsArm();
  CHECK(dst.IsCoreRegister()) << dst;
  LoadFromOffset(kLoadWord, dst.AsCoreRegister(),
                 base.AsArm().AsCoreRegister(), offs.Int32Value());
  if (kPoisonHeapReferences && poison_reference) {
    rsb(dst.AsCoreRegister(), dst.AsCoreRegister(), ShifterOperand(0));
  }
}

void ArmAssembler::LoadRef(ManagedRegister mdest, FrameOffset src) {
  ArmManagedRegister dst = mdest.AsArm();
  CHECK(dst.IsCoreRegister()) << dst;
  LoadFromOffset(kLoadWord, dst.AsCoreRegister(), SP, src.Int32Value());
}

void ArmAssembler::LoadRawPtr(ManagedRegister mdest, ManagedRegister base,
                              Offset offs) {
  ArmManagedRegister dst = mdest.AsArm();
  CHECK(dst.IsCoreRegister()) << dst;
  LoadFromOffset(kLoadWord, dst.AsCoreRegister(),
                 base.AsArm().AsCoreRegister(), offs.Int32Value());
}

void ArmAssembler::StoreImmediateToFrame(FrameOffset dest, uint32_t imm,
                                         ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadImmediate(scratch.AsCoreRegister(), imm);
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
}

void ArmAssembler::StoreImmediateToThread32(ThreadOffset<4> dest, uint32_t imm,
                                            ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadImmediate(scratch.AsCoreRegister(), imm);
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), TR, dest.Int32Value());
}

static void EmitLoad(ArmAssembler* assembler, ManagedRegister m_dst,
                     Register src_register, int32_t src_offset, size_t size) {
  ArmManagedRegister dst = m_dst.AsArm();
  if (dst.IsNoRegister()) {
    CHECK_EQ(0u, size) << dst;
  } else if (dst.IsCoreRegister()) {
    CHECK_EQ(4u, size) << dst;
    assembler->LoadFromOffset(kLoadWord, dst.AsCoreRegister(), src_register, src_offset);
  } else if (dst.IsRegisterPair()) {
    CHECK_EQ(8u, size) << dst;
    assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairLow(), src_register, src_offset);
    assembler->LoadFromOffset(kLoadWord, dst.AsRegisterPairHigh(), src_register, src_offset + 4);
  } else if (dst.IsSRegister()) {
    assembler->LoadSFromOffset(dst.AsSRegister(), src_register, src_offset);
  } else {
    CHECK(dst.IsDRegister()) << dst;
    assembler->LoadDFromOffset(dst.AsDRegister(), src_register, src_offset);
  }
}

void ArmAssembler::Load(ManagedRegister m_dst, FrameOffset src, size_t size) {
  return EmitLoad(this, m_dst, SP, src.Int32Value(), size);
}

void ArmAssembler::LoadFromThread32(ManagedRegister m_dst, ThreadOffset<4> src, size_t size) {
  return EmitLoad(this, m_dst, TR, src.Int32Value(), size);
}

void ArmAssembler::LoadRawPtrFromThread32(ManagedRegister m_dst, ThreadOffset<4> offs) {
  ArmManagedRegister dst = m_dst.AsArm();
  CHECK(dst.IsCoreRegister()) << dst;
  LoadFromOffset(kLoadWord, dst.AsCoreRegister(), TR, offs.Int32Value());
}

void ArmAssembler::CopyRawPtrFromThread32(FrameOffset fr_offs,
                                          ThreadOffset<4> thr_offs,
                                          ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 TR, thr_offs.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
                SP, fr_offs.Int32Value());
}

void ArmAssembler::CopyRawPtrToThread32(ThreadOffset<4> thr_offs,
                                        FrameOffset fr_offs,
                                        ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 SP, fr_offs.Int32Value());
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
                TR, thr_offs.Int32Value());
}

void ArmAssembler::StoreStackOffsetToThread32(ThreadOffset<4> thr_offs,
                                              FrameOffset fr_offs,
                                              ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  AddConstant(scratch.AsCoreRegister(), SP, fr_offs.Int32Value(), AL);
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(),
                TR, thr_offs.Int32Value());
}

void ArmAssembler::StoreStackPointerToThread32(ThreadOffset<4> thr_offs) {
  StoreToOffset(kStoreWord, SP, TR, thr_offs.Int32Value());
}

void ArmAssembler::SignExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "no sign extension necessary for arm";
}

void ArmAssembler::ZeroExtend(ManagedRegister /*mreg*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL) << "no zero extension necessary for arm";
}

void ArmAssembler::Move(ManagedRegister m_dst, ManagedRegister m_src, size_t /*size*/) {
  ArmManagedRegister dst = m_dst.AsArm();
  ArmManagedRegister src = m_src.AsArm();
  if (!dst.Equals(src)) {
    if (dst.IsCoreRegister()) {
      CHECK(src.IsCoreRegister()) << src;
      mov(dst.AsCoreRegister(), ShifterOperand(src.AsCoreRegister()));
    } else if (dst.IsDRegister()) {
      CHECK(src.IsDRegister()) << src;
      vmovd(dst.AsDRegister(), src.AsDRegister());
    } else if (dst.IsSRegister()) {
      CHECK(src.IsSRegister()) << src;
      vmovs(dst.AsSRegister(), src.AsSRegister());
    } else {
      CHECK(dst.IsRegisterPair()) << dst;
      CHECK(src.IsRegisterPair()) << src;
      // Ensure that the first move doesn't clobber the input of the second.
      if (src.AsRegisterPairHigh() != dst.AsRegisterPairLow()) {
        mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
        mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
      } else {
        mov(dst.AsRegisterPairHigh(), ShifterOperand(src.AsRegisterPairHigh()));
        mov(dst.AsRegisterPairLow(), ShifterOperand(src.AsRegisterPairLow()));
      }
    }
  }
}

void ArmAssembler::Copy(FrameOffset dest, FrameOffset src, ManagedRegister mscratch, size_t size) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  CHECK(size == 4 || size == 8) << size;
  if (size == 4) {
    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
  } else if (size == 8) {
    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value());
    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value());
    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP, src.Int32Value() + 4);
    StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, dest.Int32Value() + 4);
  }
}

void ArmAssembler::Copy(FrameOffset dest, ManagedRegister src_base, Offset src_offset,
                        ManagedRegister mscratch, size_t size) {
  Register scratch = mscratch.AsArm().AsCoreRegister();
  CHECK_EQ(size, 4u);
  LoadFromOffset(kLoadWord, scratch, src_base.AsArm().AsCoreRegister(), src_offset.Int32Value());
  StoreToOffset(kStoreWord, scratch, SP, dest.Int32Value());
}

void ArmAssembler::Copy(ManagedRegister dest_base, Offset dest_offset, FrameOffset src,
                        ManagedRegister mscratch, size_t size) {
  Register scratch = mscratch.AsArm().AsCoreRegister();
  CHECK_EQ(size, 4u);
  LoadFromOffset(kLoadWord, scratch, SP, src.Int32Value());
  StoreToOffset(kStoreWord, scratch, dest_base.AsArm().AsCoreRegister(), dest_offset.Int32Value());
}

void ArmAssembler::Copy(FrameOffset /*dst*/, FrameOffset /*src_base*/, Offset /*src_offset*/,
                        ManagedRegister /*mscratch*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL);
}

void ArmAssembler::Copy(ManagedRegister dest, Offset dest_offset,
                        ManagedRegister src, Offset src_offset,
                        ManagedRegister mscratch, size_t size) {
  CHECK_EQ(size, 4u);
  Register scratch = mscratch.AsArm().AsCoreRegister();
  LoadFromOffset(kLoadWord, scratch, src.AsArm().AsCoreRegister(), src_offset.Int32Value());
  StoreToOffset(kStoreWord, scratch, dest.AsArm().AsCoreRegister(), dest_offset.Int32Value());
}

void ArmAssembler::Copy(FrameOffset /*dst*/, Offset /*dest_offset*/, FrameOffset /*src*/,
                        Offset /*src_offset*/, ManagedRegister /*scratch*/, size_t /*size*/) {
  UNIMPLEMENTED(FATAL);
}

void ArmAssembler::CreateHandleScopeEntry(ManagedRegister mout_reg,
                                          FrameOffset handle_scope_offset,
                                          ManagedRegister min_reg, bool null_allowed) {
  ArmManagedRegister out_reg = mout_reg.AsArm();
  ArmManagedRegister in_reg = min_reg.AsArm();
  CHECK(in_reg.IsNoRegister() || in_reg.IsCoreRegister()) << in_reg;
  CHECK(out_reg.IsCoreRegister()) << out_reg;
  if (null_allowed) {
    // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
    // the address in the handle scope holding the reference.
    // e.g. out_reg = (handle == 0) ? 0 : (SP+handle_offset)
    if (in_reg.IsNoRegister()) {
      LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
                     SP, handle_scope_offset.Int32Value());
      in_reg = out_reg;
    }
    cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
    if (!out_reg.Equals(in_reg)) {
      it(EQ, kItElse);
      LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
    } else {
      it(NE);
    }
    AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
  } else {
    AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
  }
}
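
// For reference, the null_allowed path above assembles to roughly this
// Thumb-2 sequence when out_reg differs from in_reg (a sketch with
// illustrative operand names, not emitted verbatim):
//   cmp   in, #0
//   ite   eq
//   moveq out, #0                    ; null maps to a 0 handle scope entry
//   addne out, sp, #handle_scope_off ; otherwise the address of the entry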

void ArmAssembler::CreateHandleScopeEntry(FrameOffset out_off,
                                          FrameOffset handle_scope_offset,
                                          ManagedRegister mscratch,
                                          bool null_allowed) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  if (null_allowed) {
    LoadFromOffset(kLoadWord, scratch.AsCoreRegister(), SP,
                   handle_scope_offset.Int32Value());
    // Null values get a handle scope entry value of 0.  Otherwise, the handle scope entry is
    // the address in the handle scope holding the reference.
    // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
    cmp(scratch.AsCoreRegister(), ShifterOperand(0));
    it(NE);
    AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
  } else {
    AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
  }
  StoreToOffset(kStoreWord, scratch.AsCoreRegister(), SP, out_off.Int32Value());
}

void ArmAssembler::LoadReferenceFromHandleScope(ManagedRegister mout_reg,
                                                ManagedRegister min_reg) {
  ArmManagedRegister out_reg = mout_reg.AsArm();
  ArmManagedRegister in_reg = min_reg.AsArm();
  CHECK(out_reg.IsCoreRegister()) << out_reg;
  CHECK(in_reg.IsCoreRegister()) << in_reg;
  if (!out_reg.Equals(in_reg)) {
    LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);     // TODO: why EQ?
  }
  cmp(in_reg.AsCoreRegister(), ShifterOperand(0));
  it(NE);
  LoadFromOffset(kLoadWord, out_reg.AsCoreRegister(),
                 in_reg.AsCoreRegister(), 0, NE);
}

void ArmAssembler::VerifyObject(ManagedRegister /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references.
}

void ArmAssembler::VerifyObject(FrameOffset /*src*/, bool /*could_be_null*/) {
  // TODO: not validating references.
}

void ArmAssembler::Call(ManagedRegister mbase, Offset offset,
                        ManagedRegister mscratch) {
  ArmManagedRegister base = mbase.AsArm();
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(base.IsCoreRegister()) << base;
  CHECK(scratch.IsCoreRegister()) << scratch;
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 base.AsCoreRegister(), offset.Int32Value());
  blx(scratch.AsCoreRegister());
  // TODO: place reference map on call.
}

void ArmAssembler::Call(FrameOffset base, Offset offset,
                        ManagedRegister mscratch) {
  ArmManagedRegister scratch = mscratch.AsArm();
  CHECK(scratch.IsCoreRegister()) << scratch;
  // Call *(*(SP + base) + offset).
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 SP, base.Int32Value());
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 scratch.AsCoreRegister(), offset.Int32Value());
  blx(scratch.AsCoreRegister());
  // TODO: place reference map on call.
}

void ArmAssembler::CallFromThread32(ThreadOffset<4> /*offset*/, ManagedRegister /*scratch*/) {
  UNIMPLEMENTED(FATAL);
}

void ArmAssembler::GetCurrentThread(ManagedRegister tr) {
  mov(tr.AsArm().AsCoreRegister(), ShifterOperand(TR));
}

void ArmAssembler::GetCurrentThread(FrameOffset offset,
                                    ManagedRegister /*scratch*/) {
  StoreToOffset(kStoreWord, TR, SP, offset.Int32Value(), AL);
}

void ArmAssembler::ExceptionPoll(ManagedRegister mscratch, size_t stack_adjust) {
  ArmManagedRegister scratch = mscratch.AsArm();
  ArmExceptionSlowPath* slow = new ArmExceptionSlowPath(scratch, stack_adjust);
  buffer_.EnqueueSlowPath(slow);
  LoadFromOffset(kLoadWord, scratch.AsCoreRegister(),
                 TR, Thread::ExceptionOffset<4>().Int32Value());
  cmp(scratch.AsCoreRegister(), ShifterOperand(0));
  b(slow->Entry(), NE);
}

void ArmExceptionSlowPath::Emit(Assembler* sasm) {
  ArmAssembler* sp_asm = down_cast<ArmAssembler*>(sasm);
#define __ sp_asm->
  __ Bind(&entry_);
  if (stack_adjust_ != 0) {  // Fix up the frame.
    __ DecreaseFrameSize(stack_adjust_);
  }
  // Pass exception object as argument.
  // Don't care about preserving R0 as this call won't return.
  __ mov(R0, ShifterOperand(scratch_.AsCoreRegister()));
  // Set up call to Thread::Current()->pDeliverException.
  __ LoadFromOffset(kLoadWord, R12, TR, QUICK_ENTRYPOINT_OFFSET(4, pDeliverException).Int32Value());
  __ blx(R12);
  // Call never returns.
  __ bkpt(0);
#undef __
}


static int LeadingZeros(uint32_t val) {
  uint32_t alt;
  int32_t n;
  int32_t count;

  count = 16;
  n = 32;
  do {
    alt = val >> count;
    if (alt != 0) {
      n = n - count;
      val = alt;
    }
    count >>= 1;
  } while (count);
  return n - val;
}
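
// For reference: LeadingZeros() is a binary-search count-leading-zeros. It
// matches __builtin_clz() for nonzero inputs and returns 32 for 0 (where the
// builtin is undefined). Illustrative trace: LeadingZeros(0x00008000) narrows
// n to 17 with val ending at 1, so it returns 16.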


uint32_t ArmAssembler::ModifiedImmediate(uint32_t value) {
  int32_t z_leading;
  int32_t z_trailing;
  uint32_t b0 = value & 0xff;

  /* Note: case of value==0 must use 0:000:0:0000000 encoding */
  if (value <= 0xFF)
    return b0;  // 0:000:a:bcdefgh.
  if (value == ((b0 << 16) | b0))
    return (0x1 << 12) | b0; /* 0:001:a:bcdefgh */
  if (value == ((b0 << 24) | (b0 << 16) | (b0 << 8) | b0))
    return (0x3 << 12) | b0; /* 0:011:a:bcdefgh */
  b0 = (value >> 8) & 0xff;
  if (value == ((b0 << 24) | (b0 << 8)))
    return (0x2 << 12) | b0; /* 0:010:a:bcdefgh */
  /* Can we do it with rotation? */
  z_leading = LeadingZeros(value);
  z_trailing = 32 - LeadingZeros(~value & (value - 1));
  /* A run of eight or fewer active bits? */
  if ((z_leading + z_trailing) < 24)
    return kInvalidModifiedImmediate;  /* No - bail */
  /* Left-justify the constant, discarding msb (known to be 1). */
  value <<= z_leading + 1;
  /* Create bcdefgh. */
  value >>= 25;

  /* Put it all together. */
  uint32_t v = 8 + z_leading;

  uint32_t i = (v & 16U /* 0b10000 */) >> 4;
  uint32_t imm3 = (v >> 1) & 7U /* 0b111 */;
  uint32_t a = v & 1;
  return value | i << 26 | imm3 << 12 | a << 7;
}
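
// A worked example (illustrative): ModifiedImmediate(0x00AB00AB) hits the
// "0:001:a:bcdefgh" pattern (b0 repeated in bytes 0 and 2) and returns
// (0x1 << 12) | 0xAB == 0x10AB. A value with a single run of set bits, such
// as 0x000003FC, instead takes the rotation path and packs i:imm3:a plus the
// seven low bits bcdefgh into the Thumb-2 modified-immediate fields.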

}  // namespace arm
}  // namespace art