/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* This file contains codegen for the Thumb2 ISA. */

#include "arm_lir.h"
#include "codegen_arm.h"
#include "dex/quick/mir_to_lir-inl.h"
#include "gc/accounting/card_table.h"
#include "entrypoints/quick/quick_entrypoints.h"

namespace art {

/*
 * The sparse table in the literal pool is an array of <key,displacement>
 * pairs.  For each set, we'll load them as a pair using ldmia.
 * This means that the register number of the temp we use for the key
 * must be lower than the reg for the displacement.
 *
 * The test loop will look something like:
 *
 *   adr   r_base, <table>
 *   ldr   r_val, [rARM_SP, v_reg_off]
 *   mov   r_idx, #table_size
 * lp:
 *   ldmia r_base!, {r_key, r_disp}
 *   sub   r_idx, #1
 *   cmp   r_val, r_key
 *   ifeq
 *   add   rARM_PC, r_disp   ; This is the branch from which we compute displacement
 *   cbnz  r_idx, lp
 */
void ArmMir2Lir::GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  if (cu_->verbose) {
    DumpSparseSwitchTable(table);
  }
  // Add the table to the list - we'll process it later
  SwitchTable *tab_rec =
      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
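  // table points at the Dex sparse-switch payload: ushort ident (0x0200), ushort size,
  // then 'size' int keys followed by 'size' relative branch targets, so table[1] is the
  // number of case entries.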
  uint32_t size = table[1];
  tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
  switch_tables_.Insert(tab_rec);

  // Get the switch value
  rl_src = LoadValue(rl_src, kCoreReg);
  RegStorage r_base = AllocTemp();
  /* Allocate key and disp temps */
  RegStorage r_key = AllocTemp();
  RegStorage r_disp = AllocTemp();
  // Make sure r_key's register number is less than r_disp's number for ldmia
  if (r_key.GetReg() > r_disp.GetReg()) {
    RegStorage tmp = r_disp;
    r_disp = r_key;
    r_key = tmp;
  }
  // Materialize a pointer to the switch table
  NewLIR3(kThumb2Adr, r_base.GetReg(), 0, WrapPointer(tab_rec));
  // Set up r_idx
  RegStorage r_idx = AllocTemp();
  LoadConstant(r_idx, size);
  // Establish loop branch target
  LIR* target = NewLIR0(kPseudoTargetLabel);
  // Load next key/disp
  NewLIR2(kThumb2LdmiaWB, r_base.GetReg(), (1 << r_key.GetRegNum()) | (1 << r_disp.GetRegNum()));
  OpRegReg(kOpCmp, r_key, rl_src.reg);
  // Go if match. NOTE: No instruction set switch here - must stay Thumb2
  LIR* it = OpIT(kCondEq, "");
  LIR* switch_branch = NewLIR1(kThumb2AddPCR, r_disp.GetReg());
  OpEndIT(it);
  tab_rec->anchor = switch_branch;
  // Needs to use setflags encoding here
  OpRegRegImm(kOpSub, r_idx, r_idx, 1);  // For value == 1, this should set flags.
  DCHECK(last_lir_insn_->u.m.def_mask->HasBit(ResourceMask::kCCode));
  OpCondBranch(kCondNe, target);
}


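/*
 * The packed table in the literal pool is an array of 32-bit displacements indexed by
 * (switch value - low_key).  The code below materializes a pointer to the table,
 * bounds-checks the unbiased key, loads the matching displacement and adds it to the PC.
 */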
void ArmMir2Lir::GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  if (cu_->verbose) {
    DumpPackedSwitchTable(table);
  }
  // Add the table to the list - we'll process it later
  SwitchTable *tab_rec =
      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), kArenaAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  uint32_t size = table[1];
  tab_rec->targets =
      static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*), kArenaAllocLIR));
  switch_tables_.Insert(tab_rec);

  // Get the switch value
  rl_src = LoadValue(rl_src, kCoreReg);
  RegStorage table_base = AllocTemp();
  // Materialize a pointer to the switch table
  NewLIR3(kThumb2Adr, table_base.GetReg(), 0, WrapPointer(tab_rec));
  int low_key = s4FromSwitchData(&table[2]);
  RegStorage keyReg;
  // Remove the bias, if necessary
  if (low_key == 0) {
    keyReg = rl_src.reg;
  } else {
    keyReg = AllocTemp();
    OpRegRegImm(kOpSub, keyReg, rl_src.reg, low_key);
  }
  // Bounds check - if < 0 or >= size, fall through to the code after the switch
  OpRegImm(kOpCmp, keyReg, size-1);
  LIR* branch_over = OpCondBranch(kCondHi, NULL);

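  // Each table entry is a 32-bit displacement relative to the kThumb2AddPCR anchor below,
  // so the index is scaled by 4 (scale of 2 in the indexed load).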
  // Load the displacement from the switch table
  RegStorage disp_reg = AllocTemp();
  LoadBaseIndexed(table_base, keyReg, disp_reg, 2, k32);

  // ..and go! NOTE: No instruction set switch here - must stay Thumb2
  LIR* switch_branch = NewLIR1(kThumb2AddPCR, disp_reg.GetReg());
  tab_rec->anchor = switch_branch;

  /* branch_over target here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch_over->target = target;
}

/*
 * Array data table format:
 *  ushort ident = 0x0300   magic value
 *  ushort width            width of each element in the table
 *  uint   size             number of elements in the table
 *  ubyte  data[size*width] table of data values (may contain a single-byte
 *                          padding at the end)
 *
 * Total size is 4+(width * size + 1)/2 16-bit code units.
 */
void ArmMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  // Add the table to the list - we'll process it later
  FillArrayData *tab_rec =
      static_cast<FillArrayData*>(arena_->Alloc(sizeof(FillArrayData), kArenaAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  uint16_t width = tab_rec->table[1];
  uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
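  // The extra 8 bytes cover the payload header (ushort ident + ushort width + uint size).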
  tab_rec->size = (size * width) + 8;

  fill_array_data_.Insert(tab_rec);

  // Making a call - use explicit registers
  FlushAllRegs();   /* Everything to home location */
  LoadValueDirectFixed(rl_src, rs_r0);
  LoadWordDisp(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(4, pHandleFillArrayData).Int32Value(),
               rs_rARM_LR);
  // Materialize a pointer to the fill data image
  NewLIR3(kThumb2Adr, rs_r1.GetReg(), 0, WrapPointer(tab_rec));
  ClobberCallerSave();
  LIR* call_inst = OpReg(kOpBlx, rs_rARM_LR);
  MarkSafepointPC(call_inst);
}

/*
 * Handle unlocked -> thin locked transition inline or else call out to quick entrypoint. For more
 * details see monitor.cc.
 */
void ArmMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
  FlushAllRegs();
  // FIXME: need separate LoadValues for object references.
  LoadValueDirectFixed(rl_src, rs_r0);  // Get obj
  LockCallTemps();  // Prepare for explicit register usage
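  // Fast-path register use: r0 = object, r2 = thread id, r1 = lock word / strex status.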
  constexpr bool kArchVariantHasGoodBranchPredictor = false;  // TODO: true if cortex-A15.
  if (kArchVariantHasGoodBranchPredictor) {
    LIR* null_check_branch = nullptr;
    if ((opt_flags & MIR_IGNORE_NULL_CHECK) && !(cu_->disable_opt & (1 << kNullCheckElimination))) {
      null_check_branch = nullptr;  // No null check.
    } else {
      // If the null-check fails, it's handled by the slow path to reduce exception-related metadata.
      if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
        null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
      }
    }
    Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
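    // The ldrex/strex offsets are encoded in words, hence the byte offset is shifted right by 2.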
    NewLIR3(kThumb2Ldrex, rs_r1.GetReg(), rs_r0.GetReg(),
        mirror::Object::MonitorOffset().Int32Value() >> 2);
    MarkPossibleNullPointerException(opt_flags);
    LIR* not_unlocked_branch = OpCmpImmBranch(kCondNe, rs_r1, 0, NULL);
    NewLIR4(kThumb2Strex, rs_r1.GetReg(), rs_r2.GetReg(), rs_r0.GetReg(),
        mirror::Object::MonitorOffset().Int32Value() >> 2);
    LIR* lock_success_branch = OpCmpImmBranch(kCondEq, rs_r1, 0, NULL);

    LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
    not_unlocked_branch->target = slow_path_target;
    if (null_check_branch != nullptr) {
      null_check_branch->target = slow_path_target;
    }
    // TODO: move to a slow path.
    // Go expensive route - artLockObjectFromCode(obj);
    LoadWordDisp(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(4, pLockObject).Int32Value(), rs_rARM_LR);
    ClobberCallerSave();
    LIR* call_inst = OpReg(kOpBlx, rs_rARM_LR);
    MarkSafepointPC(call_inst);

    LIR* success_target = NewLIR0(kPseudoTargetLabel);
    lock_success_branch->target = success_target;
    GenMemBarrier(kLoadAny);
  } else {
    // Explicit null-check, as the slow path is entered using an IT block.
    GenNullCheck(rs_r0, opt_flags);
    Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
    NewLIR3(kThumb2Ldrex, rs_r1.GetReg(), rs_r0.GetReg(),
        mirror::Object::MonitorOffset().Int32Value() >> 2);
    MarkPossibleNullPointerException(opt_flags);
    OpRegImm(kOpCmp, rs_r1, 0);
    LIR* it = OpIT(kCondEq, "");
    NewLIR4(kThumb2Strex/*eq*/, rs_r1.GetReg(), rs_r2.GetReg(), rs_r0.GetReg(),
        mirror::Object::MonitorOffset().Int32Value() >> 2);
    OpEndIT(it);
    OpRegImm(kOpCmp, rs_r1, 0);
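    // IT NE with an extra 'T': the entrypoint load and the blx below execute only when the lock
    // word was non-zero or the strex failed (r1 != 0).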
    it = OpIT(kCondNe, "T");
    // Go expensive route - artLockObjectFromCode(self, obj);
    LoadWordDisp/*ne*/(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(4, pLockObject).Int32Value(),
                       rs_rARM_LR);
    ClobberCallerSave();
    LIR* call_inst = OpReg(kOpBlx/*ne*/, rs_rARM_LR);
    OpEndIT(it);
    MarkSafepointPC(call_inst);
    GenMemBarrier(kLoadAny);
  }
}

/*
 * Handle thin locked -> unlocked transition inline or else call out to quick entrypoint. For more
 * details see monitor.cc. Note the code below doesn't use ldrex/strex as the code holds the lock
 * and can only give away ownership if it's suspended.
 */
void ArmMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
  FlushAllRegs();
  LoadValueDirectFixed(rl_src, rs_r0);  // Get obj
  LockCallTemps();  // Prepare for explicit register usage
  LIR* null_check_branch = nullptr;
  Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
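  // Fast-path register use: r0 = object, r1 = lock word, r2 = thread id, r3 = 0 (unlocked value).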
  constexpr bool kArchVariantHasGoodBranchPredictor = false;  // TODO: true if cortex-A15.
  if (kArchVariantHasGoodBranchPredictor) {
    if ((opt_flags & MIR_IGNORE_NULL_CHECK) && !(cu_->disable_opt & (1 << kNullCheckElimination))) {
      null_check_branch = nullptr;  // No null check.
    } else {
      // If the null-check fails, it's handled by the slow path to reduce exception-related metadata.
      if (!cu_->compiler_driver->GetCompilerOptions().GetImplicitNullChecks()) {
        null_check_branch = OpCmpImmBranch(kCondEq, rs_r0, 0, NULL);
      }
    }
    Load32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1);
    MarkPossibleNullPointerException(opt_flags);
    LoadConstantNoClobber(rs_r3, 0);
    LIR* slow_unlock_branch = OpCmpBranch(kCondNe, rs_r1, rs_r2, NULL);
    GenMemBarrier(kAnyStore);
    Store32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r3);
    LIR* unlock_success_branch = OpUnconditionalBranch(NULL);

    LIR* slow_path_target = NewLIR0(kPseudoTargetLabel);
    slow_unlock_branch->target = slow_path_target;
    if (null_check_branch != nullptr) {
      null_check_branch->target = slow_path_target;
    }
    // TODO: move to a slow path.
    // Go expensive route - artUnlockObjectFromCode(obj);
    LoadWordDisp(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(4, pUnlockObject).Int32Value(), rs_rARM_LR);
    ClobberCallerSave();
    LIR* call_inst = OpReg(kOpBlx, rs_rARM_LR);
    MarkSafepointPC(call_inst);

    LIR* success_target = NewLIR0(kPseudoTargetLabel);
    unlock_success_branch->target = success_target;
  } else {
    // Explicit null-check, as the slow path is entered using an IT block.
    GenNullCheck(rs_r0, opt_flags);
    Load32Disp(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r1);  // Get lock
    MarkPossibleNullPointerException(opt_flags);
    Load32Disp(rs_rARM_SELF, Thread::ThinLockIdOffset<4>().Int32Value(), rs_r2);
    LoadConstantNoClobber(rs_r3, 0);
    // Do we hold a thin lock (lock word == thread_id)?
    OpRegReg(kOpCmp, rs_r1, rs_r2);

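    // IT EQ with pattern "EE": the store (and the barrier, if one is emitted) executes when we
    // hold the thin lock; the entrypoint load and blx run on the else (NE) path.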
    LIR* it = OpIT(kCondEq, "EE");
    if (GenMemBarrier(kAnyStore)) {
      UpdateIT(it, "TEE");
    }
    Store32Disp/*eq*/(rs_r0, mirror::Object::MonitorOffset().Int32Value(), rs_r3);
    // Go expensive route - UnlockObjectFromCode(obj);
    LoadWordDisp/*ne*/(rs_rARM_SELF, QUICK_ENTRYPOINT_OFFSET(4, pUnlockObject).Int32Value(),
                       rs_rARM_LR);
    ClobberCallerSave();
    LIR* call_inst = OpReg(kOpBlx/*ne*/, rs_rARM_LR);
    OpEndIT(it);
    MarkSafepointPC(call_inst);
  }
}

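/*
 * Move the pending exception (Thread::exception_) into rl_dest and clear the field so the
 * exception is not delivered twice.
 */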
void ArmMir2Lir::GenMoveException(RegLocation rl_dest) {
  int ex_offset = Thread::ExceptionOffset<4>().Int32Value();
  RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
  RegStorage reset_reg = AllocTempRef();
  LoadRefDisp(rs_rARM_SELF, ex_offset, rl_result.reg, kNotVolatile);
  LoadConstant(reset_reg, 0);
  StoreRefDisp(rs_rARM_SELF, ex_offset, reset_reg, kNotVolatile);
  FreeTemp(reset_reg);
  StoreValue(rl_dest, rl_result);
}

/*
 * Mark garbage collection card. Skip if the value we're storing is null.
 */
void ArmMir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
  RegStorage reg_card_base = AllocTemp();
  RegStorage reg_card_no = AllocTemp();
  LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
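  // card index = tgt_addr >> kCardShift; the biased card table base kept in the Thread is set up
  // so that its own low byte is the dirty-card value, which is why the base register doubles as
  // the byte that gets stored.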
  LoadWordDisp(rs_rARM_SELF, Thread::CardTableOffset<4>().Int32Value(), reg_card_base);
  OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
  StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch_over->target = target;
  FreeTemp(reg_card_base);
  FreeTemp(reg_card_no);
}

void ArmMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
  int spill_count = num_core_spills_ + num_fp_spills_;
  /*
   * On entry, r0, r1, r2 & r3 are live.  Let the register allocation
   * mechanism know so it doesn't try to use any of them when
   * expanding the frame or flushing.  This leaves the utility
   * code with a single temp: r12.  This should be enough.
   */
  LockTemp(rs_r0);
  LockTemp(rs_r1);
  LockTemp(rs_r2);
  LockTemp(rs_r3);

  /*
   * We can safely skip the stack overflow check if we're
   * a leaf *and* our frame size < fudge factor.
   */
  bool skip_overflow_check = mir_graph_->MethodIsLeaf() && !FrameNeedsStackCheck(frame_size_, kArm);
  NewLIR0(kPseudoMethodEntry);
  const size_t kStackOverflowReservedUsableBytes = GetStackOverflowReservedBytes(kArm);
  bool large_frame = (static_cast<size_t>(frame_size_) > kStackOverflowReservedUsableBytes);
  bool generate_explicit_stack_overflow_check = large_frame ||
    !cu_->compiler_driver->GetCompilerOptions().GetImplicitStackOverflowChecks();
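  // With explicit checks, small frames preload the stack limit into r12 here and compare SP
  // against it once the whole frame has been allocated; large frames instead compute the
  // prospective SP in LR after the spills and compare before committing it (see below).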
  if (!skip_overflow_check) {
    if (generate_explicit_stack_overflow_check) {
      if (!large_frame) {
        /* Load stack limit */
        LockTemp(rs_r12);
        Load32Disp(rs_rARM_SELF, Thread::StackEndOffset<4>().Int32Value(), rs_r12);
      }
    } else {
      // Implicit stack overflow check.
      // Generate a load from [sp, #-overflowsize].  If this is in the stack
      // redzone we will get a segmentation fault.
      //
      // Caveat coder: if someone changes the kStackOverflowReservedBytes value
      // we need to make sure that it's loadable in an immediate field of
      // a sub instruction.  Otherwise we will get a temp allocation and the
      // code size will increase.
      //
      // This is done before the callee save instructions to avoid any possibility
      // of these overflowing.  This uses r12 and that's never saved in a callee
      // save.
      OpRegRegImm(kOpSub, rs_r12, rs_rARM_SP, GetStackOverflowReservedBytes(kArm));
      Load32Disp(rs_r12, 0, rs_r12);
      MarkPossibleStackOverflowException();
    }
  }
  /* Spill core callee saves */
  NewLIR1(kThumb2Push, core_spill_mask_);
  /* Need to spill any FP regs? */
  if (num_fp_spills_) {
    /*
     * NOTE: fp spills are a little different from core spills in that
     * they are pushed as a contiguous block.  When promoting from
     * the fp set, we must allocate all singles from s16..highest-promoted
     */
    NewLIR1(kThumb2VPushCS, num_fp_spills_);
  }

  const int spill_size = spill_count * 4;
  const int frame_size_without_spills = frame_size_ - spill_size;
  if (!skip_overflow_check) {
    if (generate_explicit_stack_overflow_check) {
      class StackOverflowSlowPath : public LIRSlowPath {
       public:
        StackOverflowSlowPath(Mir2Lir* m2l, LIR* branch, bool restore_lr, size_t sp_displace)
            : LIRSlowPath(m2l, m2l->GetCurrentDexPc(), branch, nullptr), restore_lr_(restore_lr),
              sp_displace_(sp_displace) {
        }
        void Compile() OVERRIDE {
          m2l_->ResetRegPool();
          m2l_->ResetDefTracking();
          GenerateTargetLabel(kPseudoThrowTarget);
          if (restore_lr_) {
            m2l_->LoadWordDisp(rs_rARM_SP, sp_displace_ - 4, rs_rARM_LR);
          }
          m2l_->OpRegImm(kOpAdd, rs_rARM_SP, sp_displace_);
          m2l_->ClobberCallerSave();
          ThreadOffset<4> func_offset = QUICK_ENTRYPOINT_OFFSET(4, pThrowStackOverflow);
          // Load the entrypoint directly into the pc instead of doing a load + branch. Assumes
          // codegen and target are in thumb2 mode.
          // NOTE: native pointer.
          m2l_->LoadWordDisp(rs_rARM_SELF, func_offset.Int32Value(), rs_rARM_PC);
        }

       private:
        const bool restore_lr_;
        const size_t sp_displace_;
      };
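      // sp_displace_ is how much of the frame has been committed at the branch point and is
      // popped before tail-calling the throw entrypoint; restore_lr_ reloads LR from the spill
      // area when it was used as a scratch register for the limit computation.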
      if (large_frame) {
        // Note: may need a temp reg, and we only have r12 free at this point.
        OpRegRegImm(kOpSub, rs_rARM_LR, rs_rARM_SP, frame_size_without_spills);
        Load32Disp(rs_rARM_SELF, Thread::StackEndOffset<4>().Int32Value(), rs_r12);
        LIR* branch = OpCmpBranch(kCondUlt, rs_rARM_LR, rs_r12, nullptr);
        // Need to restore LR since we used it as a temp.
        AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, true, spill_size));
        OpRegCopy(rs_rARM_SP, rs_rARM_LR);     // Establish stack
      } else {
        /*
         * If the frame is small enough, we are guaranteed that enough space remains to handle
         * signals on the user stack.  However, we may not have any free temp registers at this
         * point, so we'll temporarily add LR to the temp pool.
         */
        DCHECK(!GetRegInfo(rs_rARM_LR)->IsTemp());
        MarkTemp(rs_rARM_LR);
        FreeTemp(rs_rARM_LR);
        OpRegRegImm(kOpSub, rs_rARM_SP, rs_rARM_SP, frame_size_without_spills);
        Clobber(rs_rARM_LR);
        UnmarkTemp(rs_rARM_LR);
        LIR* branch = OpCmpBranch(kCondUlt, rs_rARM_SP, rs_r12, nullptr);
        AddSlowPath(new(arena_)StackOverflowSlowPath(this, branch, false, frame_size_));
      }
    } else {
      // Implicit stack overflow check has already been done.  Just make room on the
      // stack for the frame now.
      OpRegImm(kOpSub, rs_rARM_SP, frame_size_without_spills);
    }
  } else {
    OpRegImm(kOpSub, rs_rARM_SP, frame_size_without_spills);
  }

  FlushIns(ArgLocs, rl_method);

  FreeTemp(rs_r0);
  FreeTemp(rs_r1);
  FreeTemp(rs_r2);
  FreeTemp(rs_r3);
  FreeTemp(rs_r12);
}

void ArmMir2Lir::GenExitSequence() {
  int spill_count = num_core_spills_ + num_fp_spills_;
  /*
   * In the exit path, r0/r1 are live - make sure they aren't
   * allocated by the register utilities as temps.
   */
  LockTemp(rs_r0);
  LockTemp(rs_r1);

  NewLIR0(kPseudoMethodExit);
  OpRegImm(kOpAdd, rs_rARM_SP, frame_size_ - (spill_count * 4));
  /* Need to restore any FP callee saves? */
  if (num_fp_spills_) {
    NewLIR1(kThumb2VPopCS, num_fp_spills_);
  }
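  // If LR was spilled, pop it directly into PC so the pop doubles as the return.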
  if (core_spill_mask_ & (1 << rs_rARM_LR.GetRegNum())) {
    /* Unspill rARM_LR to rARM_PC */
    core_spill_mask_ &= ~(1 << rs_rARM_LR.GetRegNum());
    core_spill_mask_ |= (1 << rs_rARM_PC.GetRegNum());
  }
  NewLIR1(kThumb2Pop, core_spill_mask_);
  if (!(core_spill_mask_ & (1 << rs_rARM_PC.GetRegNum()))) {
    /* We didn't pop to rARM_PC, so must do a bx rARM_LR */
    NewLIR1(kThumbBx, rs_rARM_LR.GetReg());
  }
}

void ArmMir2Lir::GenSpecialExitSequence() {
  NewLIR1(kThumbBx, rs_rARM_LR.GetReg());
}

}  // namespace art