Home | History | Annotate | Download | only in mips
      1 /*
      2  * Copyright (C) 2012 The Android Open Source Project
      3  *
      4  * Licensed under the Apache License, Version 2.0 (the "License");
      5  * you may not use this file except in compliance with the License.
      6  * You may obtain a copy of the License at
      7  *
      8  *      http://www.apache.org/licenses/LICENSE-2.0
      9  *
     10  * Unless required by applicable law or agreed to in writing, software
     11  * distributed under the License is distributed on an "AS IS" BASIS,
     12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
     13  * See the License for the specific language governing permissions and
     14  * limitations under the License.
     15  */
     16 
     17 /* This file contains codegen for the Mips ISA */
     18 
     19 #include "codegen_mips.h"
     20 #include "dex/quick/mir_to_lir-inl.h"
     21 #include "entrypoints/quick/quick_entrypoints.h"
     22 #include "mips_lir.h"
     23 
     24 namespace art {
     25 
// Hook for specialized codegen of recognized simple-method patterns.
// Not implemented on MIPS yet - intentionally a no-op, so such methods
// fall through to the normal compilation path.
void MipsMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir,
                                 SpecialCaseHandler special_case) {
    // TODO
}
     30 
     31 /*
     32  * The lack of pc-relative loads on Mips presents somewhat of a challenge
     33  * for our PIC switch table strategy.  To materialize the current location
     34  * we'll do a dummy JAL and reference our tables using r_RA as the
     35  * base register.  Note that r_RA will be used both as the base to
     36  * locate the switch table data and as the reference base for the switch
     37  * target offsets stored in the table.  We'll use a special pseudo-instruction
     38  * to represent the jal and trigger the construction of the
     39  * switch table offsets (which will happen after final assembly and all
     40  * labels are fixed).
     41  *
     42  * The test loop will look something like:
     43  *
     44  *   ori   rEnd, r_ZERO, #table_size  ; size in bytes
     45  *   jal   BaseLabel         ; stores "return address" (BaseLabel) in r_RA
     46  *   nop                     ; opportunistically fill
     47  * BaseLabel:
     48  *   addiu rBase, r_RA, <table> - <BaseLabel>  ; table relative to BaseLabel
      49  *   addu  rEnd, rEnd, rBase                   ; end of table
     50  *   lw    r_val, [rSP, v_reg_off]                ; Test Value
     51  * loop:
     52  *   beq   rBase, rEnd, done
     53  *   lw    r_key, 0(rBase)
     54  *   addu  rBase, 8
     55  *   bne   r_val, r_key, loop
     56  *   lw    r_disp, -4(rBase)
     57  *   addu  r_RA, r_disp
     58  *   jr    r_RA
     59  * done:
     60  *
     61  */
/*
 * Generate code for a sparse-switch: an inline linear scan over the
 * embedded key/displacement table (see the code pattern in the comment
 * above).  A dummy jal (kMipsCurrPC) materializes the current PC in
 * r_RA, which serves both as the base for locating the table and as
 * the base the stored displacements are added to.
 */
void MipsMir2Lir::GenSparseSwitch(MIR* mir, uint32_t table_offset,
                                  RegLocation rl_src) {
  // Switch payload lives in the dex instruction stream at this offset.
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  if (cu_->verbose) {
    DumpSparseSwitchTable(table);
  }
  // Add the table to the list - we'll process it later
  SwitchTable *tab_rec =
      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  int elements = table[1];  // Entry count from the payload header.
  tab_rec->targets =
      static_cast<LIR**>(arena_->Alloc(elements * sizeof(LIR*), ArenaAllocator::kAllocLIR));
  switch_tables_.Insert(tab_rec);

  // The table is composed of 8-byte key/disp pairs
  int byte_size = elements * 8;

  int size_hi = byte_size >> 16;
  int size_lo = byte_size & 0xffff;

  // rEnd gets the table size in bytes; a lui is only needed if it
  // doesn't fit a 16-bit immediate.
  int rEnd = AllocTemp();
  if (size_hi) {
    NewLIR2(kMipsLui, rEnd, size_hi);
  }
  // Must prevent code motion for the curr pc pair
  GenBarrier();  // Scheduling barrier
  NewLIR0(kMipsCurrPC);  // Really a jal to .+8
  // Now, fill the branch delay slot
  if (size_hi) {
    NewLIR3(kMipsOri, rEnd, rEnd, size_lo);
  } else {
    NewLIR3(kMipsOri, rEnd, r_ZERO, size_lo);
  }
  GenBarrier();  // Scheduling barrier

  // Construct BaseLabel and set up table base register
  LIR* base_label = NewLIR0(kPseudoTargetLabel);
  // Remember base label so offsets can be computed later
  tab_rec->anchor = base_label;
  // kMipsDelta is resolved after final assembly (per the header comment)
  // to rBase = r_RA + (<table> - BaseLabel).
  int rBase = AllocTemp();
  NewLIR4(kMipsDelta, rBase, 0, reinterpret_cast<uintptr_t>(base_label),
          reinterpret_cast<uintptr_t>(tab_rec));
  OpRegRegReg(kOpAdd, rEnd, rEnd, rBase);  // rEnd = one past the table end.

  // Grab switch test value
  rl_src = LoadValue(rl_src, kCoreReg);

  // Test loop: compare the switch value against each key; on a match,
  // add the paired displacement to r_RA and jump through it.
  int r_key = AllocTemp();
  LIR* loop_label = NewLIR0(kPseudoTargetLabel);
  LIR* exit_branch = OpCmpBranch(kCondEq, rBase, rEnd, NULL);
  LoadWordDisp(rBase, 0, r_key);
  OpRegImm(kOpAdd, rBase, 8);  // Advance to the next key/disp pair.
  OpCmpBranch(kCondNe, rl_src.low_reg, r_key, loop_label);
  int r_disp = AllocTemp();
  LoadWordDisp(rBase, -4, r_disp);  // Displacement of the pair just matched.
  OpRegRegReg(kOpAdd, r_RA, r_RA, r_disp);
  OpReg(kOpBx, r_RA);

  // Loop exit
  LIR* exit_label = NewLIR0(kPseudoTargetLabel);
  exit_branch->target = exit_label;
}
    127 
    128 /*
    129  * Code pattern will look something like:
    130  *
    131  *   lw    r_val
    132  *   jal   BaseLabel         ; stores "return address" (BaseLabel) in r_RA
    133  *   nop                     ; opportunistically fill
    134  *   [subiu r_val, bias]      ; Remove bias if low_val != 0
    135  *   bound check -> done
    136  *   lw    r_disp, [r_RA, r_val]
    137  *   addu  r_RA, r_disp
    138  *   jr    r_RA
    139  * done:
    140  */
/*
 * Generate code for a packed-switch: index directly into the embedded
 * displacement table (see the code pattern in the comment above).  The
 * dummy jal (kMipsCurrPC) materializes the current PC in r_RA.
 */
void MipsMir2Lir::GenPackedSwitch(MIR* mir, uint32_t table_offset,
                                  RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  if (cu_->verbose) {
    DumpPackedSwitchTable(table);
  }
  // Add the table to the list - we'll process it later
  SwitchTable *tab_rec =
      static_cast<SwitchTable*>(arena_->Alloc(sizeof(SwitchTable), ArenaAllocator::kAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  int size = table[1];  // Number of targets, from the payload header.
  tab_rec->targets = static_cast<LIR**>(arena_->Alloc(size * sizeof(LIR*),
                                                       ArenaAllocator::kAllocLIR));
  switch_tables_.Insert(tab_rec);

  // Get the switch value
  rl_src = LoadValue(rl_src, kCoreReg);

  // Prepare the bias.  If too big, handle 1st stage here
  int low_key = s4FromSwitchData(&table[2]);
  bool large_bias = false;
  int r_key;
  if (low_key == 0) {
    // No bias to strip - use the switch value register directly.
    r_key = rl_src.low_reg;
  } else if ((low_key & 0xffff) != low_key) {
    // Bias won't fit a 16-bit immediate; materialize it up front.
    r_key = AllocTemp();
    LoadConstant(r_key, low_key);
    large_bias = true;
  } else {
    r_key = AllocTemp();
  }

  // Must prevent code motion for the curr pc pair
  GenBarrier();
  NewLIR0(kMipsCurrPC);  // Really a jal to .+8
  // Now, fill the branch delay slot with bias strip
  if (low_key == 0) {
    NewLIR0(kMipsNop);
  } else {
    if (large_bias) {
      OpRegRegReg(kOpSub, r_key, rl_src.low_reg, r_key);
    } else {
      OpRegRegImm(kOpSub, r_key, rl_src.low_reg, low_key);
    }
  }
  GenBarrier();  // Scheduling barrier

  // Construct BaseLabel and set up table base register
  LIR* base_label = NewLIR0(kPseudoTargetLabel);
  // Remember base label so offsets can be computed later
  tab_rec->anchor = base_label;

  // Bounds check - if < 0 or >= size continue following switch
  // (the unsigned kCondHi compare folds both tests into one branch).
  LIR* branch_over = OpCmpImmBranch(kCondHi, r_key, size-1, NULL);

  // Materialize the table base pointer
  int rBase = AllocTemp();
  NewLIR4(kMipsDelta, rBase, 0, reinterpret_cast<uintptr_t>(base_label),
          reinterpret_cast<uintptr_t>(tab_rec));

  // Load the displacement from the switch table
  int r_disp = AllocTemp();
  LoadBaseIndexed(rBase, r_key, r_disp, 2, kWord);  // Scale 2: 4-byte entries.

  // Add to r_RA and go
  OpRegRegReg(kOpAdd, r_RA, r_RA, r_disp);
  OpReg(kOpBx, r_RA);

  /* branch_over target here */
  LIR* target = NewLIR0(kPseudoTargetLabel);
  branch_over->target = target;
}
    214 
    215 /*
    216  * Array data table format:
    217  *  ushort ident = 0x0300   magic value
    218  *  ushort width            width of each element in the table
    219  *  uint   size             number of elements in the table
    220  *  ubyte  data[size*width] table of data values (may contain a single-byte
    221  *                          padding at the end)
    222  *
    223  * Total size is 4+(width * size + 1)/2 16-bit code units.
    224  */
/*
 * Generate code for fill-array-data: record the data table for later
 * emission, then call the pHandleFillArrayData runtime helper with the
 * array object (rl_src) and a pointer to the table.  The table pointer
 * is materialized PC-relative via the dummy jal / kMipsDelta pair, as
 * in the switch generators above.
 */
void MipsMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) {
  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
  // Add the table to the list - we'll process it later
  FillArrayData *tab_rec =
      reinterpret_cast<FillArrayData*>(arena_->Alloc(sizeof(FillArrayData),
                                                     ArenaAllocator::kAllocData));
  tab_rec->table = table;
  tab_rec->vaddr = current_dalvik_offset_;
  uint16_t width = tab_rec->table[1];  // Element width in bytes (payload header).
  uint32_t size = tab_rec->table[2] | ((static_cast<uint32_t>(tab_rec->table[3])) << 16);
  tab_rec->size = (size * width) + 8;  // Data bytes plus the 8-byte payload header.

  fill_array_data_.Insert(tab_rec);

  // Making a call - use explicit registers
  FlushAllRegs();   /* Everything to home location */
  LockCallTemps();
  LoadValueDirectFixed(rl_src, rMIPS_ARG0);  // Array object is arg 0.

  // Must prevent code motion for the curr pc pair
  GenBarrier();
  NewLIR0(kMipsCurrPC);  // Really a jal to .+8
  // Now, fill the branch delay slot with the helper load
  int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pHandleFillArrayData));
  GenBarrier();  // Scheduling barrier

  // Construct BaseLabel and set up table base register
  LIR* base_label = NewLIR0(kPseudoTargetLabel);

  // Materialize a pointer to the fill data image
  NewLIR4(kMipsDelta, rMIPS_ARG1, 0, reinterpret_cast<uintptr_t>(base_label),
          reinterpret_cast<uintptr_t>(tab_rec));

  // And go...
  ClobberCalleeSave();
  LIR* call_inst = OpReg(kOpBlx, r_tgt);  // ( array*, fill_data* )
  MarkSafepointPC(call_inst);
}
    263 
    264 /*
    265  * TODO: implement fast path to short-circuit thin-lock case
    266  */
    267 void MipsMir2Lir::GenMonitorEnter(int opt_flags, RegLocation rl_src) {
    268   FlushAllRegs();
    269   LoadValueDirectFixed(rl_src, rMIPS_ARG0);  // Get obj
    270   LockCallTemps();  // Prepare for explicit register usage
    271   GenNullCheck(rl_src.s_reg_low, rMIPS_ARG0, opt_flags);
    272   // Go expensive route - artLockObjectFromCode(self, obj);
    273   int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pLockObject));
    274   ClobberCalleeSave();
    275   LIR* call_inst = OpReg(kOpBlx, r_tgt);
    276   MarkSafepointPC(call_inst);
    277 }
    278 
    279 /*
    280  * TODO: implement fast path to short-circuit thin-lock case
    281  */
    282 void MipsMir2Lir::GenMonitorExit(int opt_flags, RegLocation rl_src) {
    283   FlushAllRegs();
    284   LoadValueDirectFixed(rl_src, rMIPS_ARG0);  // Get obj
    285   LockCallTemps();  // Prepare for explicit register usage
    286   GenNullCheck(rl_src.s_reg_low, rMIPS_ARG0, opt_flags);
    287   // Go expensive route - UnlockObjectFromCode(obj);
    288   int r_tgt = LoadHelper(QUICK_ENTRYPOINT_OFFSET(pUnlockObject));
    289   ClobberCalleeSave();
    290   LIR* call_inst = OpReg(kOpBlx, r_tgt);
    291   MarkSafepointPC(call_inst);
    292 }
    293 
    294 void MipsMir2Lir::GenMoveException(RegLocation rl_dest) {
    295   int ex_offset = Thread::ExceptionOffset().Int32Value();
    296   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
    297   int reset_reg = AllocTemp();
    298   LoadWordDisp(rMIPS_SELF, ex_offset, rl_result.low_reg);
    299   LoadConstant(reset_reg, 0);
    300   StoreWordDisp(rMIPS_SELF, ex_offset, reset_reg);
    301   FreeTemp(reset_reg);
    302   StoreValue(rl_dest, rl_result);
    303 }
    304 
    305 /*
    306  * Mark garbage collection card. Skip if the value we're storing is null.
    307  */
    308 void MipsMir2Lir::MarkGCCard(int val_reg, int tgt_addr_reg) {
    309   int reg_card_base = AllocTemp();
    310   int reg_card_no = AllocTemp();
    311   LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
    312   LoadWordDisp(rMIPS_SELF, Thread::CardTableOffset().Int32Value(), reg_card_base);
    313   OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
    314   StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0,
    315                    kUnsignedByte);
    316   LIR* target = NewLIR0(kPseudoTargetLabel);
    317   branch_over->target = target;
    318   FreeTemp(reg_card_base);
    319   FreeTemp(reg_card_no);
    320 }
// Generate the method prologue: pin the incoming argument registers,
// perform a stack-overflow check when required, spill core callee-saves,
// establish the frame, and flush incoming args to their home locations.
void MipsMir2Lir::GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) {
  int spill_count = num_core_spills_ + num_fp_spills_;
  /*
   * On entry, rMIPS_ARG0, rMIPS_ARG1, rMIPS_ARG2 & rMIPS_ARG3 are live.  Let the register
   * allocation mechanism know so it doesn't try to use any of them when
   * expanding the frame or flushing.  This leaves the utility
   * code with a single temp: r12.  This should be enough.
   * NOTE(review): "r12" looks like a leftover from the ARM version of
   * this comment - confirm which temp actually remains free on MIPS.
   */
  LockTemp(rMIPS_ARG0);
  LockTemp(rMIPS_ARG1);
  LockTemp(rMIPS_ARG2);
  LockTemp(rMIPS_ARG3);

  /*
   * We can safely skip the stack overflow check if we're
   * a leaf *and* our frame size < fudge factor.
   */
  bool skip_overflow_check = (mir_graph_->MethodIsLeaf() &&
      (static_cast<size_t>(frame_size_) < Thread::kStackOverflowReservedBytes));
  NewLIR0(kPseudoMethodEntry);
  int check_reg = AllocTemp();
  int new_sp = AllocTemp();
  if (!skip_overflow_check) {
    /* Load stack limit */
    LoadWordDisp(rMIPS_SELF, Thread::StackEndOffset().Int32Value(), check_reg);
  }
  /* Spill core callee saves */
  SpillCoreRegs();
  /* NOTE: promotion of FP regs currently unsupported, thus no FP spill */
  DCHECK_EQ(num_fp_spills_, 0);
  if (!skip_overflow_check) {
    // Compute the new SP into a temp and check it against the stack limit
    // before committing it - presumably so SP is still valid if the check
    // throws; confirm against the unwinder's expectations.
    OpRegRegImm(kOpSub, new_sp, rMIPS_SP, frame_size_ - (spill_count * 4));
    GenRegRegCheck(kCondCc, new_sp, check_reg, kThrowStackOverflow);
    OpRegCopy(rMIPS_SP, new_sp);     // Establish stack
  } else {
    OpRegImm(kOpSub, rMIPS_SP, frame_size_ - (spill_count * 4));
  }

  FlushIns(ArgLocs, rl_method);

  // Arguments are home now - release the pinned argument registers.
  FreeTemp(rMIPS_ARG0);
  FreeTemp(rMIPS_ARG1);
  FreeTemp(rMIPS_ARG2);
  FreeTemp(rMIPS_ARG3);
}
    366 
    367 void MipsMir2Lir::GenExitSequence() {
    368   /*
    369    * In the exit path, rMIPS_RET0/rMIPS_RET1 are live - make sure they aren't
    370    * allocated by the register utilities as temps.
    371    */
    372   LockTemp(rMIPS_RET0);
    373   LockTemp(rMIPS_RET1);
    374 
    375   NewLIR0(kPseudoMethodExit);
    376   UnSpillCoreRegs();
    377   OpReg(kOpBx, r_RA);
    378 }
    379 
    380 }  // namespace art
    381