// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/mips64/codegen-mips64.h"

#if V8_TARGET_ARCH_MIPS64

#include <memory>

#include "src/codegen.h"
#include "src/macro-assembler.h"
#include "src/mips64/simulator-mips64.h"

namespace v8 {
namespace internal {


#define __ masm.


#if defined(V8_HOST_ARCH_MIPS)
MemCopyUint8Function CreateMemCopyUint8Function(Isolate* isolate,
                                                MemCopyUint8Function stub) {
#if defined(USE_SIMULATOR)
  return stub;
#else

  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(3 * KB, &actual_size, true));
  if (buffer == nullptr) return stub;

  // This code assumes that cache lines are 32 bytes; if the cache line is
  // larger it will not work correctly.
  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);

  {
    Label lastb, unaligned, aligned, chkw,
          loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop,
          leave, ua_chk16w, ua_loop16w, ua_skip_pref, ua_chkw,
          ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop;

    // The size of each prefetch.
    uint32_t pref_chunk = 32;
    // The maximum size of a prefetch; it must not be less than pref_chunk.
    // If the real size of a prefetch is greater than max_pref_size and
    // the kPrefHintPrepareForStore hint is used, the code will not work
    // correctly.
    uint32_t max_pref_size = 128;
    DCHECK(pref_chunk < max_pref_size);

    // pref_limit is set based on the fact that we never use an offset
    // greater than 5 on a store pref and that a single pref can
    // never be larger than max_pref_size.
    uint32_t pref_limit = (5 * pref_chunk) + max_pref_size;
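    // With the values chosen above (pref_chunk = 32, max_pref_size = 128),
    // pref_limit works out to 5 * 32 + 128 = 288 bytes: store prefetching is
    // skipped once fewer than 288 bytes remain before the end of the
    // destination (see the t9 computation below).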
    int32_t pref_hint_load = kPrefHintLoadStreamed;
    int32_t pref_hint_store = kPrefHintPrepareForStore;
    uint32_t loadstore_chunk = 4;

    // The initial prefetches may fetch bytes that are before the buffer being
    // copied. Start copies with an offset of 4 to avoid this situation when
    // using kPrefHintPrepareForStore.
    DCHECK(pref_hint_store != kPrefHintPrepareForStore ||
           pref_chunk * 4 >= max_pref_size);
    // If the size is less than 8, go to lastb. Regardless of size,
    // copy dst pointer to v0 for the return value.
    __ slti(a6, a2, 2 * loadstore_chunk);
    __ bne(a6, zero_reg, &lastb);
    __ mov(v0, a0);  // In delay slot.

    // If src and dst have different alignments, go to unaligned; if they
    // have the same alignment (but are not actually aligned) do a partial
    // load/store to make them aligned. If they are both already aligned
    // we can start copying at aligned.
    __ xor_(t8, a1, a0);
    __ andi(t8, t8, loadstore_chunk - 1);  // t8 is a0/a1 word-displacement.
    __ bne(t8, zero_reg, &unaligned);
    __ subu(a3, zero_reg, a0);  // In delay slot.

    __ andi(a3, a3, loadstore_chunk - 1);  // Copy a3 bytes to align a0/a1.
    __ beq(a3, zero_reg, &aligned);  // Already aligned.
    __ subu(a2, a2, a3);  // In delay slot. a2 is the remaining bytes count.

    if (kArchEndian == kLittle) {
      __ lwr(t8, MemOperand(a1));
      __ addu(a1, a1, a3);
      __ swr(t8, MemOperand(a0));
      __ addu(a0, a0, a3);
    } else {
      __ lwl(t8, MemOperand(a1));
      __ addu(a1, a1, a3);
      __ swl(t8, MemOperand(a0));
      __ addu(a0, a0, a3);
    }

    // Now dst/src are both word-aligned. Set a2 to
    // count how many bytes we have to copy after all the 64 byte chunks are
    // copied and a3 to the dst pointer after all the 64 byte chunks have been
    // copied. We will loop, incrementing a0 and a1 until a0 equals a3.
    __ bind(&aligned);
    __ andi(t8, a2, 0x3f);
    __ beq(a2, t8, &chkw);  // Less than 64?
    __ subu(a3, a2, t8);  // In delay slot.
    __ addu(a3, a0, a3);  // Now a3 is the final dst after loop.
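
    // Ignoring the prefetch instructions, the main loop below amounts to this
    // rough C sketch:
    //   while (a0 != a3) {   // a3 = a0 + (a2 & ~0x3f)
    //     copy 16 words (64 bytes) from a1 to a0 via a4-a7 and t0-t3;
    //     a0 += 64; a1 += 64;
    //   }
    //   a2 &= 0x3f;          // Bytes still to copy, handled from chkw on.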

    // When in the loop we prefetch with the kPrefHintPrepareForStore hint;
    // in this case a0+x should be past the "a4-32" address. This means:
    // for x=128 the last "safe" a0 address is "a4-160". Alternatively, for
    // x=64 the last "safe" a0 address is "a4-96". In the current version we
    // will use "pref hint, 128(a0)", so "a4-160" is the limit.
    if (pref_hint_store == kPrefHintPrepareForStore) {
      __ addu(a4, a0, a2);  // a4 is the "past the end" address.
      __ Subu(t9, a4, pref_limit);  // t9 is the "last safe pref" address.
    }

    __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));

    if (pref_hint_store != kPrefHintPrepareForStore) {
      __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
    }
    __ bind(&loop16w);
    __ lw(a4, MemOperand(a1));

    if (pref_hint_store == kPrefHintPrepareForStore) {
      __ sltu(v1, t9, a0);  // If a0 > t9, don't use next prefetch.
      __ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg));
    }
    __ lw(a5, MemOperand(a1, 1, loadstore_chunk));  // Maybe in delay slot.

    __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
    __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));

    __ bind(&skip_pref);
    __ lw(a6, MemOperand(a1, 2, loadstore_chunk));
    __ lw(a7, MemOperand(a1, 3, loadstore_chunk));
    __ lw(t0, MemOperand(a1, 4, loadstore_chunk));
    __ lw(t1, MemOperand(a1, 5, loadstore_chunk));
    __ lw(t2, MemOperand(a1, 6, loadstore_chunk));
    __ lw(t3, MemOperand(a1, 7, loadstore_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));

    __ sw(a4, MemOperand(a0));
    __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
    __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
    __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
    __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
    __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 7, loadstore_chunk));

    __ lw(a4, MemOperand(a1, 8, loadstore_chunk));
    __ lw(a5, MemOperand(a1, 9, loadstore_chunk));
    __ lw(a6, MemOperand(a1, 10, loadstore_chunk));
    __ lw(a7, MemOperand(a1, 11, loadstore_chunk));
    __ lw(t0, MemOperand(a1, 12, loadstore_chunk));
    __ lw(t1, MemOperand(a1, 13, loadstore_chunk));
    __ lw(t2, MemOperand(a1, 14, loadstore_chunk));
    __ lw(t3, MemOperand(a1, 15, loadstore_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));

    __ sw(a4, MemOperand(a0, 8, loadstore_chunk));
    __ sw(a5, MemOperand(a0, 9, loadstore_chunk));
    __ sw(a6, MemOperand(a0, 10, loadstore_chunk));
    __ sw(a7, MemOperand(a0, 11, loadstore_chunk));
    __ sw(t0, MemOperand(a0, 12, loadstore_chunk));
    __ sw(t1, MemOperand(a0, 13, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 14, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 15, loadstore_chunk));
    __ addiu(a0, a0, 16 * loadstore_chunk);
    __ bne(a0, a3, &loop16w);
    __ addiu(a1, a1, 16 * loadstore_chunk);  // In delay slot.
    __ mov(a2, t8);

    // Here we have src and dest word-aligned but less than 64 bytes to go.
    // Check for a 32-byte chunk and copy it if there is one. Otherwise jump
    // down to chk1w to handle the tail end of the copy.
    __ bind(&chkw);
    __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
    __ andi(t8, a2, 0x1f);
    __ beq(a2, t8, &chk1w);  // Less than 32?
    __ nop();  // In delay slot.
    __ lw(a4, MemOperand(a1));
    __ lw(a5, MemOperand(a1, 1, loadstore_chunk));
    __ lw(a6, MemOperand(a1, 2, loadstore_chunk));
    __ lw(a7, MemOperand(a1, 3, loadstore_chunk));
    __ lw(t0, MemOperand(a1, 4, loadstore_chunk));
    __ lw(t1, MemOperand(a1, 5, loadstore_chunk));
    __ lw(t2, MemOperand(a1, 6, loadstore_chunk));
    __ lw(t3, MemOperand(a1, 7, loadstore_chunk));
    __ addiu(a1, a1, 8 * loadstore_chunk);
    __ sw(a4, MemOperand(a0));
    __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
    __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
    __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
    __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
    __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
    __ addiu(a0, a0, 8 * loadstore_chunk);

    // Here we have less than 32 bytes to copy. Set up for a loop to copy
    // one word at a time. Set a2 to count how many bytes we have to copy
    // after all the word chunks are copied and a3 to the dst pointer after
    // all the word chunks have been copied. We will loop, incrementing a0
    // and a1 until a0 equals a3.
    __ bind(&chk1w);
    __ andi(a2, t8, loadstore_chunk - 1);
    __ beq(a2, t8, &lastb);
    __ subu(a3, t8, a2);  // In delay slot.
    __ addu(a3, a0, a3);
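
    // In C terms, the word-copy tail below is roughly:
    //   while (a0 != a3) { *(uint32_t*)a0 = *(uint32_t*)a1; a0 += 4; a1 += 4; }
    // with the final a2 (< 4) bytes copied one at a time at lastb.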

    __ bind(&wordCopy_loop);
    __ lw(a7, MemOperand(a1));
    __ addiu(a0, a0, loadstore_chunk);
    __ addiu(a1, a1, loadstore_chunk);
    __ bne(a0, a3, &wordCopy_loop);
    __ sw(a7, MemOperand(a0, -1, loadstore_chunk));  // In delay slot.

    __ bind(&lastb);
    __ Branch(&leave, le, a2, Operand(zero_reg));
    __ addu(a3, a0, a2);

    __ bind(&lastbloop);
    __ lb(v1, MemOperand(a1));
    __ addiu(a0, a0, 1);
    __ addiu(a1, a1, 1);
    __ bne(a0, a3, &lastbloop);
    __ sb(v1, MemOperand(a0, -1));  // In delay slot.

    __ bind(&leave);
    __ jr(ra);
    __ nop();

    // Unaligned case. Only the dst gets aligned so we need to do partial
    // loads of the source followed by normal stores to the dst (once we
    // have aligned the destination).
    __ bind(&unaligned);
    __ andi(a3, a3, loadstore_chunk - 1);  // Copy a3 bytes to align a0/a1.
    __ beq(a3, zero_reg, &ua_chk16w);
    __ subu(a2, a2, a3);  // In delay slot.

    if (kArchEndian == kLittle) {
      __ lwr(v1, MemOperand(a1));
      __ lwl(v1,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
      __ addu(a1, a1, a3);
      __ swr(v1, MemOperand(a0));
      __ addu(a0, a0, a3);
    } else {
      __ lwl(v1, MemOperand(a1));
      __ lwr(v1,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
      __ addu(a1, a1, a3);
      __ swl(v1, MemOperand(a0));
      __ addu(a0, a0, a3);
    }
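
    // The lwr/lwl (and swr/swl) pairs above implement unaligned word accesses:
    // one instruction transfers the bytes from the unaligned address up to the
    // next word boundary, the other fills in the remainder from the adjacent
    // word; which of "left"/"right" plays which role depends on the
    // endianness, hence the two branches of the if above.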

    // Now the dst (but not the source) is aligned. Set a2 to count how many
    // bytes we have to copy after all the 64 byte chunks are copied and a3 to
    // the dst pointer after all the 64 byte chunks have been copied. We will
    // loop, incrementing a0 and a1 until a0 equals a3.
    __ bind(&ua_chk16w);
    __ andi(t8, a2, 0x3f);
    __ beq(a2, t8, &ua_chkw);
    __ subu(a3, a2, t8);  // In delay slot.
    __ addu(a3, a0, a3);

    if (pref_hint_store == kPrefHintPrepareForStore) {
      __ addu(a4, a0, a2);
      __ Subu(t9, a4, pref_limit);
    }

    __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
    __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));

    if (pref_hint_store != kPrefHintPrepareForStore) {
      __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
    }

    __ bind(&ua_loop16w);
    if (kArchEndian == kLittle) {
      __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
      __ lwr(a4, MemOperand(a1));
      __ lwr(a5, MemOperand(a1, 1, loadstore_chunk));
      __ lwr(a6, MemOperand(a1, 2, loadstore_chunk));

      if (pref_hint_store == kPrefHintPrepareForStore) {
        __ sltu(v1, t9, a0);
        __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
      }
      __ lwr(a7, MemOperand(a1, 3, loadstore_chunk));  // Maybe in delay slot.

      __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));

      __ bind(&ua_skip_pref);
      __ lwr(t0, MemOperand(a1, 4, loadstore_chunk));
      __ lwr(t1, MemOperand(a1, 5, loadstore_chunk));
      __ lwr(t2, MemOperand(a1, 6, loadstore_chunk));
      __ lwr(t3, MemOperand(a1, 7, loadstore_chunk));
      __ lwl(a4,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(a5,
             MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(a6,
             MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(a7,
             MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t0,
             MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t1,
             MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t2,
             MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t3,
             MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
    } else {
      __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
      __ lwl(a4, MemOperand(a1));
      __ lwl(a5, MemOperand(a1, 1, loadstore_chunk));
      __ lwl(a6, MemOperand(a1, 2, loadstore_chunk));

      if (pref_hint_store == kPrefHintPrepareForStore) {
        __ sltu(v1, t9, a0);
        __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
      }
      __ lwl(a7, MemOperand(a1, 3, loadstore_chunk));  // Maybe in delay slot.

      __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
      __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));

      __ bind(&ua_skip_pref);
      __ lwl(t0, MemOperand(a1, 4, loadstore_chunk));
      __ lwl(t1, MemOperand(a1, 5, loadstore_chunk));
      __ lwl(t2, MemOperand(a1, 6, loadstore_chunk));
      __ lwl(t3, MemOperand(a1, 7, loadstore_chunk));
      __ lwr(a4,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(a5,
             MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(a6,
             MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(a7,
             MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t0,
             MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t1,
             MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t2,
             MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t3,
             MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
    }
    __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
    __ sw(a4, MemOperand(a0));
    __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
    __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
    __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
    __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
    __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
    if (kArchEndian == kLittle) {
      __ lwr(a4, MemOperand(a1, 8, loadstore_chunk));
      __ lwr(a5, MemOperand(a1, 9, loadstore_chunk));
      __ lwr(a6, MemOperand(a1, 10, loadstore_chunk));
      __ lwr(a7, MemOperand(a1, 11, loadstore_chunk));
      __ lwr(t0, MemOperand(a1, 12, loadstore_chunk));
      __ lwr(t1, MemOperand(a1, 13, loadstore_chunk));
      __ lwr(t2, MemOperand(a1, 14, loadstore_chunk));
      __ lwr(t3, MemOperand(a1, 15, loadstore_chunk));
      __ lwl(a4,
             MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(a5,
             MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(a6,
             MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(a7,
             MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t0,
             MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t1,
             MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t2,
             MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t3,
             MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
    } else {
      __ lwl(a4, MemOperand(a1, 8, loadstore_chunk));
      __ lwl(a5, MemOperand(a1, 9, loadstore_chunk));
      __ lwl(a6, MemOperand(a1, 10, loadstore_chunk));
      __ lwl(a7, MemOperand(a1, 11, loadstore_chunk));
      __ lwl(t0, MemOperand(a1, 12, loadstore_chunk));
      __ lwl(t1, MemOperand(a1, 13, loadstore_chunk));
      __ lwl(t2, MemOperand(a1, 14, loadstore_chunk));
      __ lwl(t3, MemOperand(a1, 15, loadstore_chunk));
      __ lwr(a4,
             MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(a5,
             MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(a6,
             MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(a7,
             MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t0,
             MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t1,
             MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t2,
             MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t3,
             MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
    }
    __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
    __ sw(a4, MemOperand(a0, 8, loadstore_chunk));
    __ sw(a5, MemOperand(a0, 9, loadstore_chunk));
    __ sw(a6, MemOperand(a0, 10, loadstore_chunk));
    __ sw(a7, MemOperand(a0, 11, loadstore_chunk));
    __ sw(t0, MemOperand(a0, 12, loadstore_chunk));
    __ sw(t1, MemOperand(a0, 13, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 14, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 15, loadstore_chunk));
    __ addiu(a0, a0, 16 * loadstore_chunk);
    __ bne(a0, a3, &ua_loop16w);
    __ addiu(a1, a1, 16 * loadstore_chunk);  // In delay slot.
    __ mov(a2, t8);

    // Here we have less than 64 bytes to copy. Check for
    // a 32-byte chunk and copy it if there is one. Otherwise jump down to
    // ua_chk1w to handle the tail end of the copy.
    __ bind(&ua_chkw);
    __ Pref(pref_hint_load, MemOperand(a1));
    __ andi(t8, a2, 0x1f);

    __ beq(a2, t8, &ua_chk1w);
    __ nop();  // In delay slot.
    if (kArchEndian == kLittle) {
      __ lwr(a4, MemOperand(a1));
      __ lwr(a5, MemOperand(a1, 1, loadstore_chunk));
      __ lwr(a6, MemOperand(a1, 2, loadstore_chunk));
      __ lwr(a7, MemOperand(a1, 3, loadstore_chunk));
      __ lwr(t0, MemOperand(a1, 4, loadstore_chunk));
      __ lwr(t1, MemOperand(a1, 5, loadstore_chunk));
      __ lwr(t2, MemOperand(a1, 6, loadstore_chunk));
      __ lwr(t3, MemOperand(a1, 7, loadstore_chunk));
      __ lwl(a4,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(a5,
             MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(a6,
             MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(a7,
             MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t0,
             MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t1,
             MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t2,
             MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwl(t3,
             MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
    } else {
      __ lwl(a4, MemOperand(a1));
      __ lwl(a5, MemOperand(a1, 1, loadstore_chunk));
      __ lwl(a6, MemOperand(a1, 2, loadstore_chunk));
      __ lwl(a7, MemOperand(a1, 3, loadstore_chunk));
      __ lwl(t0, MemOperand(a1, 4, loadstore_chunk));
      __ lwl(t1, MemOperand(a1, 5, loadstore_chunk));
      __ lwl(t2, MemOperand(a1, 6, loadstore_chunk));
      __ lwl(t3, MemOperand(a1, 7, loadstore_chunk));
      __ lwr(a4,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(a5,
             MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(a6,
             MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(a7,
             MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t0,
             MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t1,
             MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t2,
             MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
      __ lwr(t3,
             MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
    }
    __ addiu(a1, a1, 8 * loadstore_chunk);
    __ sw(a4, MemOperand(a0));
    __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
    __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
    __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
    __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
    __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
    __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
    __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
    __ addiu(a0, a0, 8 * loadstore_chunk);

    // Less than 32 bytes to copy. Set up for a loop to
    // copy one word at a time.
    __ bind(&ua_chk1w);
    __ andi(a2, t8, loadstore_chunk - 1);
    __ beq(a2, t8, &ua_smallCopy);
    __ subu(a3, t8, a2);  // In delay slot.
    __ addu(a3, a0, a3);

    __ bind(&ua_wordCopy_loop);
    if (kArchEndian == kLittle) {
      __ lwr(v1, MemOperand(a1));
      __ lwl(v1,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
    } else {
      __ lwl(v1, MemOperand(a1));
      __ lwr(v1,
             MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
    }
    __ addiu(a0, a0, loadstore_chunk);
    __ addiu(a1, a1, loadstore_chunk);
    __ bne(a0, a3, &ua_wordCopy_loop);
    __ sw(v1, MemOperand(a0, -1, loadstore_chunk));  // In delay slot.

    // Copy the last few bytes.
    __ bind(&ua_smallCopy);
    __ beq(a2, zero_reg, &leave);
    __ addu(a3, a0, a2);  // In delay slot.

    __ bind(&ua_smallCopy_loop);
    __ lb(v1, MemOperand(a1));
    __ addiu(a0, a0, 1);
    __ addiu(a1, a1, 1);
    __ bne(a0, a3, &ua_smallCopy_loop);
    __ sb(v1, MemOperand(a0, -1));  // In delay slot.

    __ jr(ra);
    __ nop();
  }
  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<MemCopyUint8Function>(buffer);
#endif
}
#endif
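
// The function pointer returned above is installed once during isolate setup
// and is then used like a plain byte-wise memcpy. Assuming the usual typedef
//   void (*MemCopyUint8Function)(uint8_t* dest, const uint8_t* src,
//                                size_t size);
// a call site looks roughly like:
//   MemCopyUint8Function copy = CreateMemCopyUint8Function(isolate, stub);
//   copy(dest, src, size);  // Equivalent to memcpy(dest, src, size).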

UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
  return nullptr;
#else
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return nullptr;

  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);

  __ MovFromFloatParameter(f12);
  __ sqrt_d(f0, f12);
  __ MovToFloatResult(f0);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#endif
}
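
// Likewise, a rough usage sketch for the sqrt stub, assuming the typedef
//   double (*UnaryMathFunctionWithIsolate)(double x, Isolate* isolate);
//   UnaryMathFunctionWithIsolate fast_sqrt = CreateSqrtFunction(isolate);
//   double r = (fast_sqrt != nullptr) ? fast_sqrt(2.0, isolate)
//                                     : std::sqrt(2.0);  // Simulator path.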

#undef __


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}


// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  Label indirect_string_loaded;
  __ bind(&indirect_string_loaded);

  // Fetch the instance type of the receiver into the result register.
  __ ld(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ And(at, result, Operand(kIsIndirectStringMask));
  __ Branch(&check_sequential, eq, at, Operand(zero_reg));

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string, thin_string;
  __ And(at, result, Operand(kStringRepresentationMask));
  __ Branch(&cons_string, eq, at, Operand(kConsStringTag));
  __ Branch(&thin_string, eq, at, Operand(kThinStringTag));

  // Handle slices.
  __ ld(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
  __ ld(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ dsra32(at, result, 0);
  __ Daddu(index, index, at);
  __ jmp(&indirect_string_loaded);

  // Handle thin strings.
  __ bind(&thin_string);
  __ ld(string, FieldMemOperand(string, ThinString::kActualOffset));
  __ jmp(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ ld(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ LoadRoot(at, Heap::kempty_stringRootIndex);
  __ Branch(call_runtime, ne, result, Operand(at));
  // Get the first of the two strings and load its instance type.
  __ ld(string, FieldMemOperand(string, ConsString::kFirstOffset));
  __ jmp(&indirect_string_loaded);

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ And(at, result, Operand(kStringRepresentationMask));
  __ Branch(&external_string, ne, at, Operand(zero_reg));

  // Prepare sequential strings.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ Daddu(string,
          string,
          SeqTwoByteString::kHeaderSize - kHeapObjectTag);
  __ jmp(&check_encoding);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ And(at, result, Operand(kIsIndirectStringMask));
    __ Assert(eq, kExternalStringExpectedButNotFound,
        at, Operand(zero_reg));
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ And(at, result, Operand(kShortExternalStringMask));
  __ Branch(call_runtime, ne, at, Operand(zero_reg));
  __ ld(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label one_byte, done;
  __ bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ And(at, result, Operand(kStringEncodingMask));
  __ Branch(&one_byte, ne, at, Operand(zero_reg));
  // Two-byte string.
  __ Dlsa(at, string, index, 1);
  __ lhu(result, MemOperand(at));
  __ jmp(&done);
  __ bind(&one_byte);
  // One-byte string.
  __ Daddu(at, string, index);
  __ lbu(result, MemOperand(at));
  __ bind(&done);
}
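
// In outline, the code emitted above is equivalent to:
//   while (string is a sliced, thin or cons string) {
//     unwrap it (adding the slice offset to index), or bail out to the
//     runtime for a cons string whose second part is not empty;
//   }
//   if (string is external) string = its resource data (short externals bail);
//   result = the one- or two-byte character at index, per the encoding bit.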

#ifdef DEBUG
// nop(CODE_AGE_MARKER_NOP)
static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
#endif


CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
  USE(isolate);
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  // Since patcher is a large object, allocate it dynamically when needed,
  // to avoid overloading the stack in stress conditions.
  // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
  // the process, before the MIPS simulator ICache is set up.
  std::unique_ptr<CodePatcher> patcher(
      new CodePatcher(isolate, young_sequence_.start(),
                      young_sequence_.length() / Assembler::kInstrSize,
                      CodePatcher::DONT_FLUSH));
  PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
  patcher->masm()->PushStandardFrame(a1);
  patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
  patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
  patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
}
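
// The "young" sequence captured above is just the standard frame push padded
// with CODE_AGE_SEQUENCE_NOPs; Code::IsYoungSequence() below relies on the
// shared CodeAgingHelper to compare a function's prologue against this
// reference copy.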

#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction;
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}

Code::Age Code::GetCodeAge(Isolate* isolate, byte* sequence) {
  if (IsYoungSequence(isolate, sequence)) return kNoAgeCodeAge;

  Address target_address =
      Assembler::target_address_at(sequence + Assembler::kInstrSize);
  Code* stub = GetCodeFromTargetAddress(target_address);
  return GetAgeOfCodeAgeStub(stub);
}

void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence,
                                Code::Age age) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    Assembler::FlushICache(isolate, sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age);
    CodePatcher patcher(isolate, sequence,
                        young_length / Assembler::kInstrSize);
    // Mark this code sequence for FindPlatformCodeAgeSequence().
    patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
    // Load the stub address into t9 and call it;
    // GetCodeAge() extracts the stub address from this instruction.
    patcher.masm()->li(
        t9,
        Operand(reinterpret_cast<uint64_t>(stub->instruction_start())),
        ADDRESS_LOAD);
    patcher.masm()->nop();  // Prevent jalr to jal optimization.
    patcher.masm()->jalr(t9, a0);
    patcher.masm()->nop();  // Branch delay slot nop.
    patcher.masm()->nop();  // Pad the empty space.
  }
}


#undef __

}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_MIPS64