// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_MIPS

#include "codegen.h"
#include "macro-assembler.h"
#include "simulator-mips.h"

namespace v8 {
namespace internal {


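// On MIPS no specialized stub is generated for the transcendental cache;
// the C library implementations are returned and called directly.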
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
  switch (type) {
    case TranscendentalCache::SIN: return &sin;
    case TranscendentalCache::COS: return &cos;
    case TranscendentalCache::TAN: return &tan;
    case TranscendentalCache::LOG: return &log;
    default: UNIMPLEMENTED();
  }
  return NULL;
}


#define __ masm.


#if defined(USE_SIMULATOR)
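// Under the simulator the generated MIPS machine code cannot be called
// directly from C++, so calls are routed through Simulator::CallFP.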
byte* fast_exp_mips_machine_code = NULL;
double fast_exp_simulator(double x) {
  return Simulator::current(Isolate::Current())->CallFP(
      fast_exp_mips_machine_code, x, 0);
}
#endif


UnaryMathFunction CreateExpFunction() {
  if (!FLAG_fast_math) return &exp;
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &exp;
  ExternalReference::InitializeMathExpData();

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  {
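    // Register assignments follow the MIPS O32 calling convention: with a
    // hard-float ABI the first double argument arrives in f12 and the double
    // result is returned in f0; with a soft-float ABI the argument arrives in
    // the a0/a1 pair and the result is returned in v0/v1.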
    DoubleRegister input = f12;
    DoubleRegister result = f0;
    DoubleRegister double_scratch1 = f4;
    DoubleRegister double_scratch2 = f6;
    Register temp1 = t0;
    Register temp2 = t1;
    Register temp3 = t2;

    if (!IsMipsSoftFloatABI) {
      // Input value is in f12 anyway, nothing to do.
    } else {
      __ Move(input, a0, a1);
    }
    __ Push(temp3, temp2, temp1);
    MathExpGenerator::EmitMathExp(
        &masm, input, result, double_scratch1, double_scratch2,
        temp1, temp2, temp3);
    __ Pop(temp3, temp2, temp1);
    if (!IsMipsSoftFloatABI) {
      // Result is already in f0, nothing to do.
    } else {
      __ Move(v0, v1, result);
    }
    __ Ret();
  }

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

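  // Flush the instruction cache so the freshly emitted code is visible to
  // instruction fetch, then write-protect the buffer before handing it out.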
  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);

#if !defined(USE_SIMULATOR)
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#else
  fast_exp_mips_machine_code = buffer;
  return &fast_exp_simulator;
#endif
}


#undef __


UnaryMathFunction CreateSqrtFunction() {
  return &sqrt;
}


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  ASSERT(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  ASSERT(masm->has_frame());
  masm->set_has_frame(false);
}


// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm, AllocationSiteMode mode,
    Label* allocation_memento_found) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : key
  //  -- a2    : receiver
  //  -- ra    : return address
  //  -- a3    : target map, scratch for subsequent call
  //  -- t0    : scratch (elements)
  // -----------------------------------
  if (mode == TRACK_ALLOCATION_SITE) {
    ASSERT(allocation_memento_found != NULL);
    masm->TestJSArrayForAllocationMemento(a2, t0, eq,
                                          allocation_memento_found);
  }

  // Set transitioned map.
  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
  __ RecordWriteField(a2,
                      HeapObject::kMapOffset,
                      a3,
                      t5,
                      kRAHasNotBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : key
  //  -- a2    : receiver
  //  -- ra    : return address
  //  -- a3    : target map, scratch for subsequent call
  //  -- t0    : scratch (elements)
  // -----------------------------------
  Label loop, entry, convert_hole, gc_required, only_change_map, done;

  Register scratch = t6;

  if (mode == TRACK_ALLOCATION_SITE) {
    masm->TestJSArrayForAllocationMemento(a2, t0, eq, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
  __ Branch(&only_change_map, eq, at, Operand(t0));

  __ push(ra);
  __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
  // t0: source FixedArray
  // t1: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
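  // t1 holds the smi-tagged length (value << 1); shifting left by two more
  // bits yields length * 8, i.e. length * kDoubleSize, to which the header
  // size is then added.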
  __ sll(scratch, t1, 2);
  __ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
  __ Allocate(scratch, t2, t3, t5, &gc_required, DOUBLE_ALIGNMENT);
  // t2: destination FixedDoubleArray, not tagged as heap object

  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(t5, Heap::kFixedDoubleArrayMapRootIndex);
  __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
  __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
  // Update receiver's map.

  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
  __ RecordWriteField(a2,
                      HeapObject::kMapOffset,
                      a3,
                      t5,
                      kRAHasBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ Addu(a3, t2, Operand(kHeapObjectTag));
  __ sw(a3, FieldMemOperand(a2, JSObject::kElementsOffset));
  __ RecordWriteField(a2,
                      JSObject::kElementsOffset,
                      a3,
                      t5,
                      kRAHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);


  // Prepare for conversion loop.
  __ Addu(a3, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ Addu(t3, t2, Operand(FixedDoubleArray::kHeaderSize));
  __ sll(t2, t1, 2);
  __ Addu(t2, t2, t3);
  __ li(t0, Operand(kHoleNanLower32));
  __ li(t1, Operand(kHoleNanUpper32));
  // t0: kHoleNanLower32
  // t1: kHoleNanUpper32
  // t2: end of destination FixedDoubleArray, not tagged
  // t3: begin of FixedDoubleArray element fields, not tagged

  __ Branch(&entry);

  __ bind(&only_change_map);
  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
  __ RecordWriteField(a2,
                      HeapObject::kMapOffset,
                      a3,
                      t5,
                      kRAHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ Branch(&done);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ pop(ra);
  __ Branch(fail);

  // Convert and copy elements.
  __ bind(&loop);
  __ lw(t5, MemOperand(a3));
  __ Addu(a3, a3, kIntSize);
  // t5: current element
  __ UntagAndJumpIfNotSmi(t5, t5, &convert_hole);

  // Normal smi, convert to double and store.
  __ mtc1(t5, f0);
  __ cvt_d_w(f0, f0);
  __ sdc1(f0, MemOperand(t3));
  __ Addu(t3, t3, kDoubleSize);

  __ Branch(&entry);

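  // The hole is encoded as a NaN with a fixed bit pattern
  // (kHoleNanUpper32:kHoleNanLower32), so it can be told apart from any NaN
  // produced by an actual computation.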
  // Hole found, store the-hole NaN.
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    // Restore a "smi-untagged" heap object.
    __ SmiTag(t5);
    __ Or(t5, t5, Operand(1));
    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(t5));
  }
  __ sw(t0, MemOperand(t3));  // mantissa
  __ sw(t1, MemOperand(t3, kIntSize));  // exponent
  __ Addu(t3, t3, kDoubleSize);

  __ bind(&entry);
  __ Branch(&loop, lt, t3, Operand(t2));

  __ pop(ra);
  __ bind(&done);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  // ----------- S t a t e -------------
  //  -- a0    : value
  //  -- a1    : key
  //  -- a2    : receiver
  //  -- ra    : return address
  //  -- a3    : target map, scratch for subsequent call
  //  -- t0    : scratch (elements)
  // -----------------------------------
  Label entry, loop, convert_hole, gc_required, only_change_map;

  if (mode == TRACK_ALLOCATION_SITE) {
    masm->TestJSArrayForAllocationMemento(a2, t0, eq, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
  __ Branch(&only_change_map, eq, at, Operand(t0));

  __ MultiPush(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());

  __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
  // t0: source FixedArray
  // t1: number of elements (smi-tagged)

  // Allocate new FixedArray.
  __ sll(a0, t1, 1);
  __ Addu(a0, a0, FixedDoubleArray::kHeaderSize);
  __ Allocate(a0, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
  // t2: destination FixedArray, not tagged as heap object
  // Set destination FixedArray's length and map.
  __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
  __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
  __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));

  // Prepare for conversion loop.
  __ Addu(t0, t0, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
  __ Addu(a3, t2, Operand(FixedArray::kHeaderSize));
  __ Addu(t2, t2, Operand(kHeapObjectTag));
  __ sll(t1, t1, 1);
  __ Addu(t1, a3, t1);
  __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
  __ LoadRoot(t5, Heap::kHeapNumberMapRootIndex);
  // Using offset addresses.
  // a3: begin of destination FixedArray element fields, not tagged
  // t0: begin of source FixedDoubleArray element fields, not tagged, +4
  // t1: end of destination FixedArray, not tagged
  // t2: destination FixedArray
  // t3: the-hole pointer
  // t5: heap number map
  __ Branch(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ MultiPop(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());

  __ Branch(fail);

  __ bind(&loop);
  __ lw(a1, MemOperand(t0));
  __ Addu(t0, t0, kDoubleSize);
  // a1: current element's upper 32 bits
  // t0: address of next element's upper 32 bits
  __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));

  // Non-hole double, copy value into a heap number.
  __ AllocateHeapNumber(a2, a0, t6, t5, &gc_required);
  // a2: new heap number
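  // t0 has already been advanced past this element and points at the next
  // element's upper word (the source pointer runs at a +4 offset), so the
  // current element's lower (mantissa) word sits at t0 - 12; its upper
  // (exponent) word was loaded into a1 above.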
  __ lw(a0, MemOperand(t0, -12));
  __ sw(a0, FieldMemOperand(a2, HeapNumber::kMantissaOffset));
  __ sw(a1, FieldMemOperand(a2, HeapNumber::kExponentOffset));
  __ mov(a0, a3);
  __ sw(a2, MemOperand(a3));
  __ Addu(a3, a3, kIntSize);
  __ RecordWrite(t2,
                 a0,
                 a2,
                 kRAHasBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ Branch(&entry);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ sw(t3, MemOperand(a3));
  __ Addu(a3, a3, kIntSize);

  __ bind(&entry);
  __ Branch(&loop, lt, a3, Operand(t1));

  __ MultiPop(a2.bit() | a3.bit() | a0.bit() | a1.bit());
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ sw(t2, FieldMemOperand(a2, JSObject::kElementsOffset));
  __ RecordWriteField(a2,
                      JSObject::kElementsOffset,
                      t2,
                      t5,
                      kRAHasBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ pop(ra);

  __ bind(&only_change_map);
  // Update receiver's map.
  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
  __ RecordWriteField(a2,
                      HeapObject::kMapOffset,
                      a3,
                      t5,
                      kRAHasNotBeenSaved,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
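  // Loads the character at |index| from |string| into |result|. Sliced and
  // cons strings are unwrapped to the underlying sequential or external
  // string; anything that cannot be handled inline (unflattened cons
  // strings, short external strings) bails out to |call_runtime|.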
  // Fetch the instance type of the receiver into result register.
  __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ And(at, result, Operand(kIsIndirectStringMask));
  __ Branch(&check_sequential, eq, at, Operand(zero_reg));

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ And(at, result, Operand(kSlicedNotConsMask));
  __ Branch(&cons_string, eq, at, Operand(zero_reg));

  // Handle slices.
  Label indirect_string_loaded;
  __ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
  __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ sra(at, result, kSmiTagSize);
  __ Addu(index, index, at);
  __ jmp(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ lw(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ LoadRoot(at, Heap::kempty_stringRootIndex);
  __ Branch(call_runtime, ne, result, Operand(at));
  // Get the first of the two strings and load its instance type.
  __ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ And(at, result, Operand(kStringRepresentationMask));
  __ Branch(&external_string, ne, at, Operand(zero_reg));

  // Prepare sequential strings.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ Addu(string,
          string,
          SeqTwoByteString::kHeaderSize - kHeapObjectTag);
  __ jmp(&check_encoding);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ And(at, result, Operand(kIsIndirectStringMask));
    __ Assert(eq, kExternalStringExpectedButNotFound,
        at, Operand(zero_reg));
  }
  // Rule out short external strings.
  STATIC_CHECK(kShortExternalStringTag != 0);
  __ And(at, result, Operand(kShortExternalStringMask));
  __ Branch(call_runtime, ne, at, Operand(zero_reg));
  __ lw(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label ascii, done;
  __ bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ And(at, result, Operand(kStringEncodingMask));
  __ Branch(&ascii, ne, at, Operand(zero_reg));
  // Two-byte string.
  __ sll(at, index, 1);
  __ Addu(at, string, at);
  __ lhu(result, MemOperand(at));
  __ jmp(&done);
  __ bind(&ascii);
  // Ascii string.
  __ Addu(at, string, index);
  __ lbu(result, MemOperand(at));
  __ bind(&done);
}


static MemOperand ExpConstant(int index, Register base) {
  return MemOperand(base, index * kDoubleSize);
}


void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   DoubleRegister input,
                                   DoubleRegister result,
                                   DoubleRegister double_scratch1,
                                   DoubleRegister double_scratch2,
                                   Register temp1,
                                   Register temp2,
                                   Register temp3) {
  ASSERT(!input.is(result));
  ASSERT(!input.is(double_scratch1));
  ASSERT(!input.is(double_scratch2));
  ASSERT(!result.is(double_scratch1));
  ASSERT(!result.is(double_scratch2));
  ASSERT(!double_scratch1.is(double_scratch2));
  ASSERT(!temp1.is(temp2));
  ASSERT(!temp1.is(temp3));
  ASSERT(!temp2.is(temp3));
  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);

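  // A sketch of the approach, inferred from the code below and from the
  // constant/table layout set up by ExternalReference::InitializeMathExpData:
  // the input is first range-checked against lower/upper cutoffs (constants
  // 0-2), then scaled so that the integer part of the scaled value selects a
  // power of two and an entry in a precomputed log table, while the residual
  // fraction feeds a polynomial correction (constants 3-8). The final result
  // is formed by assembling the IEEE-754 bits directly and multiplying by
  // the polynomial term.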
  Label done;

  __ li(temp3, Operand(ExternalReference::math_exp_constants(0)));

  __ ldc1(double_scratch1, ExpConstant(0, temp3));
  __ Move(result, kDoubleRegZero);
  __ BranchF(&done, NULL, ge, double_scratch1, input);
  __ ldc1(double_scratch2, ExpConstant(1, temp3));
  __ ldc1(result, ExpConstant(2, temp3));
  __ BranchF(&done, NULL, ge, input, double_scratch2);
  __ ldc1(double_scratch1, ExpConstant(3, temp3));
  __ ldc1(result, ExpConstant(4, temp3));
  __ mul_d(double_scratch1, double_scratch1, input);
  __ add_d(double_scratch1, double_scratch1, result);
  __ Move(temp2, temp1, double_scratch1);
  __ sub_d(double_scratch1, double_scratch1, result);
  __ ldc1(result, ExpConstant(6, temp3));
  __ ldc1(double_scratch2, ExpConstant(5, temp3));
  __ mul_d(double_scratch1, double_scratch1, double_scratch2);
  __ sub_d(double_scratch1, double_scratch1, input);
  __ sub_d(result, result, double_scratch1);
  __ mul_d(input, double_scratch1, double_scratch1);
  __ mul_d(result, result, input);
  __ srl(temp1, temp2, 11);
  __ ldc1(double_scratch2, ExpConstant(7, temp3));
  __ mul_d(result, result, double_scratch2);
  __ sub_d(result, result, double_scratch1);
  __ ldc1(double_scratch2, ExpConstant(8, temp3));
  __ add_d(result, result, double_scratch2);
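  // temp2 holds the low word of the scaled input's integer part: its low 11
  // bits (mask 0x7ff) index the log table, while the bits above them, biased
  // by 0x3ff and shifted into bit position 20, form the exponent field of
  // the IEEE-754 double assembled below.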
  __ li(at, 0x7ff);
  __ And(temp2, temp2, at);
  __ Addu(temp1, temp1, Operand(0x3ff));
  __ sll(temp1, temp1, 20);

  // Must not call ExpConstant() after overwriting temp3!
  __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
  __ sll(at, temp2, 3);
  __ addu(at, at, temp3);
  __ lw(at, MemOperand(at));
  __ Addu(temp3, temp3, Operand(kPointerSize));
  __ sll(temp2, temp2, 3);
  __ addu(temp2, temp2, temp3);
  __ lw(temp2, MemOperand(temp2));
  __ Or(temp1, temp1, temp2);
  __ Move(input, at, temp1);
  __ mul_d(result, result, input);
  __ bind(&done);
}


// nop(CODE_AGE_MARKER_NOP)
static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;

static byte* GetNoCodeAgeSequence(uint32_t* length) {
  // The sequence of instructions that is patched out for aging code is the
  // following boilerplate stack-building prologue that is found in functions
  // (code of kind Code::FUNCTION).
  static bool initialized = false;
  static uint32_t sequence[kNoCodeAgeSequenceLength];
  byte* byte_sequence = reinterpret_cast<byte*>(sequence);
  *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
  if (!initialized) {
    CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength);
    patcher.masm()->Push(ra, fp, cp, a1);
    patcher.masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
    patcher.masm()->Addu(fp, sp, Operand(2 * kPointerSize));
    initialized = true;
  }
  return byte_sequence;
}


bool Code::IsYoungSequence(byte* sequence) {
  uint32_t young_length;
  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
  bool result = !memcmp(sequence, young_sequence, young_length);
  ASSERT(result ||
         Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction);
  return result;
}


void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(sequence)) {
    *age = kNoAge;
    *parity = NO_MARKING_PARITY;
  } else {
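    // The sequence was rewritten by PatchPlatformCodeAge(), which recorded
    // the age stub's entry address in the last word of the sequence; read it
    // back and recover the age and parity from that stub.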
    Address target_address = Memory::Address_at(
        sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1));
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length;
  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
  if (age == kNoAge) {
    CopyBytes(sequence, young_sequence, young_length);
    CPU::FlushICache(sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(age, parity);
    CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
    // Mark this code sequence for FindPlatformCodeAgeSequence().
    patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
    // Save the function's original return address
    // (it will be clobbered by Call(t9)).
    patcher.masm()->mov(at, ra);
    // Load the stub address to t9 and call it.
    patcher.masm()->li(t9,
        Operand(reinterpret_cast<uint32_t>(stub->instruction_start())));
    patcher.masm()->Call(t9);
    // Record the stub address in the empty space for GetCodeAgeAndParity().
    patcher.masm()->dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
  }
}


#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS