// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_X64

#include "codegen.h"
#include "macro-assembler.h"

namespace v8 {
namespace internal {

// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

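// StubRuntimeCallHelper brackets runtime calls made from generated stubs with
// an INTERNAL frame. Added explanatory note: EnterFrame/LeaveFrame only emit
// the frame setup and teardown code, while has_frame() is the MacroAssembler's
// own bookkeeping flag, which is why the ASSERTs below can sit next to the
// frame instructions without contradiction.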
void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  ASSERT(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  ASSERT(masm->has_frame());
  masm->set_has_frame(false);
}


#define __ masm.


UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
                                                 &actual_size,
                                                 true));
  if (buffer == NULL) {
    // Fall back to the library function if the stub cannot be created.
    switch (type) {
      case TranscendentalCache::SIN: return &sin;
      case TranscendentalCache::COS: return &cos;
      case TranscendentalCache::TAN: return &tan;
      case TranscendentalCache::LOG: return &log;
      default: UNIMPLEMENTED();
    }
  }

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // xmm0: raw double input.
  // Move double input into registers.
  __ push(rbx);
  __ push(rdi);
  __ movq(rbx, xmm0);
  __ push(rbx);
  __ fld_d(Operand(rsp, 0));
  TranscendentalCacheStub::GenerateOperation(&masm, type);
  // The return value is expected to be in xmm0.
  __ fstp_d(Operand(rsp, 0));
  __ pop(rbx);
  __ movq(xmm0, rbx);
  __ pop(rdi);
  __ pop(rbx);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
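
// Illustrative note (added): the returned pointer is a plain C function of
// type UnaryMathFunction, i.e. double (*)(double), so a caller can invoke the
// generated stub directly, e.g.
//   UnaryMathFunction fast_sin =
//       CreateTranscendentalFunction(TranscendentalCache::SIN);
//   double y = fast_sin(0.5);  // sin(0.5), computed by the generated code.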


UnaryMathFunction CreateExpFunction() {
  if (!FLAG_fast_math) return &exp;
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &exp;
  ExternalReference::InitializeMathExpData();

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // xmm0: raw double input.
  XMMRegister input = xmm0;
  XMMRegister result = xmm1;
  __ push(rax);
  __ push(rbx);

  MathExpGenerator::EmitMathExp(&masm, input, result, xmm2, rax, rbx);

  __ pop(rbx);
  __ pop(rax);
  __ movsd(xmm0, result);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}


UnaryMathFunction CreateSqrtFunction() {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
                                                 &actual_size,
                                                 true));
  if (buffer == NULL) return &sqrt;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // xmm0: raw double input.
  // Compute the square root in place.
  __ sqrtsd(xmm0, xmm0);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}


#ifdef _WIN64
typedef double (*ModuloFunction)(double, double);
// Define custom fmod implementation.
ModuloFunction CreateModuloFunction() {
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
                                                 &actual_size,
                                                 true));
  CHECK(buffer);
  Assembler masm(NULL, buffer, static_cast<int>(actual_size));
  // Generated code is put into a fixed, unmovable buffer, and not into
  // the V8 heap. We can't, and don't, refer to any relocatable addresses
  // (e.g. the JavaScript NaN object).

  // Windows 64 ABI passes double arguments in xmm0, xmm1 and
  // returns result in xmm0.
  // Argument backing space is allocated on the stack above
  // the return address.
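  // Illustrative layout note (added; inferred from the stores below, assuming
  // the standard Win64 home/shadow space reserved by the caller):
  //   rsp + 0                : return address
  //   rsp + kPointerSize     : home slot of the 1st argument, reused for x
  //   rsp + kPointerSize * 2 : home slot of the 2nd argument, reused for y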

  // Compute x mod y.
  // Load y and x (use argument backing store as temporary storage).
  __ movsd(Operand(rsp, kPointerSize * 2), xmm1);
  __ movsd(Operand(rsp, kPointerSize), xmm0);
  __ fld_d(Operand(rsp, kPointerSize * 2));
  __ fld_d(Operand(rsp, kPointerSize));

  // Clear exception flags before operation.
  {
    Label no_exceptions;
    __ fwait();
    __ fnstsw_ax();
    // Clear if Invalid Operation or Zero Divide exceptions are set.
    __ testb(rax, Immediate(5));
    __ j(zero, &no_exceptions);
    __ fnclex();
    __ bind(&no_exceptions);
  }

  // Compute st(0) % st(1)
  {
    Label partial_remainder_loop;
    __ bind(&partial_remainder_loop);
    __ fprem();
    __ fwait();
    __ fnstsw_ax();
    __ testl(rax, Immediate(0x400 /* C2 */));
    // If C2 is set, the computation has only a partial result. Loop to
    // continue the computation.
    __ j(not_zero, &partial_remainder_loop);
  }

  Label valid_result;
  Label return_result;
  // If Invalid Operation or Zero Divide exceptions are set,
  // return NaN.
  __ testb(rax, Immediate(5));
  __ j(zero, &valid_result);
  __ fstp(0);  // Drop result in st(0).
  int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
  __ movq(rcx, kNaNValue, RelocInfo::NONE64);
  __ movq(Operand(rsp, kPointerSize), rcx);
  __ movsd(xmm0, Operand(rsp, kPointerSize));
  __ jmp(&return_result);

  // If result is valid, return that.
  __ bind(&valid_result);
  __ fstp_d(Operand(rsp, kPointerSize));
  __ movsd(xmm0, Operand(rsp, kPointerSize));

  // Clean up FPU stack and exceptions and return xmm0.
  __ bind(&return_result);
  __ fstp(0);  // Unload y.

  Label clear_exceptions;
  __ testb(rax, Immediate(0x3f /* Any Exception */));
  __ j(not_zero, &clear_exceptions);
  __ ret(0);
  __ bind(&clear_exceptions);
  __ fnclex();
  __ ret(0);

  CodeDesc desc;
  masm.GetCode(&desc);
  OS::ProtectCode(buffer, actual_size);
  // Call the function from C++ through this pointer.
  return FUNCTION_CAST<ModuloFunction>(buffer);
}
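
// Illustrative usage (added note): the generated stub behaves like fmod, e.g.
//   ModuloFunction mod = CreateModuloFunction();
//   double r = mod(5.5, 2.0);  // r == 1.5, same as fmod(5.5, 2.0).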

#endif

#undef __

// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm, AllocationSiteMode mode,
    Label* allocation_memento_found) {
  // ----------- S t a t e -------------
  //  -- rax    : value
  //  -- rbx    : target map
  //  -- rcx    : key
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------
  if (mode == TRACK_ALLOCATION_SITE) {
    ASSERT(allocation_memento_found != NULL);
    __ TestJSArrayForAllocationMemento(rdx, rdi);
    __ j(equal, allocation_memento_found);
  }

  // Set transitioned map.
  __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
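  // Added explanatory note: the receiver's map word was just overwritten with
  // a pointer to the new map, so a write barrier is required; RecordWriteField
  // below records the store for the remembered set / incremental marker.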
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  // ----------- S t a t e -------------
  //  -- rax    : value
  //  -- rbx    : target map
  //  -- rcx    : key
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------
  // The fail label is taken if an allocation memento is found or if
  // allocating a new backing store fails.
  Label allocated, new_backing_store, only_change_map, done;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ TestJSArrayForAllocationMemento(rdx, rdi);
    __ j(equal, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
  __ j(equal, &only_change_map);

  // Check backing store for COW-ness.  For COW arrays we have to
  // allocate a new backing store.
  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
  __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset),
                 Heap::kFixedCOWArrayMapRootIndex);
  __ j(equal, &new_backing_store);
  // Check if the backing store is in new space. If not, we need to allocate
  // a new one since the old one is in old pointer space.
  // If it is in new space, we can reuse the old backing store because it is
  // the same size.
  __ JumpIfNotInNewSpace(r8, rdi, &new_backing_store);

  __ movq(r14, r8);  // Destination array equals source array.

  // r8 : source FixedArray
  // r9 : elements array length
  // r14: destination FixedDoubleArray
  // Set backing store's map
  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
  __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);

  __ bind(&allocated);
  // Set transitioned map.
  __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Convert smis to doubles and holes to hole NaNs.  The Array's length
  // remains unchanged.
  STATIC_ASSERT(FixedDoubleArray::kLengthOffset == FixedArray::kLengthOffset);
  STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);

  Label loop, entry, convert_hole;
  __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
  // r15: the-hole NaN
  __ jmp(&entry);

  // Allocate new backing store.
  __ bind(&new_backing_store);
  __ lea(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
  __ Allocate(rdi, r14, r11, r15, fail, TAG_OBJECT);
  // Set backing store's map
  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
  __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);
  // Set receiver's backing store.
  __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r14);
  __ movq(r11, r14);
  __ RecordWriteField(rdx,
                      JSObject::kElementsOffset,
                      r11,
                      r15,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Set backing store's length.
  __ Integer32ToSmi(r11, r9);
  __ movq(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
  __ jmp(&allocated);

  __ bind(&only_change_map);
  // Set transitioned map.
  __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&done);

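  // Added note: the loop below iterates backwards; r9 starts at the element
  // count, the entry label decrements it first, so the body sees indices
  // length-1 down to 0, and the loop exits once r9 underflows to -1 (sign
  // flag set, so the not_sign branch falls through).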
  // Conversion loop.
  __ bind(&loop);
  __ movq(rbx,
          FieldOperand(r8, r9, times_pointer_size, FixedArray::kHeaderSize));
  // r9 : current element's index
  // rbx: current element (smi-tagged)
  __ JumpIfNotSmi(rbx, &convert_hole);
  __ SmiToInteger32(rbx, rbx);
  __ cvtlsi2sd(xmm0, rbx);
  __ movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
           xmm0);
  __ jmp(&entry);
  __ bind(&convert_hole);

  if (FLAG_debug_code) {
    __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
    __ Assert(equal, kObjectFoundInSmiOnlyArray);
  }

  __ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
  __ bind(&entry);
  __ decq(r9);
  __ j(not_sign, &loop);

  __ bind(&done);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  // ----------- S t a t e -------------
  //  -- rax    : value
  //  -- rbx    : target map
  //  -- rcx    : key
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------
  Label loop, entry, convert_hole, gc_required, only_change_map;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ TestJSArrayForAllocationMemento(rdx, rdi);
    __ j(equal, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
  __ j(equal, &only_change_map);

  __ push(rax);

  __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
  // r8 : source FixedDoubleArray
  // r9 : number of elements
  __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
  __ Allocate(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
  // r11: destination FixedArray
  __ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
  __ movq(FieldOperand(r11, HeapObject::kMapOffset), rdi);
  __ Integer32ToSmi(r14, r9);
  __ movq(FieldOperand(r11, FixedArray::kLengthOffset), r14);

  // Prepare for conversion loop.
  __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE64);
  __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
  // rsi: the-hole NaN
  // rdi: pointer to the-hole
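  // Added note: holes are detected below by comparing the raw 64-bit bit
  // pattern of each element against kHoleNanInt64 with an integer cmpq; a
  // floating-point compare would not work, since the hole is a NaN and NaN
  // never compares equal to itself.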
  __ jmp(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ pop(rax);
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  __ jmp(fail);

  // Box doubles into heap numbers.
  __ bind(&loop);
  __ movq(r14, FieldOperand(r8,
                            r9,
                            times_8,
                            FixedDoubleArray::kHeaderSize));
  // r9 : current element's index
  // r14: current element
  __ cmpq(r14, rsi);
  __ j(equal, &convert_hole);

  // Non-hole double, copy value into a heap number.
  __ AllocateHeapNumber(rax, r15, &gc_required);
  // rax: new heap number
  __ movq(FieldOperand(rax, HeapNumber::kValueOffset), r14);
  __ movq(FieldOperand(r11,
                       r9,
                       times_pointer_size,
                       FixedArray::kHeaderSize),
          rax);
  __ movq(r15, r9);
  __ RecordWriteArray(r11,
                      rax,
                      r15,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&entry, Label::kNear);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ movq(FieldOperand(r11,
                       r9,
                       times_pointer_size,
                       FixedArray::kHeaderSize),
          rdi);

  __ bind(&entry);
  __ decq(r9);
  __ j(not_sign, &loop);

  // Replace receiver's backing store with newly created and filled FixedArray.
  __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r11);
  __ RecordWriteField(rdx,
                      JSObject::kElementsOffset,
                      r11,
                      r15,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ pop(rax);
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));

  __ bind(&only_change_map);
  // Set transitioned map.
  __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));
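  // Added note: the instance type byte encodes both the string representation
  // (sequential, cons, sliced, external) and its encoding (one-byte vs.
  // two-byte), so the testb checks against the masks below are sufficient for
  // the dispatch.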

  // We need special handling for indirect strings.
  Label check_sequential;
  __ testb(result, Immediate(kIsIndirectStringMask));
  __ j(zero, &check_sequential, Label::kNear);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ testb(result, Immediate(kSlicedNotConsMask));
  __ j(zero, &cons_string, Label::kNear);

  // Handle slices.
  Label indirect_string_loaded;
  __ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
  __ addq(index, result);
  __ movq(string, FieldOperand(string, SlicedString::kParentOffset));
  __ jmp(&indirect_string_loaded, Label::kNear);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
                 Heap::kempty_stringRootIndex);
  __ j(not_equal, call_runtime);
  __ movq(string, FieldOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label seq_string;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ testb(result, Immediate(kStringRepresentationMask));
  __ j(zero, &seq_string, Label::kNear);

  // Handle external strings.
  Label ascii_external, done;
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ testb(result, Immediate(kIsIndirectStringMask));
    __ Assert(zero, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  STATIC_CHECK(kShortExternalStringTag != 0);
  __ testb(result, Immediate(kShortExternalStringTag));
  __ j(not_zero, call_runtime);
  // Check encoding.
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ testb(result, Immediate(kStringEncodingMask));
  __ movq(result, FieldOperand(string, ExternalString::kResourceDataOffset));
  __ j(not_equal, &ascii_external, Label::kNear);
  // Two-byte string.
  __ movzxwl(result, Operand(result, index, times_2, 0));
  __ jmp(&done, Label::kNear);
  __ bind(&ascii_external);
  // ASCII string.
  __ movzxbl(result, Operand(result, index, times_1, 0));
  __ jmp(&done, Label::kNear);

  // Dispatch on the encoding: ASCII or two-byte.
  Label ascii;
  __ bind(&seq_string);
  STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
  __ testb(result, Immediate(kStringEncodingMask));
  __ j(not_zero, &ascii, Label::kNear);

  // Two-byte string.
  // Load the two-byte character code into the result register.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
  __ movzxwl(result, FieldOperand(string,
                                  index,
                                  times_2,
                                  SeqTwoByteString::kHeaderSize));
  __ jmp(&done, Label::kNear);

  // ASCII string.
  // Load the byte into the result register.
  __ bind(&ascii);
  __ movzxbl(result, FieldOperand(string,
                                  index,
                                  times_1,
                                  SeqOneByteString::kHeaderSize));
  __ bind(&done);
}


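// Added explanatory sketch (not part of the original source): the code below
// approximates exp(x) with a table-driven range reduction. Roughly, the input
// is clamped (returning 0 or the stored overflow value for out-of-range x),
// then rescaled and rounded so that exp(x) splits into a power of two, a
// factor looked up in math_exp_log_table (indexed by the low 11 bits, hence
// the & 0x7ff), and a small remainder handled by a short polynomial built
// from the remaining math_exp_constants entries; the power-of-two factor is
// assembled directly in the exponent bits via the shl by 52.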
void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   XMMRegister input,
                                   XMMRegister result,
                                   XMMRegister double_scratch,
                                   Register temp1,
                                   Register temp2) {
  ASSERT(!input.is(result));
  ASSERT(!input.is(double_scratch));
  ASSERT(!result.is(double_scratch));
  ASSERT(!temp1.is(temp2));
  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);

  Label done;

  __ movq(kScratchRegister, ExternalReference::math_exp_constants(0));
  __ movsd(double_scratch, Operand(kScratchRegister, 0 * kDoubleSize));
  __ xorpd(result, result);
  __ ucomisd(double_scratch, input);
  __ j(above_equal, &done);
  __ ucomisd(input, Operand(kScratchRegister, 1 * kDoubleSize));
  __ movsd(result, Operand(kScratchRegister, 2 * kDoubleSize));
  __ j(above_equal, &done);
  __ movsd(double_scratch, Operand(kScratchRegister, 3 * kDoubleSize));
  __ movsd(result, Operand(kScratchRegister, 4 * kDoubleSize));
  __ mulsd(double_scratch, input);
  __ addsd(double_scratch, result);
  __ movq(temp2, double_scratch);
  __ subsd(double_scratch, result);
  __ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
  __ lea(temp1, Operand(temp2, 0x1ff800));
  __ and_(temp2, Immediate(0x7ff));
  __ shr(temp1, Immediate(11));
  __ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
  __ movq(kScratchRegister, ExternalReference::math_exp_log_table());
  __ shl(temp1, Immediate(52));
  __ or_(temp1, Operand(kScratchRegister, temp2, times_8, 0));
  __ movq(kScratchRegister, ExternalReference::math_exp_constants(0));
  __ subsd(double_scratch, input);
  __ movsd(input, double_scratch);
  __ subsd(result, double_scratch);
  __ mulsd(input, double_scratch);
  __ mulsd(result, input);
  __ movq(input, temp1);
  __ mulsd(result, Operand(kScratchRegister, 7 * kDoubleSize));
  __ subsd(result, double_scratch);
  __ addsd(result, Operand(kScratchRegister, 8 * kDoubleSize));
  __ mulsd(result, input);

  __ bind(&done);
}

#undef __


static const int kNoCodeAgeSequenceLength = 6;
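// Added note: 6 bytes matches the prologue patched below, assuming the usual
// x64 encodings: push rbp (1 byte), movq rbp, rsp (3 bytes including the
// REX.W prefix), push rsi (1 byte) and push rdi (1 byte).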

static byte* GetNoCodeAgeSequence(uint32_t* length) {
  static bool initialized = false;
  static byte sequence[kNoCodeAgeSequenceLength];
  *length = kNoCodeAgeSequenceLength;
  if (!initialized) {
    // The sequence of instructions that is patched out for aging code is the
    // following boilerplate stack-building prologue that is found both in
    // FUNCTION and OPTIMIZED_FUNCTION code:
    CodePatcher patcher(sequence, kNoCodeAgeSequenceLength);
    patcher.masm()->push(rbp);
    patcher.masm()->movq(rbp, rsp);
    patcher.masm()->push(rsi);
    patcher.masm()->push(rdi);
    initialized = true;
  }
  return sequence;
}


bool Code::IsYoungSequence(byte* sequence) {
  uint32_t young_length;
  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
  bool result = (!memcmp(sequence, young_sequence, young_length));
  ASSERT(result || *sequence == kCallOpcode);
  return result;
}


void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(sequence)) {
    *age = kNoAge;
    *parity = NO_MARKING_PARITY;
  } else {
    sequence++;  // Skip the kCallOpcode byte
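    // Added note: aged code begins with a rel32 call emitted by
    // PatchPlatformCodeAge below; with sequence now pointing at the 32-bit
    // displacement, the call target is reconstructed from that displacement
    // plus Assembler::kCallTargetAddressOffset (rel32 calls are relative to
    // the end of the call instruction).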
    Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
        Assembler::kCallTargetAddressOffset;
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length;
  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
  if (age == kNoAge) {
    CopyBytes(sequence, young_sequence, young_length);
    CPU::FlushICache(sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(age, parity);
    CodePatcher patcher(sequence, young_length);
    patcher.masm()->call(stub->instruction_start());
    for (int i = 0;
         i < kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength;
         i++) {
      patcher.masm()->nop();
    }
  }
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64