// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_X64

#include "codegen.h"
#include "macro-assembler.h"

namespace v8 {
namespace internal {

// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  ASSERT(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  ASSERT(masm->has_frame());
  masm->set_has_frame(false);
}


#define __ masm.


UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
                                                 &actual_size,
                                                 true));
  if (buffer == NULL) {
    // Fall back to the library function if the function cannot be created.
    switch (type) {
      case TranscendentalCache::SIN: return &sin;
      case TranscendentalCache::COS: return &cos;
      case TranscendentalCache::TAN: return &tan;
      case TranscendentalCache::LOG: return &log;
      default: UNIMPLEMENTED();
    }
  }

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // xmm0: raw double input.
  // Move double input into registers.
  __ push(rbx);
  __ push(rdi);
  __ movq(rbx, xmm0);
  __ push(rbx);
  __ fld_d(Operand(rsp, 0));
  TranscendentalCacheStub::GenerateOperation(&masm, type);
  // The return value is expected to be in xmm0.
  __ fstp_d(Operand(rsp, 0));
  __ pop(rbx);
  __ movq(xmm0, rbx);
  __ pop(rdi);
  __ pop(rbx);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
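
// Usage sketch for CreateTranscendentalFunction (hypothetical caller, not part
// of this file): the returned pointer behaves like an ordinary C function of
// type double(double), e.g.
//   UnaryMathFunction fast_log = CreateTranscendentalFunction(TranscendentalCache::LOG);
//   double y = fast_log(2.0);  // Calls the freshly generated stub.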


UnaryMathFunction CreateExpFunction() {
  if (!FLAG_fast_math) return &exp;
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &exp;
  ExternalReference::InitializeMathExpData();

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // xmm0: raw double input.
  XMMRegister input = xmm0;
  XMMRegister result = xmm1;
  __ push(rax);
  __ push(rbx);

  MathExpGenerator::EmitMathExp(&masm, input, result, xmm2, rax, rbx);

  __ pop(rbx);
  __ pop(rax);
  __ movsd(xmm0, result);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}


UnaryMathFunction CreateSqrtFunction() {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
                                                 &actual_size,
                                                 true));
  if (buffer == NULL) return &sqrt;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // xmm0: raw double input, which sqrtsd can operate on directly.
  __ sqrtsd(xmm0, xmm0);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}
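
// Note: the Create* helpers above all follow the same pattern: allocate a
// small executable buffer with OS::Allocate, emit machine code into it with a
// MacroAssembler, flush the instruction cache, re-protect the pages with
// OS::ProtectCode, and hand the buffer back as a C function pointer via
// FUNCTION_CAST.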


#ifdef _WIN64
typedef double (*ModuloFunction)(double, double);
// Define custom fmod implementation.
ModuloFunction CreateModuloFunction() {
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
                                                 &actual_size,
                                                 true));
  CHECK(buffer);
  Assembler masm(NULL, buffer, static_cast<int>(actual_size));
  // Generated code is put into a fixed, unmovable buffer, and not into
  // the V8 heap. We can't, and don't, refer to any relocatable addresses
  // (e.g. the JavaScript NaN object).

  // The Windows 64 ABI passes double arguments in xmm0 and xmm1 and
  // returns the result in xmm0.
  // Argument backing space is allocated on the stack above
  // the return address.
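  // Concretely, at entry rsp[0] holds the return address and the slots at
  // rsp + 1 * kPointerSize .. rsp + 4 * kPointerSize are the caller-allocated
  // home ("shadow") space for the register arguments, which this stub reuses
  // as scratch memory for moving values between XMM and x87 registers.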

  // Compute x mod y.
  // Load y and x (use argument backing store as temporary storage).
  __ movsd(Operand(rsp, kPointerSize * 2), xmm1);
  __ movsd(Operand(rsp, kPointerSize), xmm0);
  __ fld_d(Operand(rsp, kPointerSize * 2));
  __ fld_d(Operand(rsp, kPointerSize));

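  // Note on the x87 status-word masks used below: bit 0 (0x1) is the Invalid
  // Operation flag and bit 2 (0x4) is the Zero Divide flag, so testing against
  // Immediate(5) checks both at once; bit 10 (0x400) is the C2 condition flag,
  // which fprem sets while only a partial remainder has been computed.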
  // Clear exception flags before operation.
  {
    Label no_exceptions;
    __ fwait();
    __ fnstsw_ax();
    // Clear if Invalid Operation or Zero Divide exceptions are set.
    __ testb(rax, Immediate(5));
    __ j(zero, &no_exceptions);
    __ fnclex();
    __ bind(&no_exceptions);
  }

  // Compute st(0) % st(1).
  {
    Label partial_remainder_loop;
    __ bind(&partial_remainder_loop);
    __ fprem();
    __ fwait();
    __ fnstsw_ax();
    __ testl(rax, Immediate(0x400 /* C2 */));
    // If C2 is set, the computation only has a partial result. Loop to
    // continue the computation.
    __ j(not_zero, &partial_remainder_loop);
  }

  Label valid_result;
  Label return_result;
  // If Invalid Operation or Zero Divide exceptions are set,
  // return NaN.
  __ testb(rax, Immediate(5));
  __ j(zero, &valid_result);
  __ fstp(0);  // Drop result in st(0).
  int64_t kNaNValue = V8_INT64_C(0x7ff8000000000000);
  __ movq(rcx, kNaNValue);
  __ movq(Operand(rsp, kPointerSize), rcx);
  __ movsd(xmm0, Operand(rsp, kPointerSize));
  __ jmp(&return_result);

  // If the result is valid, return it.
  __ bind(&valid_result);
  __ fstp_d(Operand(rsp, kPointerSize));
  __ movsd(xmm0, Operand(rsp, kPointerSize));

  // Clean up the FPU stack and exceptions and return xmm0.
  __ bind(&return_result);
  __ fstp(0);  // Unload y.

  Label clear_exceptions;
  __ testb(rax, Immediate(0x3f /* Any Exception */));
  __ j(not_zero, &clear_exceptions);
  __ ret(0);
  __ bind(&clear_exceptions);
  __ fnclex();
  __ ret(0);

  CodeDesc desc;
  masm.GetCode(&desc);
  OS::ProtectCode(buffer, actual_size);
  // Call the function from C++ through this pointer.
  return FUNCTION_CAST<ModuloFunction>(buffer);
}

#endif

#undef __

// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm, AllocationSiteMode mode,
    Label* allocation_memento_found) {
  // ----------- S t a t e -------------
  //  -- rax    : value
  //  -- rbx    : target map
  //  -- rcx    : key
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------
  if (mode == TRACK_ALLOCATION_SITE) {
    ASSERT(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, allocation_memento_found);
  }

  // Set transitioned map.
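  // The map store below needs a write barrier: RecordWriteField records the
  // updated field in the remembered set (and informs incremental marking) so
  // the GC sees the new map pointer stored into the receiver.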
  __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  // ----------- S t a t e -------------
  //  -- rax    : value
  //  -- rbx    : target map
  //  -- rcx    : key
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------
  // The fail label is reached if allocating a new backing store fails or, with
  // TRACK_ALLOCATION_SITE, if an allocation memento is found on the array.
  Label allocated, new_backing_store, only_change_map, done;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
  __ j(equal, &only_change_map);

  // Check backing store for COW-ness.  For COW arrays we have to
  // allocate a new backing store.
  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
  __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset),
                 Heap::kFixedCOWArrayMapRootIndex);
  __ j(equal, &new_backing_store);
  // Check if the backing store is in new space. If not, we need to allocate
  // a new one since the old one is in pointer space.
  // If in new space, we can reuse the old backing store because it is
  // the same size.
  __ JumpIfNotInNewSpace(r8, rdi, &new_backing_store);

  __ movq(r14, r8);  // Destination array equals source array.

  // r8 : source FixedArray
  // r9 : elements array length
  // r14: destination FixedDoubleArray
  // Set backing store's map.
  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
  __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);

  __ bind(&allocated);
  // Set transitioned map.
  __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Convert smis to doubles and holes to hole NaNs.  The array's length
  // remains unchanged.
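  // A hole in a FixedDoubleArray is represented by a dedicated quiet-NaN bit
  // pattern (kHoleNanInt64), so holes can be told apart from ordinary doubles
  // (including NaNs produced by arithmetic) by comparing raw 64-bit values.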
  STATIC_ASSERT(FixedDoubleArray::kLengthOffset == FixedArray::kLengthOffset);
  STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);

  Label loop, entry, convert_hole;
  __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64));
  // r15: the-hole NaN
  __ jmp(&entry);

  // Allocate new backing store.
  __ bind(&new_backing_store);
  __ lea(rdi, Operand(r9, times_8, FixedArray::kHeaderSize));
  __ Allocate(rdi, r14, r11, r15, fail, TAG_OBJECT);
  // Set backing store's map
  __ LoadRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
  __ movq(FieldOperand(r14, HeapObject::kMapOffset), rdi);
  // Set receiver's backing store.
  __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r14);
  __ movq(r11, r14);
  __ RecordWriteField(rdx,
                      JSObject::kElementsOffset,
                      r11,
                      r15,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Set backing store's length.
  __ Integer32ToSmi(r11, r9);
  __ movq(FieldOperand(r14, FixedDoubleArray::kLengthOffset), r11);
  __ jmp(&allocated);

  __ bind(&only_change_map);
  // Set transitioned map.
  __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&done);

  // Conversion loop.
  __ bind(&loop);
  __ movq(rbx,
          FieldOperand(r8, r9, times_pointer_size, FixedArray::kHeaderSize));
  // r9 : current element's index
  // rbx: current element (smi-tagged)
  __ JumpIfNotSmi(rbx, &convert_hole);
  __ SmiToInteger32(rbx, rbx);
  __ Cvtlsi2sd(xmm0, rbx);
  __ movsd(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize),
           xmm0);
  __ jmp(&entry);
  __ bind(&convert_hole);

  if (FLAG_debug_code) {
    __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
    __ Assert(equal, kObjectFoundInSmiOnlyArray);
  }

  __ movq(FieldOperand(r14, r9, times_8, FixedDoubleArray::kHeaderSize), r15);
  __ bind(&entry);
  __ decq(r9);
  __ j(not_sign, &loop);

  __ bind(&done);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  // ----------- S t a t e -------------
  //  -- rax    : value
  //  -- rbx    : target map
  //  -- rcx    : key
  //  -- rdx    : receiver
  //  -- rsp[0] : return address
  // -----------------------------------
  Label loop, entry, convert_hole, gc_required, only_change_map;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
  __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex);
  __ j(equal, &only_change_map);

  __ push(rax);

  __ movq(r8, FieldOperand(rdx, JSObject::kElementsOffset));
  __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset));
  // r8 : source FixedDoubleArray
  // r9 : number of elements
  __ lea(rdi, Operand(r9, times_pointer_size, FixedArray::kHeaderSize));
  __ Allocate(rdi, r11, r14, r15, &gc_required, TAG_OBJECT);
  // r11: destination FixedArray
  __ LoadRoot(rdi, Heap::kFixedArrayMapRootIndex);
  __ movq(FieldOperand(r11, HeapObject::kMapOffset), rdi);
  __ Integer32ToSmi(r14, r9);
  __ movq(FieldOperand(r11, FixedArray::kLengthOffset), r14);

  // Prepare for conversion loop.
  __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64));
  __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
  // rsi: the-hole NaN
  // rdi: pointer to the-hole
  __ jmp(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ pop(rax);
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
  __ jmp(fail);

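  // Conversion loop: each source element is compared bit-for-bit against the
  // hole NaN in rsi; holes are written out as the-hole pointer (rdi), while
  // every other double is boxed into a freshly allocated HeapNumber, stored
  // into the new FixedArray and recorded with a write barrier.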
  // Box doubles into heap numbers.
  __ bind(&loop);
  __ movq(r14, FieldOperand(r8,
                            r9,
                            times_8,
                            FixedDoubleArray::kHeaderSize));
  // r9 : current element's index
  // r14: current element
  __ cmpq(r14, rsi);
  __ j(equal, &convert_hole);

  // Non-hole double, copy value into a heap number.
  __ AllocateHeapNumber(rax, r15, &gc_required);
  // rax: new heap number
  __ MoveDouble(FieldOperand(rax, HeapNumber::kValueOffset), r14);
  __ movq(FieldOperand(r11,
                       r9,
                       times_pointer_size,
                       FixedArray::kHeaderSize),
          rax);
  __ movq(r15, r9);
  __ RecordWriteArray(r11,
                      rax,
                      r15,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&entry, Label::kNear);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ movq(FieldOperand(r11,
                       r9,
                       times_pointer_size,
                       FixedArray::kHeaderSize),
          rdi);

  __ bind(&entry);
  __ decq(r9);
  __ j(not_sign, &loop);

  // Replace receiver's backing store with newly created and filled FixedArray.
  __ movq(FieldOperand(rdx, JSObject::kElementsOffset), r11);
  __ RecordWriteField(rdx,
                      JSObject::kElementsOffset,
                      r11,
                      r15,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ pop(rax);
  __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));

  __ bind(&only_change_map);
  // Set transitioned map.
  __ movq(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
  __ RecordWriteField(rdx,
                      HeapObject::kMapOffset,
                      rbx,
                      rdi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


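// Loads the character (code unit) at |index| of |string| into |result|,
// unwrapping sliced and flat cons strings first. Falls through to
// |call_runtime| for the cases that cannot be handled inline, i.e. cons
// strings that are not flat and short external strings.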
void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into the result register.
  __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ testb(result, Immediate(kIsIndirectStringMask));
  __ j(zero, &check_sequential, Label::kNear);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ testb(result, Immediate(kSlicedNotConsMask));
  __ j(zero, &cons_string, Label::kNear);

  // Handle slices.
  Label indirect_string_loaded;
  __ SmiToInteger32(result, FieldOperand(string, SlicedString::kOffsetOffset));
  __ addq(index, result);
  __ movq(string, FieldOperand(string, SlicedString::kParentOffset));
  __ jmp(&indirect_string_loaded, Label::kNear);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case, we go to the runtime system to flatten the string.
  __ bind(&cons_string);
  __ CompareRoot(FieldOperand(string, ConsString::kSecondOffset),
                 Heap::kempty_stringRootIndex);
  __ j(not_equal, call_runtime);
  __ movq(string, FieldOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ movq(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzxbl(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label seq_string;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ testb(result, Immediate(kStringRepresentationMask));
  __ j(zero, &seq_string, Label::kNear);

  // Handle external strings.
  Label ascii_external, done;
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ testb(result, Immediate(kIsIndirectStringMask));
    __ Assert(zero, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  STATIC_CHECK(kShortExternalStringTag != 0);
  __ testb(result, Immediate(kShortExternalStringTag));
  __ j(not_zero, call_runtime);
  // Check encoding.
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ testb(result, Immediate(kStringEncodingMask));
  __ movq(result, FieldOperand(string, ExternalString::kResourceDataOffset));
  __ j(not_equal, &ascii_external, Label::kNear);
  // Two-byte string.
  __ movzxwl(result, Operand(result, index, times_2, 0));
  __ jmp(&done, Label::kNear);
  __ bind(&ascii_external);
  // ASCII string.
  __ movzxbl(result, Operand(result, index, times_1, 0));
  __ jmp(&done, Label::kNear);

  // Dispatch on the encoding: ASCII or two-byte.
  Label ascii;
  __ bind(&seq_string);
  STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
  __ testb(result, Immediate(kStringEncodingMask));
  __ j(not_zero, &ascii, Label::kNear);

  // Two-byte string.
  // Load the two-byte character code into the result register.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
  __ movzxwl(result, FieldOperand(string,
                                  index,
                                  times_2,
                                  SeqTwoByteString::kHeaderSize));
  __ jmp(&done, Label::kNear);

  // ASCII string.
  // Load the byte into the result register.
  __ bind(&ascii);
  __ movzxbl(result, FieldOperand(string,
                                  index,
                                  times_1,
                                  SeqOneByteString::kHeaderSize));
  __ bind(&done);
}


void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   XMMRegister input,
                                   XMMRegister result,
                                   XMMRegister double_scratch,
                                   Register temp1,
                                   Register temp2) {
  ASSERT(!input.is(result));
  ASSERT(!input.is(double_scratch));
  ASSERT(!result.is(double_scratch));
  ASSERT(!temp1.is(temp2));
  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);

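  // Rough shape of the computation below (inferred from the constants and bit
  // masks that follow; the exact table contents live in
  // ExternalReference::math_exp_constants and math_exp_log_table): the input
  // is first clamped against lower and upper bounds from the constant table,
  // then split into an integer part that becomes the exponent bits of a power
  // of two (note the shl by 52), a table index (the low 11 bits, 0x7ff), and a
  // small remainder approximated with a short polynomial and combined with the
  // table value.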
  Label done;

  __ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
  __ movsd(double_scratch, Operand(kScratchRegister, 0 * kDoubleSize));
  __ xorpd(result, result);
  __ ucomisd(double_scratch, input);
  __ j(above_equal, &done);
  __ ucomisd(input, Operand(kScratchRegister, 1 * kDoubleSize));
  __ movsd(result, Operand(kScratchRegister, 2 * kDoubleSize));
  __ j(above_equal, &done);
  __ movsd(double_scratch, Operand(kScratchRegister, 3 * kDoubleSize));
  __ movsd(result, Operand(kScratchRegister, 4 * kDoubleSize));
  __ mulsd(double_scratch, input);
  __ addsd(double_scratch, result);
  __ movq(temp2, double_scratch);
  __ subsd(double_scratch, result);
  __ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
  __ lea(temp1, Operand(temp2, 0x1ff800));
  __ and_(temp2, Immediate(0x7ff));
  __ shr(temp1, Immediate(11));
  __ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
  __ Move(kScratchRegister, ExternalReference::math_exp_log_table());
  __ shl(temp1, Immediate(52));
  __ or_(temp1, Operand(kScratchRegister, temp2, times_8, 0));
  __ Move(kScratchRegister, ExternalReference::math_exp_constants(0));
  __ subsd(double_scratch, input);
  __ movsd(input, double_scratch);
  __ subsd(result, double_scratch);
  __ mulsd(input, double_scratch);
  __ mulsd(result, input);
  __ movq(input, temp1);
  __ mulsd(result, Operand(kScratchRegister, 7 * kDoubleSize));
  __ subsd(result, double_scratch);
  __ addsd(result, Operand(kScratchRegister, 8 * kDoubleSize));
  __ mulsd(result, input);

  __ bind(&done);
}

#undef __


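// Code aging support: freshly compiled (young) functions on x64 start with
// the fixed prologue emitted below. To age code, PatchPlatformCodeAge
// overwrites that prologue with a short call to an age-dependent stub;
// IsYoungSequence tells the two states apart by comparing the raw bytes.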
static byte* GetNoCodeAgeSequence(uint32_t* length) {
  static bool initialized = false;
  static byte sequence[kNoCodeAgeSequenceLength];
  *length = kNoCodeAgeSequenceLength;
  if (!initialized) {
    // The sequence of instructions that is patched out for aging code is the
    // following boilerplate stack-building prologue that is found both in
    // FUNCTION and OPTIMIZED_FUNCTION code:
    CodePatcher patcher(sequence, kNoCodeAgeSequenceLength);
    patcher.masm()->push(rbp);
    patcher.masm()->movq(rbp, rsp);
    patcher.masm()->push(rsi);
    patcher.masm()->push(rdi);
    initialized = true;
  }
  return sequence;
}


bool Code::IsYoungSequence(byte* sequence) {
  uint32_t young_length;
  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
  bool result = (!memcmp(sequence, young_sequence, young_length));
  ASSERT(result || *sequence == kCallOpcode);
  return result;
}


void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    sequence++;  // Skip the kCallOpcode byte
    Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
        Assembler::kCallTargetAddressOffset;
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length;
  byte* young_sequence = GetNoCodeAgeSequence(&young_length);
  if (age == kNoAgeCodeAge) {
    CopyBytes(sequence, young_sequence, young_length);
    CPU::FlushICache(sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(sequence, young_length);
    patcher.masm()->call(stub->instruction_start());
    patcher.masm()->Nop(
        kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength);
  }
}


Operand StackArgumentsAccessor::GetArgumentOperand(int index) {
  ASSERT(index >= 0);
  int receiver = (receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER) ? 1 : 0;
  int displacement_to_last_argument = base_reg_.is(rsp) ?
      kPCOnStackSize : kFPOnStackSize + kPCOnStackSize;
  displacement_to_last_argument += extra_displacement_to_last_argument_;
  if (argument_count_reg_.is(no_reg)) {
    // argument[0] is at base_reg_ + displacement_to_last_argument +
    // (argument_count_immediate_ + receiver - 1) * kPointerSize.
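    // Hypothetical example: with base_reg_ == rsp, ARGUMENTS_CONTAIN_RECEIVER,
    // argument_count_immediate_ == 2 and no extra displacement, argument[0]
    // resolves to Operand(rsp, kPCOnStackSize + 2 * kPointerSize) and
    // argument[1] to Operand(rsp, kPCOnStackSize + 1 * kPointerSize).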
    ASSERT(argument_count_immediate_ + receiver > 0);
    return Operand(base_reg_, displacement_to_last_argument +
        (argument_count_immediate_ + receiver - 1 - index) * kPointerSize);
  } else {
    // argument[0] is at base_reg_ + displacement_to_last_argument +
    // argument_count_reg_ * times_pointer_size + (receiver - 1) * kPointerSize.
    return Operand(base_reg_, argument_count_reg_, times_pointer_size,
        displacement_to_last_argument + (receiver - 1 - index) * kPointerSize);
  }
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64