// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_IA32

#include "src/codegen.h"
#include "src/heap.h"
#include "src/macro-assembler.h"

namespace v8 {
namespace internal {


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  ASSERT(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  ASSERT(masm->has_frame());
  masm->set_has_frame(false);
}

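// Note: stub code uses this pair to bracket a call into the runtime.
// BeforeCall sets up an internal frame and flips the assembler's has_frame()
// flag so that macro instructions that require a frame are legal in between;
// AfterCall tears the frame down and clears the flag again.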

#define __ masm.


UnaryMathFunction CreateExpFunction() {
  if (!FLAG_fast_math) return &std::exp;
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::exp;
  ExternalReference::InitializeMathExpData();

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // esp[1 * kPointerSize]: raw double input
  // esp[0 * kPointerSize]: return address
  {
    XMMRegister input = xmm1;
    XMMRegister result = xmm2;
    __ movsd(input, Operand(esp, 1 * kPointerSize));
    __ push(eax);
    __ push(ebx);

    MathExpGenerator::EmitMathExp(&masm, input, result, xmm0, eax, ebx);

    __ pop(ebx);
    __ pop(eax);
    __ movsd(Operand(esp, 1 * kPointerSize), result);
    __ fld_d(Operand(esp, 1 * kPointerSize));
    __ Ret();
  }

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}

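// Usage sketch (illustrative; assumes the UnaryMathFunction typedef,
// double (*)(double), from codegen.h):
//
//   UnaryMathFunction fast_exp = CreateExpFunction();
//   double e = fast_exp(1.0);  // ~2.718281828, within --fast-math accuracy.
//
// If --fast-math is off or no executable buffer is available, &std::exp is
// returned instead, so callers need no special-casing. CreateSqrtFunction()
// below follows the same pattern.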

UnaryMathFunction CreateSqrtFunction() {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
                                                 &actual_size,
                                                 true));
  if (buffer == NULL) return &std::sqrt;
  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  // esp[1 * kPointerSize]: raw double input
  // esp[0 * kPointerSize]: return address
  // Move double input into registers.
  {
    __ movsd(xmm0, Operand(esp, 1 * kPointerSize));
    __ sqrtsd(xmm0, xmm0);
    __ movsd(Operand(esp, 1 * kPointerSize), xmm0);
    // Load result into floating point register as return value.
    __ fld_d(Operand(esp, 1 * kPointerSize));
    __ Ret();
  }

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
}


// Helper functions for CreateMemMoveFunction.
#undef __
#define __ ACCESS_MASM(masm)

enum Direction { FORWARD, BACKWARD };
enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED };

// Expects registers:
// esi - source, aligned if alignment == MOVE_ALIGNED
// edi - destination, always aligned
// ecx - count (copy size in bytes)
// edx - loop count (number of 64 byte chunks)
void MemMoveEmitMainLoop(MacroAssembler* masm,
                         Label* move_last_15,
                         Direction direction,
                         Alignment alignment) {
  Register src = esi;
  Register dst = edi;
  Register count = ecx;
  Register loop_count = edx;
  Label loop, move_last_31, move_last_63;
  __ cmp(loop_count, 0);
  __ j(equal, &move_last_63);
  __ bind(&loop);
  // Main loop. Copy in 64 byte chunks.
  if (direction == BACKWARD) __ sub(src, Immediate(0x40));
  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
  __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
  __ movdq(alignment == MOVE_ALIGNED, xmm2, Operand(src, 0x20));
  __ movdq(alignment == MOVE_ALIGNED, xmm3, Operand(src, 0x30));
  if (direction == FORWARD) __ add(src, Immediate(0x40));
  if (direction == BACKWARD) __ sub(dst, Immediate(0x40));
  __ movdqa(Operand(dst, 0x00), xmm0);
  __ movdqa(Operand(dst, 0x10), xmm1);
  __ movdqa(Operand(dst, 0x20), xmm2);
  __ movdqa(Operand(dst, 0x30), xmm3);
  if (direction == FORWARD) __ add(dst, Immediate(0x40));
  __ dec(loop_count);
  __ j(not_zero, &loop);
  // At most 63 bytes left to copy.
  __ bind(&move_last_63);
  __ test(count, Immediate(0x20));
  __ j(zero, &move_last_31);
  if (direction == BACKWARD) __ sub(src, Immediate(0x20));
  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
  __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
  if (direction == FORWARD) __ add(src, Immediate(0x20));
  if (direction == BACKWARD) __ sub(dst, Immediate(0x20));
  __ movdqa(Operand(dst, 0x00), xmm0);
  __ movdqa(Operand(dst, 0x10), xmm1);
  if (direction == FORWARD) __ add(dst, Immediate(0x20));
  // At most 31 bytes left to copy.
  __ bind(&move_last_31);
  __ test(count, Immediate(0x10));
  __ j(zero, move_last_15);
  if (direction == BACKWARD) __ sub(src, Immediate(0x10));
  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0));
  if (direction == FORWARD) __ add(src, Immediate(0x10));
  if (direction == BACKWARD) __ sub(dst, Immediate(0x10));
  __ movdqa(Operand(dst, 0), xmm0);
  if (direction == FORWARD) __ add(dst, Immediate(0x10));
}

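// In C-like pseudocode, the forward variant of the emitted loop is roughly:
//
//   while (loop_count--) copy 64 bytes (four 16-byte XMM loads/stores);
//   if (count & 0x20)    copy 32 bytes;
//   if (count & 0x10)    copy 16 bytes;
//   // Control continues at *move_last_15 (bound by the caller);
//   // at most 15 bytes remain.
//
// The backward variant is identical except that src/dst are decremented
// before each copy instead of incremented after it.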

void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
  __ pop(esi);
  __ pop(edi);
  __ ret(0);
}


#undef __
#define __ masm.


class LabelConverter {
 public:
  explicit LabelConverter(byte* buffer) : buffer_(buffer) {}
  int32_t address(Label* l) const {
    return reinterpret_cast<int32_t>(buffer_) + l->pos();
  }
 private:
  byte* buffer_;
};

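// Because the code buffer is fixed and never relocated, a bound label's
// position can be converted to an absolute 32-bit address. This is what lets
// CreateMemMoveFunction() below emit jump tables of raw code addresses
// (pattern used with the f* handler labels):
//
//   __ bind(&handlers);
//   __ dd(conv.address(&some_case));  // Table entry holds an absolute address.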

MemMoveFunction CreateMemMoveFunction() {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return NULL;
  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  LabelConverter conv(buffer);

  // Generated code is put into a fixed, unmovable buffer, and not into
  // the V8 heap. We can't, and don't, refer to any relocatable addresses
  // (e.g. the JavaScript NaN object).

  // The 32-bit cdecl calling convention passes all arguments on the stack.

  // Stack layout:
  // esp[12]: Third argument, size.
  // esp[8]: Second argument, source pointer.
  // esp[4]: First argument, destination pointer.
  // esp[0]: return address
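  //
  // In other words, the generated stub is callable from C as if declared
  // (assuming the MemMoveFunction typedef in codegen.h):
  //
  //   void mem_move(void* dest, const void* src, size_t size);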

  const int kDestinationOffset = 1 * kPointerSize;
  const int kSourceOffset = 2 * kPointerSize;
  const int kSizeOffset = 3 * kPointerSize;

  // When copying up to this many bytes, use special "small" handlers.
  const size_t kSmallCopySize = 8;
  // When copying up to this many bytes, use special "medium" handlers.
  const size_t kMediumCopySize = 63;
  // When the non-overlapping region of src and dst is smaller than this,
  // use a more careful implementation (slightly slower).
  const size_t kMinMoveDistance = 16;
  // Note that these values are dictated by the implementation below; do not
  // just change them and hope things will work!

  int stack_offset = 0;  // Update if we change the stack height.

  Label backward, backward_much_overlap;
  Label forward_much_overlap, small_size, medium_size, pop_and_return;
  __ push(edi);
  __ push(esi);
  stack_offset += 2 * kPointerSize;
  Register dst = edi;
  Register src = esi;
  Register count = ecx;
  Register loop_count = edx;
  __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
  __ mov(src, Operand(esp, stack_offset + kSourceOffset));
  __ mov(count, Operand(esp, stack_offset + kSizeOffset));

  __ cmp(dst, src);
  __ j(equal, &pop_and_return);

  __ prefetch(Operand(src, 0), 1);
  __ cmp(count, kSmallCopySize);
  __ j(below_equal, &small_size);
  __ cmp(count, kMediumCopySize);
  __ j(below_equal, &medium_size);
  __ cmp(dst, src);
  __ j(above, &backward);

  {
    // |dst| is a lower address than |src|. Copy front-to-back.
    Label unaligned_source, move_last_15, skip_last_move;
    __ mov(eax, src);
    __ sub(eax, dst);
    __ cmp(eax, kMinMoveDistance);
    __ j(below, &forward_much_overlap);
    // Copy first 16 bytes.
    __ movdqu(xmm0, Operand(src, 0));
    __ movdqu(Operand(dst, 0), xmm0);
    // Determine distance to alignment: 16 - (dst & 0xF).
    __ mov(edx, dst);
    __ and_(edx, 0xF);
    __ neg(edx);
    __ add(edx, Immediate(16));
    __ add(dst, edx);
    __ add(src, edx);
    __ sub(count, edx);
    // dst is now aligned. Main copy loop.
    __ mov(loop_count, count);
    __ shr(loop_count, 6);
    // Check if src is also aligned.
    __ test(src, Immediate(0xF));
    __ j(not_zero, &unaligned_source);
    // Copy loop for aligned source and destination.
    MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_ALIGNED);
    // At most 15 bytes to copy. Copy 16 bytes at end of string.
    __ bind(&move_last_15);
    __ and_(count, 0xF);
    __ j(zero, &skip_last_move, Label::kNear);
    __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
    __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
    __ bind(&skip_last_move);
    MemMoveEmitPopAndReturn(&masm);

    // Copy loop for unaligned source and aligned destination.
    __ bind(&unaligned_source);
    MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_UNALIGNED);
    __ jmp(&move_last_15);

    // Less than kMinMoveDistance offset between dst and src.
    Label loop_until_aligned, last_15_much_overlap;
    __ bind(&loop_until_aligned);
    __ mov_b(eax, Operand(src, 0));
    __ inc(src);
    __ mov_b(Operand(dst, 0), eax);
    __ inc(dst);
    __ dec(count);
    __ bind(&forward_much_overlap);  // Entry point into this block.
    __ test(dst, Immediate(0xF));
    __ j(not_zero, &loop_until_aligned);
    // dst is now aligned, src can't be. Main copy loop.
    __ mov(loop_count, count);
    __ shr(loop_count, 6);
    MemMoveEmitMainLoop(&masm, &last_15_much_overlap,
                        FORWARD, MOVE_UNALIGNED);
    __ bind(&last_15_much_overlap);
    __ and_(count, 0xF);
    __ j(zero, &pop_and_return);
    __ cmp(count, kSmallCopySize);
    __ j(below_equal, &small_size);
    __ jmp(&medium_size);
  }

  {
    // |dst| is a higher address than |src|. Copy backwards.
    Label unaligned_source, move_first_15, skip_last_move;
    __ bind(&backward);
    // |dst| and |src| always point to the end of what's left to copy.
    __ add(dst, count);
    __ add(src, count);
    __ mov(eax, dst);
    __ sub(eax, src);
    __ cmp(eax, kMinMoveDistance);
    __ j(below, &backward_much_overlap);
    // Copy last 16 bytes.
    __ movdqu(xmm0, Operand(src, -0x10));
    __ movdqu(Operand(dst, -0x10), xmm0);
    // Determine distance to alignment: dst & 0xF.
    __ mov(edx, dst);
    __ and_(edx, 0xF);
    __ sub(dst, edx);
    __ sub(src, edx);
    __ sub(count, edx);
    // dst is now aligned. Main copy loop.
    __ mov(loop_count, count);
    __ shr(loop_count, 6);
    // Check if src is also aligned.
    __ test(src, Immediate(0xF));
    __ j(not_zero, &unaligned_source);
    // Copy loop for aligned source and destination.
    MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_ALIGNED);
    // At most 15 bytes to copy. Copy 16 bytes at beginning of string.
    __ bind(&move_first_15);
    __ and_(count, 0xF);
    __ j(zero, &skip_last_move, Label::kNear);
    __ sub(src, count);
    __ sub(dst, count);
    __ movdqu(xmm0, Operand(src, 0));
    __ movdqu(Operand(dst, 0), xmm0);
    __ bind(&skip_last_move);
    MemMoveEmitPopAndReturn(&masm);

    // Copy loop for unaligned source and aligned destination.
    __ bind(&unaligned_source);
    MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_UNALIGNED);
    __ jmp(&move_first_15);

    // Less than kMinMoveDistance offset between dst and src.
    Label loop_until_aligned, first_15_much_overlap;
    __ bind(&loop_until_aligned);
    __ dec(src);
    __ dec(dst);
    __ mov_b(eax, Operand(src, 0));
    __ mov_b(Operand(dst, 0), eax);
    __ dec(count);
    __ bind(&backward_much_overlap);  // Entry point into this block.
    __ test(dst, Immediate(0xF));
    __ j(not_zero, &loop_until_aligned);
    // dst is now aligned, src can't be. Main copy loop.
    __ mov(loop_count, count);
    __ shr(loop_count, 6);
    MemMoveEmitMainLoop(&masm, &first_15_much_overlap,
                        BACKWARD, MOVE_UNALIGNED);
    __ bind(&first_15_much_overlap);
    __ and_(count, 0xF);
    __ j(zero, &pop_and_return);
    // Small/medium handlers expect dst/src to point to the beginning.
    __ sub(dst, count);
    __ sub(src, count);
    __ cmp(count, kSmallCopySize);
    __ j(below_equal, &small_size);
    __ jmp(&medium_size);
  }
  {
    // Special handlers for 9 <= copy_size < 64. No assumptions about
    // alignment or move distance, so all reads must be unaligned and
    // must happen before any writes.
    Label medium_handlers, f9_16, f17_32, f33_48, f49_63;

    __ bind(&f9_16);
    __ movsd(xmm0, Operand(src, 0));
    __ movsd(xmm1, Operand(src, count, times_1, -8));
    __ movsd(Operand(dst, 0), xmm0);
    __ movsd(Operand(dst, count, times_1, -8), xmm1);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f17_32);
    __ movdqu(xmm0, Operand(src, 0));
    __ movdqu(xmm1, Operand(src, count, times_1, -0x10));
    __ movdqu(Operand(dst, 0x00), xmm0);
    __ movdqu(Operand(dst, count, times_1, -0x10), xmm1);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f33_48);
    __ movdqu(xmm0, Operand(src, 0x00));
    __ movdqu(xmm1, Operand(src, 0x10));
    __ movdqu(xmm2, Operand(src, count, times_1, -0x10));
    __ movdqu(Operand(dst, 0x00), xmm0);
    __ movdqu(Operand(dst, 0x10), xmm1);
    __ movdqu(Operand(dst, count, times_1, -0x10), xmm2);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f49_63);
    __ movdqu(xmm0, Operand(src, 0x00));
    __ movdqu(xmm1, Operand(src, 0x10));
    __ movdqu(xmm2, Operand(src, 0x20));
    __ movdqu(xmm3, Operand(src, count, times_1, -0x10));
    __ movdqu(Operand(dst, 0x00), xmm0);
    __ movdqu(Operand(dst, 0x10), xmm1);
    __ movdqu(Operand(dst, 0x20), xmm2);
    __ movdqu(Operand(dst, count, times_1, -0x10), xmm3);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&medium_handlers);
    __ dd(conv.address(&f9_16));
    __ dd(conv.address(&f17_32));
    __ dd(conv.address(&f33_48));
    __ dd(conv.address(&f49_63));

    __ bind(&medium_size);  // Entry point into this block.
    __ mov(eax, count);
    __ dec(eax);
    __ shr(eax, 4);
    if (FLAG_debug_code) {
      Label ok;
      __ cmp(eax, 3);
      __ j(below_equal, &ok);
      __ int3();
      __ bind(&ok);
    }
    __ mov(eax, Operand(eax, times_4, conv.address(&medium_handlers)));
    __ jmp(eax);
  }
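  // Dispatch in the medium block above: handler_index = (count - 1) >> 4 maps
  // counts 9..16 -> f9_16, 17..32 -> f17_32, 33..48 -> f33_48, and
  // 49..63 -> f49_63; the debug check verifies that the index stays <= 3.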
  {
    // Specialized copiers for copy_size <= 8 bytes.
    Label small_handlers, f0, f1, f2, f3, f4, f5_8;
    __ bind(&f0);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f1);
    __ mov_b(eax, Operand(src, 0));
    __ mov_b(Operand(dst, 0), eax);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f2);
    __ mov_w(eax, Operand(src, 0));
    __ mov_w(Operand(dst, 0), eax);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f3);
    __ mov_w(eax, Operand(src, 0));
    __ mov_b(edx, Operand(src, 2));
    __ mov_w(Operand(dst, 0), eax);
    __ mov_b(Operand(dst, 2), edx);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f4);
    __ mov(eax, Operand(src, 0));
    __ mov(Operand(dst, 0), eax);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f5_8);
    __ mov(eax, Operand(src, 0));
    __ mov(edx, Operand(src, count, times_1, -4));
    __ mov(Operand(dst, 0), eax);
    __ mov(Operand(dst, count, times_1, -4), edx);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&small_handlers);
    __ dd(conv.address(&f0));
    __ dd(conv.address(&f1));
    __ dd(conv.address(&f2));
    __ dd(conv.address(&f3));
    __ dd(conv.address(&f4));
    __ dd(conv.address(&f5_8));
    __ dd(conv.address(&f5_8));
    __ dd(conv.address(&f5_8));
    __ dd(conv.address(&f5_8));

    __ bind(&small_size);  // Entry point into this block.
    if (FLAG_debug_code) {
      Label ok;
      __ cmp(count, 8);
      __ j(below_equal, &ok);
      __ int3();
      __ bind(&ok);
    }
    __ mov(eax, Operand(count, times_4, conv.address(&small_handlers)));
    __ jmp(eax);
  }
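  // The small table above is indexed directly by count (0..8): counts 0..4
  // get dedicated handlers, and counts 5..8 all share f5_8, which copies two
  // (possibly overlapping) 4-byte words, one from each end of the region.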

  __ bind(&pop_and_return);
  MemMoveEmitPopAndReturn(&masm);

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));
  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);
  // TODO(jkummerow): It would be nice to register this code creation event
  // with the PROFILE / GDBJIT system.
  return FUNCTION_CAST<MemMoveFunction>(buffer);
}

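// A minimal usage sketch (illustrative; actual call sites live elsewhere in
// V8, and the signature is assumed from the MemMoveFunction typedef):
//
//   MemMoveFunction fast_move = CreateMemMoveFunction();
//   if (fast_move != NULL) {
//     fast_move(dest, src, size);  // memmove() semantics, overlap-safe.
//   } else {
//     memmove(dest, src, size);    // Fall back if no executable memory.
//   }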

#undef __

// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)


void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm, AllocationSiteMode mode,
    Label* allocation_memento_found) {
  // ----------- S t a t e -------------
  //  -- eax    : value
  //  -- ebx    : target map
  //  -- ecx    : key
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------
  if (mode == TRACK_ALLOCATION_SITE) {
    ASSERT(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(edx, edi, allocation_memento_found);
  }

  // Set transitioned map.
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx,
                      HeapObject::kMapOffset,
                      ebx,
                      edi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  // ----------- S t a t e -------------
  //  -- eax    : value
  //  -- ebx    : target map
  //  -- ecx    : key
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------
  Label loop, entry, convert_hole, gc_required, only_change_map;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
  __ j(equal, &only_change_map);

  __ push(eax);
  __ push(ebx);

  __ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));

  // Allocate new FixedDoubleArray.
  // edx: receiver
  // edi: length of source FixedArray (smi-tagged)
  AllocationFlags flags =
      static_cast<AllocationFlags>(TAG_OBJECT | DOUBLE_ALIGNMENT);
  __ Allocate(FixedDoubleArray::kHeaderSize, times_8, edi,
              REGISTER_VALUE_IS_SMI, eax, ebx, no_reg, &gc_required, flags);

  // eax: destination FixedDoubleArray
  // edi: number of elements
  // edx: receiver
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         Immediate(masm->isolate()->factory()->fixed_double_array_map()));
  __ mov(FieldOperand(eax, FixedDoubleArray::kLengthOffset), edi);
  __ mov(esi, FieldOperand(edx, JSObject::kElementsOffset));
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
  __ mov(ebx, eax);
  __ RecordWriteField(edx,
                      JSObject::kElementsOffset,
                      ebx,
                      edi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  __ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset));

  // Prepare for conversion loop.
  ExternalReference canonical_the_hole_nan_reference =
      ExternalReference::address_of_the_hole_nan();
  XMMRegister the_hole_nan = xmm1;
  __ movsd(the_hole_nan,
           Operand::StaticVariable(canonical_the_hole_nan_reference));
  __ jmp(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  // Restore registers before jumping into runtime.
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ pop(ebx);
  __ pop(eax);
  __ jmp(fail);

  // Convert and copy elements.
  // esi: source FixedArray
  __ bind(&loop);
  __ mov(ebx, FieldOperand(esi, edi, times_2, FixedArray::kHeaderSize));
  // ebx: current element from source
  // edi: index of current element
  __ JumpIfNotSmi(ebx, &convert_hole);

  // Normal smi, convert it to double and store.
  __ SmiUntag(ebx);
  __ Cvtsi2sd(xmm0, ebx);
  __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
           xmm0);
  __ jmp(&entry);

  // Found a hole; store the canonical hole NaN (the_hole_nan) instead.
  __ bind(&convert_hole);

  if (FLAG_debug_code) {
    __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
    __ Assert(equal, kObjectFoundInSmiOnlyArray);
  }

  __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
           the_hole_nan);

  __ bind(&entry);
  __ sub(edi, Immediate(Smi::FromInt(1)));
  __ j(not_sign, &loop);

  __ pop(ebx);
  __ pop(eax);

  // Restore esi.
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));

  __ bind(&only_change_map);
  // eax: value
  // ebx: target map
  // Set transitioned map.
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx,
                      HeapObject::kMapOffset,
                      ebx,
                      edi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  // ----------- S t a t e -------------
  //  -- eax    : value
  //  -- ebx    : target map
  //  -- ecx    : key
  //  -- edx    : receiver
  //  -- esp[0] : return address
  // -----------------------------------
  Label loop, entry, convert_hole, gc_required, only_change_map, success;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
  __ j(equal, &only_change_map);

  __ push(eax);
  __ push(edx);
  __ push(ebx);

  __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));

  // Allocate new FixedArray.
  // ebx: length of source FixedDoubleArray (smi-tagged)
  __ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
  __ Allocate(edi, eax, esi, no_reg, &gc_required, TAG_OBJECT);

  // eax: destination FixedArray
  // ebx: number of elements
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         Immediate(masm->isolate()->factory()->fixed_array_map()));
  __ mov(FieldOperand(eax, FixedArray::kLengthOffset), ebx);
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));

  __ jmp(&entry);

  // ebx: target map
  // edx: receiver
  // Set transitioned map.
  __ bind(&only_change_map);
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx,
                      HeapObject::kMapOffset,
                      ebx,
                      edi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&success);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ pop(ebx);
  __ pop(edx);
  __ pop(eax);
  __ jmp(fail);

  // Box doubles into heap numbers.
  // edi: source FixedDoubleArray
  // eax: destination FixedArray
  __ bind(&loop);
  // ebx: index of current element (smi-tagged)
  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
  __ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32));
  __ j(equal, &convert_hole);

  // Non-hole double, copy value into a heap number.
  __ AllocateHeapNumber(edx, esi, no_reg, &gc_required);
  // edx: new heap number
  __ movsd(xmm0,
           FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
  __ movsd(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx);
  __ mov(esi, ebx);
  __ RecordWriteArray(eax,
                      edx,
                      esi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&entry, Label::kNear);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
         masm->isolate()->factory()->the_hole_value());

  __ bind(&entry);
  __ sub(ebx, Immediate(Smi::FromInt(1)));
  __ j(not_sign, &loop);

  __ pop(ebx);
  __ pop(edx);
  // ebx: target map
  // edx: receiver
  // Set transitioned map.
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx,
                      HeapObject::kMapOffset,
                      ebx,
                      edi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
  __ RecordWriteField(edx,
                      JSObject::kElementsOffset,
                      eax,
                      edi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Restore registers.
  __ pop(eax);
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));

  __ bind(&success);
}


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Factory* factory,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ test(result, Immediate(kIsIndirectStringMask));
  __ j(zero, &check_sequential, Label::kNear);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ test(result, Immediate(kSlicedNotConsMask));
  __ j(zero, &cons_string, Label::kNear);

  // Handle slices.
  Label indirect_string_loaded;
  __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
  __ SmiUntag(result);
  __ add(index, result);
  __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
  __ jmp(&indirect_string_loaded, Label::kNear);

  // Handle cons strings.
  // Check whether the right-hand side is the empty string (i.e. whether this
  // is really a flat string in a cons string). If that is not the case, we
  // would rather go to the runtime system now to flatten the string.
  __ bind(&cons_string);
  __ cmp(FieldOperand(string, ConsString::kSecondOffset),
         Immediate(factory->empty_string()));
  __ j(not_equal, call_runtime);
  __ mov(string, FieldOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label seq_string;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ test(result, Immediate(kStringRepresentationMask));
  __ j(zero, &seq_string, Label::kNear);

  // Handle external strings.
  Label ascii_external, done;
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ test(result, Immediate(kIsIndirectStringMask));
    __ Assert(zero, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ test_b(result, kShortExternalStringMask);
  __ j(not_zero, call_runtime);
  // Check encoding.
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ test_b(result, kStringEncodingMask);
  __ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
  __ j(not_equal, &ascii_external, Label::kNear);
  // Two-byte string.
  __ movzx_w(result, Operand(result, index, times_2, 0));
  __ jmp(&done, Label::kNear);
  __ bind(&ascii_external);
  // Ascii string.
  __ movzx_b(result, Operand(result, index, times_1, 0));
  __ jmp(&done, Label::kNear);

  // Dispatch on the encoding: ASCII or two-byte.
  Label ascii;
  __ bind(&seq_string);
  STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
  __ test(result, Immediate(kStringEncodingMask));
  __ j(not_zero, &ascii, Label::kNear);

  // Two-byte string.
  // Load the two-byte character code into the result register.
  __ movzx_w(result, FieldOperand(string,
                                  index,
                                  times_2,
                                  SeqTwoByteString::kHeaderSize));
  __ jmp(&done, Label::kNear);

  // Ascii string.
  // Load the byte into the result register.
  __ bind(&ascii);
  __ movzx_b(result, FieldOperand(string,
                                  index,
                                  times_1,
                                  SeqOneByteString::kHeaderSize));
  __ bind(&done);
}
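
// In effect, the code above computes result = string[index]: it unwraps at
// most one level of indirection (a slice adds its offset to index; a cons
// whose second part is the empty string is replaced by its first part), then
// dispatches on sequential vs. external and one-byte vs. two-byte
// representations. Short external strings and non-flat cons strings bail out
// to call_runtime.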


static Operand ExpConstant(int index) {
  return Operand::StaticVariable(ExternalReference::math_exp_constants(index));
}


void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   XMMRegister input,
                                   XMMRegister result,
                                   XMMRegister double_scratch,
                                   Register temp1,
                                   Register temp2) {
  ASSERT(!input.is(double_scratch));
  ASSERT(!input.is(result));
  ASSERT(!result.is(double_scratch));
  ASSERT(!temp1.is(temp2));
  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);

  Label done;

  __ movsd(double_scratch, ExpConstant(0));
  __ xorpd(result, result);
  __ ucomisd(double_scratch, input);
  __ j(above_equal, &done);
  __ ucomisd(input, ExpConstant(1));
  __ movsd(result, ExpConstant(2));
  __ j(above_equal, &done);
  __ movsd(double_scratch, ExpConstant(3));
  __ movsd(result, ExpConstant(4));
  __ mulsd(double_scratch, input);
  __ addsd(double_scratch, result);
  __ movd(temp2, double_scratch);
  __ subsd(double_scratch, result);
  __ movsd(result, ExpConstant(6));
  __ mulsd(double_scratch, ExpConstant(5));
  __ subsd(double_scratch, input);
  __ subsd(result, double_scratch);
  __ movsd(input, double_scratch);
  __ mulsd(input, double_scratch);
  __ mulsd(result, input);
  __ mov(temp1, temp2);
  __ mulsd(result, ExpConstant(7));
  __ subsd(result, double_scratch);
  __ add(temp1, Immediate(0x1ff800));
  __ addsd(result, ExpConstant(8));
  __ and_(temp2, Immediate(0x7ff));
  __ shr(temp1, 11);
  __ shl(temp1, 20);
  __ movd(input, temp1);
  __ pshufd(input, input, static_cast<uint8_t>(0xe1));  // Order: 11 10 00 01
  __ movsd(double_scratch, Operand::StaticArray(
      temp2, times_8, ExternalReference::math_exp_log_table()));
  __ orps(input, double_scratch);
  __ mulsd(result, input);
  __ bind(&done);
}
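
// Sketch of the algorithm above (inferred from the constant and table usage;
// the exact constants live in the math_exp data set up by
// ExternalReference::InitializeMathExpData): exp(x) is evaluated as
// 2^(x / ln 2) = 2^k * 2^(f / 2048). The product x * (2048 / ln 2) is rounded
// to an integer via the magic-constant trick (ExpConstant(4)); its low 11
// bits f index the 2048-entry math_exp_log_table(), while the remaining bits
// plus the IEEE-754 bias (0x1ff800 = 1023 << 11) are shifted into the
// exponent field of a double (shr 11 / shl 20, with the pshufd placing the
// high word). A short polynomial in the rounding residual corrects the
// result. Inputs at or below ExpConstant(0) return 0; inputs at or above
// ExpConstant(1) return +infinity (ExpConstant(2)).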

#undef __


CodeAgingHelper::CodeAgingHelper() {
  ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength);
  CodePatcher patcher(young_sequence_.start(), young_sequence_.length());
  patcher.masm()->push(ebp);
  patcher.masm()->mov(ebp, esp);
  patcher.masm()->push(esi);
  patcher.masm()->push(edi);
}
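
// The "young" code-age sequence is just the standard function prologue:
//
//   push ebp
//   mov ebp, esp
//   push esi
//   push edi
//
// Aging replaces this sequence with a call to an age-specific stub (see
// PatchPlatformCodeAge below), which is why IsOld() can simply check for
// kCallOpcode at the start of the sequence.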


#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return *candidate == kCallOpcode;
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  ASSERT(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}


void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    sequence++;  // Skip the kCallOpcode byte.
    Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
        Assembler::kCallTargetAddressOffset;
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    CPU::FlushICache(sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(sequence, young_length);
    patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
  }
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_IA32