// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/ia32/codegen-ia32.h"

#if V8_TARGET_ARCH_IA32

#include "src/codegen.h"
#include "src/heap/heap.h"
#include "src/macro-assembler.h"

namespace v8 {
namespace internal {


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}


#define __ masm.


UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return nullptr;
  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);
  // esp[1 * kPointerSize]: raw double input
  // esp[0 * kPointerSize]: return address
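  // The generated stub behaves like a C function of roughly this shape (a
  // sketch; the actual return convention is ST(0), which is why the result
  // is reloaded with fld_d below):
  //   double sqrt_stub(double input) { return sqrt(input); }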
  // Move double input into registers.
  {
    __ movsd(xmm0, Operand(esp, 1 * kPointerSize));
    __ sqrtsd(xmm0, xmm0);
    __ movsd(Operand(esp, 1 * kPointerSize), xmm0);
    // Load result into floating point register as return value.
    __ fld_d(Operand(esp, 1 * kPointerSize));
    __ Ret();
  }

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));

  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
}
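
// A caller might use the returned pointer roughly like this (a sketch,
// assuming the UnaryMathFunctionWithIsolate typedef takes the double input
// plus the Isolate*):
//   UnaryMathFunctionWithIsolate sqrt_fn = CreateSqrtFunction(isolate);
//   double root =
//       (sqrt_fn != nullptr) ? sqrt_fn(2.0, isolate) : std::sqrt(2.0);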


// Helper functions for CreateMemMoveFunction.
#undef __
#define __ ACCESS_MASM(masm)

enum Direction { FORWARD, BACKWARD };
enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED };

// Expects registers:
// esi - source, aligned if alignment == MOVE_ALIGNED
// edi - destination, always aligned
// ecx - count (copy size in bytes)
// edx - loop count (number of 64 byte chunks)
void MemMoveEmitMainLoop(MacroAssembler* masm,
                         Label* move_last_15,
                         Direction direction,
                         Alignment alignment) {
  Register src = esi;
  Register dst = edi;
  Register count = ecx;
  Register loop_count = edx;
  Label loop, move_last_31, move_last_63;
  __ cmp(loop_count, 0);
  __ j(equal, &move_last_63);
  __ bind(&loop);
  // Main loop. Copy in 64 byte chunks.
  if (direction == BACKWARD) __ sub(src, Immediate(0x40));
  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
  __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
  __ movdq(alignment == MOVE_ALIGNED, xmm2, Operand(src, 0x20));
  __ movdq(alignment == MOVE_ALIGNED, xmm3, Operand(src, 0x30));
  if (direction == FORWARD) __ add(src, Immediate(0x40));
  if (direction == BACKWARD) __ sub(dst, Immediate(0x40));
  __ movdqa(Operand(dst, 0x00), xmm0);
  __ movdqa(Operand(dst, 0x10), xmm1);
  __ movdqa(Operand(dst, 0x20), xmm2);
  __ movdqa(Operand(dst, 0x30), xmm3);
  if (direction == FORWARD) __ add(dst, Immediate(0x40));
  __ dec(loop_count);
  __ j(not_zero, &loop);
  // At most 63 bytes left to copy.
  __ bind(&move_last_63);
  __ test(count, Immediate(0x20));
  __ j(zero, &move_last_31);
  if (direction == BACKWARD) __ sub(src, Immediate(0x20));
  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
  __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
  if (direction == FORWARD) __ add(src, Immediate(0x20));
  if (direction == BACKWARD) __ sub(dst, Immediate(0x20));
  __ movdqa(Operand(dst, 0x00), xmm0);
  __ movdqa(Operand(dst, 0x10), xmm1);
  if (direction == FORWARD) __ add(dst, Immediate(0x20));
  // At most 31 bytes left to copy.
  __ bind(&move_last_31);
  __ test(count, Immediate(0x10));
  __ j(zero, move_last_15);
  if (direction == BACKWARD) __ sub(src, Immediate(0x10));
  __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0));
  if (direction == FORWARD) __ add(src, Immediate(0x10));
  if (direction == BACKWARD) __ sub(dst, Immediate(0x10));
  __ movdqa(Operand(dst, 0), xmm0);
  if (direction == FORWARD) __ add(dst, Immediate(0x10));
}
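
// In rough C terms the emitted loop is equivalent to (a sketch, FORWARD
// direction, ignoring the aligned/unaligned load distinction):
//   while (loop_count-- > 0) { memcpy(dst, src, 0x40); src += 0x40; dst += 0x40; }
//   if (count & 0x20)        { memcpy(dst, src, 0x20); src += 0x20; dst += 0x20; }
//   if (count & 0x10)        { memcpy(dst, src, 0x10); src += 0x10; dst += 0x10; }
// after which at most (count & 0xF) bytes remain and control continues at
// the caller-supplied move_last_15 label.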


void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
  __ pop(esi);
  __ pop(edi);
  __ ret(0);
}


#undef __
#define __ masm.


class LabelConverter {
 public:
  explicit LabelConverter(byte* buffer) : buffer_(buffer) {}
  int32_t address(Label* l) const {
    return reinterpret_cast<int32_t>(buffer_) + l->pos();
  }
 private:
  byte* buffer_;
};
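
// LabelConverter is only sound because the buffer never moves: a bound
// Label's pos() is an offset into the code buffer, so adding the buffer's
// absolute address yields an absolute code address. That is what lets the
// jump tables below be emitted as plain data, e.g. (sketch):
//   __ bind(&handler);              // ...handler code...
//   __ dd(conv.address(&handler));  // emit its absolute address as an entry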


MemMoveFunction CreateMemMoveFunction(Isolate* isolate) {
  size_t actual_size;
  // Allocate buffer in executable space.
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == nullptr) return nullptr;
  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
                      CodeObjectRequired::kNo);
  LabelConverter conv(buffer);

  // Generated code is put into a fixed, unmovable buffer, and not into
  // the V8 heap. We can't, and don't, refer to any relocatable addresses
  // (e.g. the JavaScript nan-object).

  // 32-bit C declaration function calls pass arguments on stack.

  // Stack layout:
  // esp[12]: Third argument, size.
  // esp[8]: Second argument, source pointer.
  // esp[4]: First argument, destination pointer.
  // esp[0]: return address

  const int kDestinationOffset = 1 * kPointerSize;
  const int kSourceOffset = 2 * kPointerSize;
  const int kSizeOffset = 3 * kPointerSize;
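  // These offsets match a cdecl-style declaration along the lines of the
  // MemMoveFunction typedef (a sketch):
  //   void MemMove(void* dest, const void* src, size_t size);
  // with each argument one pointer-size slot above the return address.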

  // When copying up to this many bytes, use special "small" handlers.
  const size_t kSmallCopySize = 8;
  // When copying up to this many bytes, use special "medium" handlers.
  const size_t kMediumCopySize = 63;
  // When non-overlapping region of src and dst is less than this,
  // use a more careful implementation (slightly slower).
  const size_t kMinMoveDistance = 16;
  // Note that these values are dictated by the implementation below,
  // do not just change them and hope things will work!

  int stack_offset = 0;  // Update if we change the stack height.

  Label backward, backward_much_overlap;
  Label forward_much_overlap, small_size, medium_size, pop_and_return;
  __ push(edi);
  __ push(esi);
  stack_offset += 2 * kPointerSize;
  Register dst = edi;
  Register src = esi;
  Register count = ecx;
  Register loop_count = edx;
  __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
  __ mov(src, Operand(esp, stack_offset + kSourceOffset));
  __ mov(count, Operand(esp, stack_offset + kSizeOffset));

  __ cmp(dst, src);
  __ j(equal, &pop_and_return);

  __ prefetch(Operand(src, 0), 1);
  __ cmp(count, kSmallCopySize);
  __ j(below_equal, &small_size);
  __ cmp(count, kMediumCopySize);
  __ j(below_equal, &medium_size);
  __ cmp(dst, src);
  __ j(above, &backward);

  {
    // |dst| is a lower address than |src|. Copy front-to-back.
    Label unaligned_source, move_last_15, skip_last_move;
    __ mov(eax, src);
    __ sub(eax, dst);
    __ cmp(eax, kMinMoveDistance);
    __ j(below, &forward_much_overlap);
    // Copy first 16 bytes.
    __ movdqu(xmm0, Operand(src, 0));
    __ movdqu(Operand(dst, 0), xmm0);
    // Determine distance to alignment: 16 - (dst & 0xF).
    __ mov(edx, dst);
    __ and_(edx, 0xF);
    __ neg(edx);
    __ add(edx, Immediate(16));
    __ add(dst, edx);
    __ add(src, edx);
    __ sub(count, edx);
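    // Example: if dst ends in 0x9, edx becomes 16 - 9 = 7, so dst/src are
    // advanced past 7 bytes; those bytes were already covered by the
    // unaligned 16-byte copy above, which is what makes skipping them safe.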
    // dst is now aligned. Main copy loop.
    __ mov(loop_count, count);
    __ shr(loop_count, 6);
    // Check if src is also aligned.
    __ test(src, Immediate(0xF));
    __ j(not_zero, &unaligned_source);
    // Copy loop for aligned source and destination.
    MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_ALIGNED);
    // At most 15 bytes to copy. Copy 16 bytes at end of string.
    __ bind(&move_last_15);
    __ and_(count, 0xF);
    __ j(zero, &skip_last_move, Label::kNear);
    __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
    __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
    __ bind(&skip_last_move);
    MemMoveEmitPopAndReturn(&masm);

    // Copy loop for unaligned source and aligned destination.
    __ bind(&unaligned_source);
    MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_UNALIGNED);
    __ jmp(&move_last_15);

    // Less than kMinMoveDistance offset between dst and src.
    Label loop_until_aligned, last_15_much_overlap;
    __ bind(&loop_until_aligned);
    __ mov_b(eax, Operand(src, 0));
    __ inc(src);
    __ mov_b(Operand(dst, 0), eax);
    __ inc(dst);
    __ dec(count);
    __ bind(&forward_much_overlap);  // Entry point into this block.
    __ test(dst, Immediate(0xF));
    __ j(not_zero, &loop_until_aligned);
    // dst is now aligned, src can't be. Main copy loop.
    __ mov(loop_count, count);
    __ shr(loop_count, 6);
    MemMoveEmitMainLoop(&masm, &last_15_much_overlap,
                        FORWARD, MOVE_UNALIGNED);
    __ bind(&last_15_much_overlap);
    __ and_(count, 0xF);
    __ j(zero, &pop_and_return);
    __ cmp(count, kSmallCopySize);
    __ j(below_equal, &small_size);
    __ jmp(&medium_size);
  }

  {
    // |dst| is a higher address than |src|. Copy backwards.
    Label unaligned_source, move_first_15, skip_last_move;
    __ bind(&backward);
    // |dst| and |src| always point to the end of what's left to copy.
    __ add(dst, count);
    __ add(src, count);
    __ mov(eax, dst);
    __ sub(eax, src);
    __ cmp(eax, kMinMoveDistance);
    __ j(below, &backward_much_overlap);
    // Copy last 16 bytes.
    __ movdqu(xmm0, Operand(src, -0x10));
    __ movdqu(Operand(dst, -0x10), xmm0);
    // Find distance to alignment: dst & 0xF
    __ mov(edx, dst);
    __ and_(edx, 0xF);
    __ sub(dst, edx);
    __ sub(src, edx);
    __ sub(count, edx);
    // dst is now aligned. Main copy loop.
    __ mov(loop_count, count);
    __ shr(loop_count, 6);
    // Check if src is also aligned.
    __ test(src, Immediate(0xF));
    __ j(not_zero, &unaligned_source);
    // Copy loop for aligned source and destination.
    MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_ALIGNED);
    // At most 15 bytes to copy. Copy 16 bytes at beginning of string.
    __ bind(&move_first_15);
    __ and_(count, 0xF);
    __ j(zero, &skip_last_move, Label::kNear);
    __ sub(src, count);
    __ sub(dst, count);
    __ movdqu(xmm0, Operand(src, 0));
    __ movdqu(Operand(dst, 0), xmm0);
    __ bind(&skip_last_move);
    MemMoveEmitPopAndReturn(&masm);

    // Copy loop for unaligned source and aligned destination.
    __ bind(&unaligned_source);
    MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_UNALIGNED);
    __ jmp(&move_first_15);

    // Less than kMinMoveDistance offset between dst and src.
    Label loop_until_aligned, first_15_much_overlap;
    __ bind(&loop_until_aligned);
    __ dec(src);
    __ dec(dst);
    __ mov_b(eax, Operand(src, 0));
    __ mov_b(Operand(dst, 0), eax);
    __ dec(count);
    __ bind(&backward_much_overlap);  // Entry point into this block.
    __ test(dst, Immediate(0xF));
    __ j(not_zero, &loop_until_aligned);
    // dst is now aligned, src can't be. Main copy loop.
    __ mov(loop_count, count);
    __ shr(loop_count, 6);
    MemMoveEmitMainLoop(&masm, &first_15_much_overlap,
                        BACKWARD, MOVE_UNALIGNED);
    __ bind(&first_15_much_overlap);
    __ and_(count, 0xF);
    __ j(zero, &pop_and_return);
    // Small/medium handlers expect dst/src to point to the beginning.
    __ sub(dst, count);
    __ sub(src, count);
    __ cmp(count, kSmallCopySize);
    __ j(below_equal, &small_size);
    __ jmp(&medium_size);
  }
  {
    // Special handlers for 9 <= copy_size < 64. No assumptions about
    // alignment or move distance, so all reads must be unaligned and
    // must happen before any writes.
    Label medium_handlers, f9_16, f17_32, f33_48, f49_63;

    __ bind(&f9_16);
    __ movsd(xmm0, Operand(src, 0));
    __ movsd(xmm1, Operand(src, count, times_1, -8));
    __ movsd(Operand(dst, 0), xmm0);
    __ movsd(Operand(dst, count, times_1, -8), xmm1);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f17_32);
    __ movdqu(xmm0, Operand(src, 0));
    __ movdqu(xmm1, Operand(src, count, times_1, -0x10));
    __ movdqu(Operand(dst, 0x00), xmm0);
    __ movdqu(Operand(dst, count, times_1, -0x10), xmm1);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f33_48);
    __ movdqu(xmm0, Operand(src, 0x00));
    __ movdqu(xmm1, Operand(src, 0x10));
    __ movdqu(xmm2, Operand(src, count, times_1, -0x10));
    __ movdqu(Operand(dst, 0x00), xmm0);
    __ movdqu(Operand(dst, 0x10), xmm1);
    __ movdqu(Operand(dst, count, times_1, -0x10), xmm2);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f49_63);
    __ movdqu(xmm0, Operand(src, 0x00));
    __ movdqu(xmm1, Operand(src, 0x10));
    __ movdqu(xmm2, Operand(src, 0x20));
    __ movdqu(xmm3, Operand(src, count, times_1, -0x10));
    __ movdqu(Operand(dst, 0x00), xmm0);
    __ movdqu(Operand(dst, 0x10), xmm1);
    __ movdqu(Operand(dst, 0x20), xmm2);
    __ movdqu(Operand(dst, count, times_1, -0x10), xmm3);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&medium_handlers);
    __ dd(conv.address(&f9_16));
    __ dd(conv.address(&f17_32));
    __ dd(conv.address(&f33_48));
    __ dd(conv.address(&f49_63));

    __ bind(&medium_size);  // Entry point into this block.
    __ mov(eax, count);
    __ dec(eax);
    __ shr(eax, 4);
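    // (count - 1) >> 4 selects one of the four handlers:
    // 9..16 -> f9_16, 17..32 -> f17_32, 33..48 -> f33_48, 49..63 -> f49_63.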
    if (FLAG_debug_code) {
      Label ok;
      __ cmp(eax, 3);
      __ j(below_equal, &ok);
      __ int3();
      __ bind(&ok);
    }
    __ mov(eax, Operand(eax, times_4, conv.address(&medium_handlers)));
    __ jmp(eax);
  }
  {
    // Specialized copiers for copy_size <= 8 bytes.
    Label small_handlers, f0, f1, f2, f3, f4, f5_8;
    __ bind(&f0);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f1);
    __ mov_b(eax, Operand(src, 0));
    __ mov_b(Operand(dst, 0), eax);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f2);
    __ mov_w(eax, Operand(src, 0));
    __ mov_w(Operand(dst, 0), eax);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f3);
    __ mov_w(eax, Operand(src, 0));
    __ mov_b(edx, Operand(src, 2));
    __ mov_w(Operand(dst, 0), eax);
    __ mov_b(Operand(dst, 2), edx);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f4);
    __ mov(eax, Operand(src, 0));
    __ mov(Operand(dst, 0), eax);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&f5_8);
    __ mov(eax, Operand(src, 0));
    __ mov(edx, Operand(src, count, times_1, -4));
    __ mov(Operand(dst, 0), eax);
    __ mov(Operand(dst, count, times_1, -4), edx);
    MemMoveEmitPopAndReturn(&masm);

    __ bind(&small_handlers);
    __ dd(conv.address(&f0));
    __ dd(conv.address(&f1));
    __ dd(conv.address(&f2));
    __ dd(conv.address(&f3));
    __ dd(conv.address(&f4));
    __ dd(conv.address(&f5_8));
    __ dd(conv.address(&f5_8));
    __ dd(conv.address(&f5_8));
    __ dd(conv.address(&f5_8));
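    // The table is indexed directly by count (0..8); sizes 5 through 8 all
    // share f5_8, which copies two possibly-overlapping 4-byte words.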

    __ bind(&small_size);  // Entry point into this block.
    if (FLAG_debug_code) {
      Label ok;
      __ cmp(count, 8);
      __ j(below_equal, &ok);
      __ int3();
      __ bind(&ok);
    }
    __ mov(eax, Operand(count, times_4, conv.address(&small_handlers)));
    __ jmp(eax);
  }

  __ bind(&pop_and_return);
  MemMoveEmitPopAndReturn(&masm);

  CodeDesc desc;
  masm.GetCode(&desc);
  DCHECK(!RelocInfo::RequiresRelocation(desc));
  Assembler::FlushICache(isolate, buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  // TODO(jkummerow): It would be nice to register this code creation event
  // with the PROFILE / GDBJIT system.
  return FUNCTION_CAST<MemMoveFunction>(buffer);
}


#undef __

// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)


void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register scratch = edi;
  DCHECK(!AreAliased(receiver, key, value, target_map, scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(
        receiver, scratch, allocation_memento_found);
  }

  // Set transitioned map.
  __ mov(FieldOperand(receiver, HeapObject::kMapOffset), target_map);
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      target_map,
                      scratch,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Return address is on the stack.
  DCHECK(receiver.is(edx));
  DCHECK(key.is(ecx));
  DCHECK(value.is(eax));
  DCHECK(target_map.is(ebx));

  Label loop, entry, convert_hole, gc_required, only_change_map;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
  __ j(equal, &only_change_map);

  __ push(eax);
  __ push(ebx);
  __ push(esi);

  __ mov(edi, FieldOperand(edi, FixedArray::kLengthOffset));

  // Allocate new FixedDoubleArray.
  // edx: receiver
  // edi: length of source FixedArray (smi-tagged)
  AllocationFlags flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT);
  __ Allocate(FixedDoubleArray::kHeaderSize, times_8, edi,
              REGISTER_VALUE_IS_SMI, eax, ebx, no_reg, &gc_required, flags);

  // eax: destination FixedDoubleArray
  // edi: number of elements
  // edx: receiver
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         Immediate(masm->isolate()->factory()->fixed_double_array_map()));
  __ mov(FieldOperand(eax, FixedDoubleArray::kLengthOffset), edi);
  __ mov(esi, FieldOperand(edx, JSObject::kElementsOffset));
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
  __ mov(ebx, eax);
  __ RecordWriteField(edx,
                      JSObject::kElementsOffset,
                      ebx,
                      edi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  __ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset));

  // Prepare for conversion loop.
  ExternalReference canonical_the_hole_nan_reference =
      ExternalReference::address_of_the_hole_nan();
  XMMRegister the_hole_nan = xmm1;
  __ movsd(the_hole_nan,
           Operand::StaticVariable(canonical_the_hole_nan_reference));
  __ jmp(&entry);

  // Call into runtime if GC is required.
  __ bind(&gc_required);

  // Restore registers before jumping into runtime.
  __ pop(esi);
  __ pop(ebx);
  __ pop(eax);
  __ jmp(fail);

  // Convert and copy elements
  // esi: source FixedArray
  __ bind(&loop);
  __ mov(ebx, FieldOperand(esi, edi, times_2, FixedArray::kHeaderSize));
  // ebx: current element from source
  // edi: index of current element
  __ JumpIfNotSmi(ebx, &convert_hole);

  // Normal smi, convert it to double and store.
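  // Note that edi stays smi-tagged (value << 1), so the times_4 scale in the
  // movsd below addresses index * 8 bytes, i.e. one double per element.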
  __ SmiUntag(ebx);
  __ Cvtsi2sd(xmm0, ebx);
  __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
           xmm0);
  __ jmp(&entry);

  // Found hole, store hole_nan_as_double instead.
  __ bind(&convert_hole);

  if (FLAG_debug_code) {
    __ cmp(ebx, masm->isolate()->factory()->the_hole_value());
    __ Assert(equal, kObjectFoundInSmiOnlyArray);
  }

  __ movsd(FieldOperand(eax, edi, times_4, FixedDoubleArray::kHeaderSize),
           the_hole_nan);

  __ bind(&entry);
  __ sub(edi, Immediate(Smi::FromInt(1)));
  __ j(not_sign, &loop);

  // Restore registers.
  __ pop(esi);
  __ pop(ebx);
  __ pop(eax);

  __ bind(&only_change_map);
  // eax: value
  // ebx: target map
  // Set transitioned map.
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx,
                      HeapObject::kMapOffset,
                      ebx,
                      edi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm,
    Register receiver,
    Register key,
    Register value,
    Register target_map,
    AllocationSiteMode mode,
    Label* fail) {
  // Return address is on the stack.
  DCHECK(receiver.is(edx));
  DCHECK(key.is(ecx));
  DCHECK(value.is(eax));
  DCHECK(target_map.is(ebx));

  Label loop, entry, convert_hole, gc_required, only_change_map, success;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(edx, edi, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
  __ cmp(edi, Immediate(masm->isolate()->factory()->empty_fixed_array()));
  __ j(equal, &only_change_map);

  __ push(esi);
  __ push(eax);
  __ push(edx);
  __ push(ebx);

  __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));

  // Allocate new FixedArray.
  // ebx: length of source FixedDoubleArray (smi-tagged)
  __ lea(edi, Operand(ebx, times_2, FixedArray::kHeaderSize));
  __ Allocate(edi, eax, esi, no_reg, &gc_required, NO_ALLOCATION_FLAGS);

  // eax: destination FixedArray
  // ebx: number of elements
  __ mov(FieldOperand(eax, HeapObject::kMapOffset),
         Immediate(masm->isolate()->factory()->fixed_array_map()));
  __ mov(FieldOperand(eax, FixedArray::kLengthOffset), ebx);
  __ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));

  // Allocating heap numbers in the loop below can fail and cause a jump to
  // gc_required. We can't leave a partly initialized FixedArray behind,
  // so pessimistically fill it with holes now.
  Label initialization_loop, initialization_loop_entry;
  __ jmp(&initialization_loop_entry, Label::kNear);
  __ bind(&initialization_loop);
  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
         masm->isolate()->factory()->the_hole_value());
  __ bind(&initialization_loop_entry);
  __ sub(ebx, Immediate(Smi::FromInt(1)));
  __ j(not_sign, &initialization_loop);

  __ mov(ebx, FieldOperand(edi, FixedDoubleArray::kLengthOffset));
  __ jmp(&entry);

  // ebx: target map
  // edx: receiver
  // Set transitioned map.
  __ bind(&only_change_map);
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx,
                      HeapObject::kMapOffset,
                      ebx,
                      edi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&success);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ pop(ebx);
  __ pop(edx);
  __ pop(eax);
  __ pop(esi);
  __ jmp(fail);

  // Box doubles into heap numbers.
  // edi: source FixedDoubleArray
  // eax: destination FixedArray
  __ bind(&loop);
  // ebx: index of current element (smi-tagged)
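  // The hole is a NaN with a fixed bit pattern, so it can be recognized by
  // comparing only the upper 32 bits of the double (at offset
  // kHeaderSize + sizeof(kHoleNanLower32)) against kHoleNanUpper32.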
  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
  __ cmp(FieldOperand(edi, ebx, times_4, offset), Immediate(kHoleNanUpper32));
  __ j(equal, &convert_hole);

  // Non-hole double, copy value into a heap number.
  __ AllocateHeapNumber(edx, esi, no_reg, &gc_required);
  // edx: new heap number
  __ movsd(xmm0,
           FieldOperand(edi, ebx, times_4, FixedDoubleArray::kHeaderSize));
  __ movsd(FieldOperand(edx, HeapNumber::kValueOffset), xmm0);
  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx);
  __ mov(esi, ebx);
  __ RecordWriteArray(eax,
                      edx,
                      esi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ jmp(&entry, Label::kNear);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize),
         masm->isolate()->factory()->the_hole_value());

  __ bind(&entry);
  __ sub(ebx, Immediate(Smi::FromInt(1)));
  __ j(not_sign, &loop);

  __ pop(ebx);
  __ pop(edx);
  // ebx: target map
  // edx: receiver
  // Set transitioned map.
  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
  __ RecordWriteField(edx,
                      HeapObject::kMapOffset,
                      ebx,
                      edi,
                      kDontSaveFPRegs,
                      OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
  __ RecordWriteField(edx,
                      JSObject::kElementsOffset,
                      eax,
                      edi,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Restore registers.
  __ pop(eax);
  __ pop(esi);

  __ bind(&success);
}


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Factory* factory,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ test(result, Immediate(kIsIndirectStringMask));
  __ j(zero, &check_sequential, Label::kNear);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ test(result, Immediate(kSlicedNotConsMask));
  __ j(zero, &cons_string, Label::kNear);

  // Handle slices.
  Label indirect_string_loaded;
  __ mov(result, FieldOperand(string, SlicedString::kOffsetOffset));
  __ SmiUntag(result);
  __ add(index, result);
  __ mov(string, FieldOperand(string, SlicedString::kParentOffset));
  __ jmp(&indirect_string_loaded, Label::kNear);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ cmp(FieldOperand(string, ConsString::kSecondOffset),
         Immediate(factory->empty_string()));
  __ j(not_equal, call_runtime);
  __ mov(string, FieldOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ mov(result, FieldOperand(string, HeapObject::kMapOffset));
  __ movzx_b(result, FieldOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label seq_string;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ test(result, Immediate(kStringRepresentationMask));
  __ j(zero, &seq_string, Label::kNear);

  // Handle external strings.
  Label one_byte_external, done;
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ test(result, Immediate(kIsIndirectStringMask));
    __ Assert(zero, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ test_b(result, Immediate(kShortExternalStringMask));
  __ j(not_zero, call_runtime);
  // Check encoding.
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ test_b(result, Immediate(kStringEncodingMask));
  __ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
  __ j(not_equal, &one_byte_external, Label::kNear);
  // Two-byte string.
  __ movzx_w(result, Operand(result, index, times_2, 0));
  __ jmp(&done, Label::kNear);
  __ bind(&one_byte_external);
  // One-byte string.
  __ movzx_b(result, Operand(result, index, times_1, 0));
  __ jmp(&done, Label::kNear);

  // Dispatch on the encoding: one-byte or two-byte.
  Label one_byte;
  __ bind(&seq_string);
  STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
  STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
  __ test(result, Immediate(kStringEncodingMask));
  __ j(not_zero, &one_byte, Label::kNear);

  // Two-byte string.
  // Load the two-byte character code into the result register.
  __ movzx_w(result, FieldOperand(string,
                                  index,
                                  times_2,
                                  SeqTwoByteString::kHeaderSize));
  __ jmp(&done, Label::kNear);

  // One-byte string.
  // Load the byte into the result register.
  __ bind(&one_byte);
  __ movzx_b(result, FieldOperand(string,
                                  index,
                                  times_1,
                                  SeqOneByteString::kHeaderSize));
  __ bind(&done);
}

#undef __


CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
  USE(isolate);
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  CodePatcher patcher(isolate, young_sequence_.start(),
                      young_sequence_.length());
  patcher.masm()->push(ebp);
  patcher.masm()->mov(ebp, esp);
  patcher.masm()->push(esi);
  patcher.masm()->push(edi);
}


#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return *candidate == kCallOpcode;
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}


void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    sequence++;  // Skip the kCallOpcode byte
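    // The 4-byte call operand is a rel32 displacement relative to the next
    // instruction, so the absolute target is the displacement's address plus
    // its value plus kCallTargetAddressOffset.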
    Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
        Assembler::kCallTargetAddressOffset;
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    Assembler::FlushICache(isolate, sequence, young_length);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(isolate, sequence, young_length);
    patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE32);
  }
}


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_IA32