// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_IA32

#include "ia32/lithium-codegen-ia32.h"
#include "ic.h"
#include "code-stubs.h"
#include "deoptimizer.h"
#include "stub-cache.h"
#include "codegen.h"

namespace v8 {
namespace internal {


static SaveFPRegsMode GetSaveFPRegsMode() {
  // We don't need to save floating point regs when generating the snapshot.
  return CpuFeatures::IsSafeForSnapshot(SSE2) ? kSaveFPRegs : kDontSaveFPRegs;
}


// When invoking builtins, we need to record the safepoint in the middle of
// the invoke instruction sequence generated by the macro assembler.
class SafepointGenerator : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) {}
  virtual ~SafepointGenerator() { }

  virtual void BeforeCall(int call_size) const {}

  virtual void AfterCall() const {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  ASSERT(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // MANUAL indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::MANUAL);

  support_aligned_spilled_doubles_ = info()->IsOptimizing();

  dynamic_frame_alignment_ = info()->IsOptimizing() &&
      ((chunk()->num_double_slots() > 2 &&
        !chunk()->graph()->is_recursive()) ||
       !info()->osr_ast_id().IsNone());

  return GeneratePrologue() &&
      GenerateBody() &&
      GenerateDeferredCode() &&
      GenerateJumpTable() &&
      GenerateSafepointTable();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  ASSERT(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  if (FLAG_weak_embedded_maps_in_optimized_code) {
    RegisterDependentCodeForEmbeddedMaps(code);
  }
  PopulateDeoptimizationData(code);
  if (!info()->IsStub()) {
    Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
  }
  info()->CommitDependencies(code);
}


void LCodeGen::Abort(BailoutReason reason) {
  info()->set_bailout_reason(reason);
  status_ = ABORTED;
}


void LCodeGen::Comment(const char* format, ...) {
  if (!FLAG_code_comments) return;
  char buffer[4 * KB];
  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
  va_list arguments;
  va_start(arguments, format);
  builder.AddFormattedList(format, arguments);
  va_end(arguments);

  // Copy the string before recording it in the assembler to avoid
  // issues when the stack allocated buffer goes out of scope.
  size_t length = builder.position();
  Vector<char> copy = Vector<char>::New(length + 1);
  OS::MemCopy(copy.start(), builder.Finalize(), copy.length());
  masm()->RecordComment(copy.start());
}


#ifdef _MSC_VER
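// On Windows the stack is grown one page at a time through guard pages, so a
// frame that moves esp more than a page at once must touch every page in
// between, in order, before the reserved slots can be addressed safely.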
void LCodeGen::MakeSureStackPagesMapped(int offset) {
  const int kPageSize = 4 * KB;
  for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
    __ mov(Operand(esp, offset), eax);
  }
}
#endif


bool LCodeGen::GeneratePrologue() {
  ASSERT(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ int3();
    }
#endif

    // Strict mode functions and builtins need to replace the receiver
    // with undefined when called as functions (without an explicit
    // receiver object). ecx is zero for method calls and non-zero for
    // function calls.
    if (!info_->is_classic_mode() || info_->is_native()) {
      Label ok;
      __ test(ecx, Operand(ecx));
      __ j(zero, &ok, Label::kNear);
      // +1 for return address.
      int receiver_offset = (scope()->num_parameters() + 1) * kPointerSize;
      __ mov(Operand(esp, receiver_offset),
             Immediate(isolate()->factory()->undefined_value()));
      __ bind(&ok);
    }

    if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
      // Move state of dynamic frame alignment into edx.
      __ mov(edx, Immediate(kNoAlignmentPadding));

      Label do_not_pad, align_loop;
      STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
      // Align esp + 4 to a multiple of 2 * kPointerSize.
      __ test(esp, Immediate(kPointerSize));
      __ j(not_zero, &do_not_pad, Label::kNear);
      __ push(Immediate(0));
      __ mov(ebx, esp);
      __ mov(edx, Immediate(kAlignmentPaddingPushed));
      // Copy arguments, receiver, and return address.
      __ mov(ecx, Immediate(scope()->num_parameters() + 2));

      __ bind(&align_loop);
      __ mov(eax, Operand(ebx, 1 * kPointerSize));
      __ mov(Operand(ebx, 0), eax);
      __ add(Operand(ebx), Immediate(kPointerSize));
      __ dec(ecx);
      __ j(not_zero, &align_loop, Label::kNear);
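      // The loop shifted the arguments, receiver and return address down into
      // the padding word just pushed, leaving a hole above them; zap that slot
      // so the alignment padding can be recognized when the frame is unwound.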
      __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
      __ bind(&do_not_pad);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    ASSERT(!frame_is_built_);
    frame_is_built_ = true;
    __ push(ebp);  // Caller's frame pointer.
    __ mov(ebp, esp);
    info()->AddNoFrameRange(0, masm_->pc_offset());
    __ push(esi);  // Callee's context.
    if (info()->IsStub()) {
      __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
    } else {
      __ push(edi);  // Callee's JS function.
    }
  }

  if (info()->IsOptimizing() &&
      dynamic_frame_alignment_ &&
      FLAG_debug_code) {
    __ test(esp, Immediate(kPointerSize));
    __ Assert(zero, kFrameIsExpectedToBeAligned);
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  ASSERT(slots != 0 || !info()->IsOptimizing());
  if (slots > 0) {
    if (slots == 1) {
      if (dynamic_frame_alignment_) {
        __ push(edx);
      } else {
        __ push(Immediate(kNoAlignmentPadding));
      }
    } else {
      if (FLAG_debug_code) {
        __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
        MakeSureStackPagesMapped(slots * kPointerSize);
#endif
        __ push(eax);
        __ mov(Operand(eax), Immediate(slots));
        Label loop;
        __ bind(&loop);
        __ mov(MemOperand(esp, eax, times_4, 0),
               Immediate(kSlotsZapValue));
        __ dec(eax);
        __ j(not_zero, &loop);
        __ pop(eax);
      } else {
        __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
        MakeSureStackPagesMapped(slots * kPointerSize);
#endif
      }

      if (support_aligned_spilled_doubles_) {
        Comment(";;; Store dynamic frame alignment tag for spilled doubles");
        // Store dynamic frame alignment state in the first local.
        int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
        if (dynamic_frame_alignment_) {
          __ mov(Operand(ebp, offset), edx);
        } else {
          __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
        }
      }
    }

    if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
      Comment(";;; Save clobbered callee double registers");
      CpuFeatureScope scope(masm(), SSE2);
      int count = 0;
      BitVector* doubles = chunk()->allocated_double_registers();
      BitVector::Iterator save_iterator(doubles);
      while (!save_iterator.Done()) {
        __ movdbl(MemOperand(esp, count * kDoubleSize),
                  XMMRegister::FromAllocationIndex(save_iterator.Current()));
        save_iterator.Advance();
        count++;
      }
    }
  }

  // Possibly allocate a local context.
  int heap_slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
  if (heap_slots > 0) {
    Comment(";;; Allocate local context");
    // Argument to NewContext is the function, which is still in edi.
    __ push(edi);
    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(heap_slots);
      __ CallStub(&stub);
    } else {
      __ CallRuntime(Runtime::kNewFunctionContext, 1);
    }
    RecordSafepoint(Safepoint::kNoLazyDeopt);
    // Context is returned in both eax and esi.  It replaces the context
    // passed to us.  It's saved in the stack and kept live in esi.
    __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), esi);

    // Copy parameters into context if necessary.
    int num_parameters = scope()->num_parameters();
    for (int i = 0; i < num_parameters; i++) {
      Variable* var = scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ mov(eax, Operand(ebp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(var->index());
        __ mov(Operand(esi, context_offset), eax);
        // Update the write barrier. This clobbers eax and ebx.
        __ RecordWriteContextSlot(esi,
                                  context_offset,
                                  eax,
                                  ebx,
                                  kDontSaveFPRegs);
      }
    }
    Comment(";;; End allocate local context");
  }

  // Trace the call.
  if (FLAG_trace && info()->IsOptimizing()) {
    // We have not executed any compiled code yet, so esi still holds the
    // incoming context.
    __ CallRuntime(Runtime::kTraceEnter, 0);
  }
  return !is_aborted();
}


bool LCodeGen::GenerateBody() {
  ASSERT(is_generating());
  bool emit_instructions = true;
  for (current_instruction_ = 0;
       !is_aborted() && current_instruction_ < instructions_->length();
       current_instruction_++) {
    LInstruction* instr = instructions_->at(current_instruction_);

    // Don't emit code for basic blocks with a replacement.
    if (instr->IsLabel()) {
      emit_instructions = !LLabel::cast(instr)->HasReplacement();
    }
    if (!emit_instructions) continue;

    if (FLAG_code_comments && instr->HasInterestingComment(this)) {
      Comment(";;; <@%d,#%d> %s",
              current_instruction_,
              instr->hydrogen_value()->id(),
              instr->Mnemonic());
    }

    if (!CpuFeatures::IsSupported(SSE2)) FlushX87StackIfNecessary(instr);

    RecordAndUpdatePosition(instr->position());

    instr->CompileToNative(this);

    if (!CpuFeatures::IsSupported(SSE2)) {
      if (FLAG_debug_code && FLAG_enable_slow_asserts) {
        __ VerifyX87StackDepth(x87_stack_depth_);
      }
    }
  }
  EnsureSpaceForLazyDeopt();
  return !is_aborted();
}


bool LCodeGen::GenerateJumpTable() {
  Label needs_frame;
  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
  }
  for (int i = 0; i < jump_table_.length(); i++) {
    __ bind(&jump_table_[i].label);
    Address entry = jump_table_[i].address;
    Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
    if (id == Deoptimizer::kNotDeoptimizationEntry) {
      Comment(";;; jump table entry %d.", i);
    } else {
      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
    }
    if (jump_table_[i].needs_frame) {
      __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
      if (needs_frame.is_bound()) {
        __ jmp(&needs_frame);
      } else {
        __ bind(&needs_frame);
        __ push(MemOperand(ebp, StandardFrameConstants::kContextOffset));
        // This variant of deopt can only be used with stubs. Since we don't
        // have a function pointer to install in the stack frame that we're
        // building, install a special marker there instead.
        ASSERT(info()->IsStub());
        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
        // Push a PC inside the function so that the deopt code can find where
        // the deopt comes from. It doesn't have to be the precise return
        // address of a "calling" LAZY deopt, it only has to be somewhere
        // inside the code body.
        Label push_approx_pc;
        __ call(&push_approx_pc);
        __ bind(&push_approx_pc);
        // Push the continuation which was stashed where the ebp should
        // be. Replace it with the saved ebp.
        __ push(MemOperand(esp, 3 * kPointerSize));
        __ mov(MemOperand(esp, 4 * kPointerSize), ebp);
        __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
        __ ret(0);  // Call the continuation without clobbering registers.
      }
    } else {
      __ call(entry, RelocInfo::RUNTIME_ENTRY);
    }
  }
  return !is_aborted();
}


bool LCodeGen::GenerateDeferredCode() {
  ASSERT(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      int pos = instructions_->at(code->instruction_index())->position();
      RecordAndUpdatePosition(pos);

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        ASSERT(!frame_is_built_);
        ASSERT(info()->IsStub());
        frame_is_built_ = true;
        // Build the frame in such a way that esi isn't trashed.
        __ push(ebp);  // Caller's frame pointer.
        __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
        __ lea(ebp, Operand(esp, 2 * kPointerSize));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        ASSERT(frame_is_built_);
        frame_is_built_ = false;
        __ mov(esp, ebp);
        __ pop(ebp);
      }
      __ jmp(code->exit());
    }
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  ASSERT(is_done());
  if (!info()->IsStub()) {
    // For lazy deoptimization we need space to patch a call after every call.
    // Ensure there is always space for such patching, even if the code ends
    // in a call.
    int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
    while (masm()->pc_offset() < target_offset) {
      masm()->nop();
    }
  }
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int index) const {
  return Register::FromAllocationIndex(index);
}


X87Register LCodeGen::ToX87Register(int index) const {
  return X87Register::FromAllocationIndex(index);
}


XMMRegister LCodeGen::ToDoubleRegister(int index) const {
  return XMMRegister::FromAllocationIndex(index);
}


void LCodeGen::X87LoadForUsage(X87Register reg) {
  ASSERT(X87StackContains(reg));
  X87Fxch(reg);
  x87_stack_depth_--;
}


void LCodeGen::X87Fxch(X87Register reg, int other_slot) {
  ASSERT(X87StackContains(reg) && x87_stack_depth_ > other_slot);
  int i  = X87ArrayIndex(reg);
  int st = x87_st2idx(i);
  if (st != other_slot) {
    int other_i = x87_st2idx(other_slot);
    X87Register other   = x87_stack_[other_i];
    x87_stack_[other_i] = reg;
    x87_stack_[i]       = other;
    if (st == 0) {
      __ fxch(other_slot);
    } else if (other_slot == 0) {
      __ fxch(st);
    } else {
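      // Neither slot is st(0): route the swap through the top of the stack
      // with three exchanges (st <-> st(0), other_slot <-> st(0), st <-> st(0)).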
      __ fxch(st);
      __ fxch(other_slot);
      __ fxch(st);
    }
  }
}


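// x87_stack_ models the FPU register stack bottom-up, so the top of the
// stack, st(0), lives at array index x87_stack_depth_ - 1. The conversion
// between an array index and an st(i) slot is therefore self-inverse.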
int LCodeGen::x87_st2idx(int pos) {
  return x87_stack_depth_ - pos - 1;
}


int LCodeGen::X87ArrayIndex(X87Register reg) {
  for (int i = 0; i < x87_stack_depth_; i++) {
    if (x87_stack_[i].is(reg)) return i;
  }
  UNREACHABLE();
  return -1;
}


bool LCodeGen::X87StackContains(X87Register reg) {
  for (int i = 0; i < x87_stack_depth_; i++) {
    if (x87_stack_[i].is(reg)) return true;
  }
  return false;
}


void LCodeGen::X87Free(X87Register reg) {
  ASSERT(X87StackContains(reg));
  int i  = X87ArrayIndex(reg);
  int st = x87_st2idx(i);
  if (st > 0) {
    // Keep track of how fstp(st) changes the order of elements.
    int tos_i = x87_st2idx(0);
    x87_stack_[i] = x87_stack_[tos_i];
  }
  x87_stack_depth_--;
  __ fstp(st);
}


void LCodeGen::X87Mov(X87Register dst, Operand src, X87OperandType opts) {
  if (X87StackContains(dst)) {
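    // dst is already live on the stack: bring it to st(0) and pop the stale
    // value; the X87Fld below pushes the new value into the same virtual slot.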
    X87Fxch(dst);
    __ fstp(0);
  } else {
    ASSERT(x87_stack_depth_ < X87Register::kNumAllocatableRegisters);
    x87_stack_[x87_stack_depth_] = dst;
    x87_stack_depth_++;
  }
  X87Fld(src, opts);
}


void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
  if (opts == kX87DoubleOperand) {
    __ fld_d(src);
  } else if (opts == kX87FloatOperand) {
    __ fld_s(src);
  } else if (opts == kX87IntOperand) {
    __ fild_s(src);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::X87Mov(Operand dst, X87Register src) {
  X87Fxch(src);
  __ fst_d(dst);
}


void LCodeGen::X87PrepareToWrite(X87Register reg) {
  if (X87StackContains(reg)) {
    X87Free(reg);
  }
  // Mark this register as the next register to write to.
  x87_stack_[x87_stack_depth_] = reg;
}


void LCodeGen::X87CommitWrite(X87Register reg) {
  // Assert that reg is prepared to write, but not on the virtual stack yet.
  ASSERT(!X87StackContains(reg) && x87_stack_[x87_stack_depth_].is(reg) &&
      x87_stack_depth_ < X87Register::kNumAllocatableRegisters);
  x87_stack_depth_++;
}


void LCodeGen::X87PrepareBinaryOp(
    X87Register left, X87Register right, X87Register result) {
  // You need to use DefineSameAsFirst for x87 instructions.
  ASSERT(result.is(left));
  X87Fxch(right, 1);
  X87Fxch(left);
}


void LCodeGen::FlushX87StackIfNecessary(LInstruction* instr) {
  if (x87_stack_depth_ > 0 && instr->ClobbersDoubleRegisters()) {
    bool double_inputs = instr->HasDoubleRegisterInput();

    // Flush the stack from tos down, since X87Free() will mess with tos.
    for (int i = x87_stack_depth_-1; i >= 0; i--) {
      X87Register reg = x87_stack_[i];
      // Skip registers which contain the inputs for the next instruction
      // when flushing the stack.
      if (double_inputs && instr->IsDoubleInput(reg, this)) {
        continue;
      }
      X87Free(reg);
      if (i < x87_stack_depth_-1) i++;
    }
  }
  if (instr->IsReturn()) {
    while (x87_stack_depth_ > 0) {
      __ fstp(0);
      x87_stack_depth_--;
    }
  }
}


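// Pop every live x87 register off the physical FPU stack so it is empty at
// the deopt entry. The virtual x87_stack_depth_ is deliberately left intact:
// this is only emitted on the branch that deopts, and the fall-through path
// still relies on the virtual stack model.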
void LCodeGen::EmitFlushX87ForDeopt() {
  for (int i = 0; i < x87_stack_depth_; i++) __ fstp(0);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  ASSERT(op->IsRegister());
  return ToRegister(op->index());
}


X87Register LCodeGen::ToX87Register(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToX87Register(op->index());
}


XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  ASSERT(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  ASSERT(r.IsSmiOrTagged());
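  // On ia32 a Smi is the integer value shifted left by one with a zero tag
  // bit, so reinterpreting the tagged value as an int32 yields value << 1.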
  return reinterpret_cast<int32_t>(Smi::FromInt(value));
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle();
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasDoubleValue());
  return constant->DoubleValue();
}


ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  ASSERT(constant->HasExternalReferenceValue());
  return constant->ExternalReferenceValue();
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


Operand LCodeGen::ToOperand(LOperand* op) const {
  if (op->IsRegister()) return Operand(ToRegister(op));
  if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
  return Operand(ebp, StackSlotOffset(op->index()));
}


Operand LCodeGen::HighOperand(LOperand* op) {
  ASSERT(op->IsDoubleStackSlot());
  return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();
  // The output frame height does not include the parameters.
  int height = translation_size - environment->parameter_count();

  WriteTranslation(environment->outer(), translation);
  bool has_closure_id = !info()->closure().is_null() &&
      !info()->closure().is_identical_to(environment->closure());
  int closure_id = has_closure_id
      ? DefineDeoptimizationLiteral(environment->closure())
      : Translation::kSelfLiteralId;
  switch (environment->frame_type()) {
    case JS_FUNCTION:
      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
      break;
    case JS_CONSTRUCT:
      translation->BeginConstructStubFrame(closure_id, translation_size);
      break;
    case JS_GETTER:
      ASSERT(translation_size == 1);
      ASSERT(height == 0);
      translation->BeginGetterStubFrame(closure_id);
      break;
    case JS_SETTER:
      ASSERT(translation_size == 2);
      ASSERT(height == 0);
      translation->BeginSetterStubFrame(closure_id);
      break;
    case ARGUMENTS_ADAPTOR:
      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
      break;
    case STUB:
      translation->BeginCompiledStubFrame();
      break;
    default:
      UNREACHABLE();
  }

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(environment,
                     translation,
                     value,
                     environment->HasTaggedValueAt(i),
                     environment->HasUint32ValueAt(i),
                     &object_index,
                     &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    if (is_tagged) {
      translation->StoreStackSlot(op->index());
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(op->index());
    } else {
      translation->StoreInt32StackSlot(op->index());
    }
  } else if (op->IsDoubleStackSlot()) {
    translation->StoreDoubleStackSlot(op->index());
  } else if (op->IsArgument()) {
    ASSERT(is_tagged);
    int src_index = GetStackSlotCount() + op->index();
    translation->StoreStackSlot(src_index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    XMMRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle());
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  ASSERT(instr != NULL);
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());
  __ call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC ||
      code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallRuntime(const Runtime::Function* fun,
                           int argc,
                           LInstruction* instr) {
  ASSERT(instr != NULL);
  ASSERT(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  RecordPosition(pointers->position());

  __ CallRuntime(fun, argc);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);

  ASSERT(info()->is_calling());
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    if (!ToRegister(context).is(esi)) {
      __ mov(esi, ToRegister(context));
    }
  } else if (context->IsStackSlot()) {
    __ mov(esi, ToOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ LoadObject(esi, Handle<Object>::cast(constant->handle()));
  } else {
    UNREACHABLE();
  }
}

void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);

  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);

  ASSERT(info()->is_calling());
}


void LCodeGen::RegisterEnvironmentForDeoptimization(
    LEnvironment* environment, Safepoint::DeoptMode mode) {
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::DeoptimizeIf(Condition cc,
                            LEnvironment* environment,
                            Deoptimizer::BailoutType bailout_type) {
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  ASSERT(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  ASSERT(info()->IsOptimizing() || info()->IsStub());
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
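    // Stress mode: decrement a global counter and force a deoptimization
    // every FLAG_deopt_every_n_times-th time this point is reached, saving
    // and restoring eax and the flags so the surrounding code is unaffected.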
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ pushfd();
    __ push(eax);
    __ mov(eax, Operand::StaticVariable(count));
    __ sub(eax, Immediate(1));
    __ j(not_zero, &no_deopt, Label::kNear);
    if (FLAG_trap_on_deopt) __ int3();
    __ mov(eax, Immediate(FLAG_deopt_every_n_times));
    __ mov(Operand::StaticVariable(count), eax);
    __ pop(eax);
    __ popfd();
    ASSERT(frame_is_built_);
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ mov(Operand::StaticVariable(count), eax);
    __ pop(eax);
    __ popfd();
  }

  // Before instructions that can deopt, we normally flush the x87 stack. But
  // the inputs or outputs of the current instruction may still be on the
  // stack, so we flush them here from the physical stack to leave it in a
  // consistent state.
  if (x87_stack_depth_ > 0) {
    Label done;
    if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
    EmitFlushX87ForDeopt();
    __ bind(&done);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label done;
    if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
    __ int3();
    __ bind(&done);
  }

  ASSERT(info()->IsStub() || frame_is_built_);
  if (cc == no_condition && frame_is_built_) {
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    // We often have several deopts to the same entry; reuse the last
    // jump entry if this is the case.
    if (jump_table_.is_empty() ||
        jump_table_.last().address != entry ||
        jump_table_.last().needs_frame != !frame_is_built_ ||
        jump_table_.last().bailout_type != bailout_type) {
      Deoptimizer::JumpTableEntry table_entry(entry,
                                              bailout_type,
                                              !frame_is_built_);
      jump_table_.Add(table_entry, zone());
    }
    if (cc == no_condition) {
      __ jmp(&jump_table_.last().label);
    } else {
      __ j(cc, &jump_table_.last().label);
    }
  }
}


void LCodeGen::DeoptimizeIf(Condition cc,
                            LEnvironment* environment) {
  Deoptimizer::BailoutType bailout_type = info()->IsStub()
      ? Deoptimizer::LAZY
      : Deoptimizer::EAGER;
  DeoptimizeIf(cc, environment, bailout_type);
}


void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
  ZoneList<Handle<Map> > maps(1, zone());
  int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
  for (RelocIterator it(*code, mode_mask); !it.done(); it.next()) {
    RelocInfo::Mode mode = it.rinfo()->rmode();
    if (mode == RelocInfo::EMBEDDED_OBJECT &&
        it.rinfo()->target_object()->IsMap()) {
      Handle<Map> map(Map::cast(it.rinfo()->target_object()));
      if (map->CanTransition()) {
        maps.Add(map, zone());
      }
    }
  }
#ifdef VERIFY_HEAP
  // This disables verification of weak embedded maps after full GC.
  // AddDependentCode can cause a GC, which would observe the state where
  // this code is not yet in the depended code lists of the embedded maps.
  NoWeakEmbeddedMapsVerificationScope disable_verification_of_embedded_maps;
#endif
  for (int i = 0; i < maps.length(); i++) {
    maps.at(i)->AddDependentCode(DependentCode::kWeaklyEmbeddedGroup, code);
  }
}


void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
  int length = deoptimizations_.length();
  if (length == 0) return;
  Handle<DeoptimizationInputData> data =
      factory()->NewDeoptimizationInputData(length, TENURED);

  Handle<ByteArray> translations =
      translations_.CreateByteArray(isolate()->factory());
  data->SetTranslationByteArray(*translations);
  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));

  Handle<FixedArray> literals =
      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
  { AllowDeferredHandleDereference copy_handles;
    for (int i = 0; i < deoptimization_literals_.length(); i++) {
      literals->set(i, *deoptimization_literals_[i]);
    }
    data->SetLiteralArray(*literals);
  }

  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));

  // Populate the deoptimization entries.
  for (int i = 0; i < length; i++) {
    LEnvironment* env = deoptimizations_[i];
    data->SetAstId(i, env->ast_id());
    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
    data->SetArgumentsStackHeight(i,
                                  Smi::FromInt(env->arguments_stack_height()));
    data->SetPc(i, Smi::FromInt(env->pc_offset()));
  }
  code->set_deoptimization_data(*data);
}


int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
  int result = deoptimization_literals_.length();
  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
  }
  deoptimization_literals_.Add(literal, zone());
  return result;
}


void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
  ASSERT(deoptimization_literals_.length() == 0);

  const ZoneList<Handle<JSFunction> >* inlined_closures =
      chunk()->inlined_closures();

  for (int i = 0, length = inlined_closures->length();
       i < length;
       i++) {
    DefineDeoptimizationLiteral(inlined_closures->at(i));
  }

  inlined_function_count_ = deoptimization_literals_.length();
}


void LCodeGen::RecordSafepointWithLazyDeopt(
    LInstruction* instr, SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  ASSERT(kind == expected_safepoint_kind_);
  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint =
      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
  LPointerMap empty_pointers(RelocInfo::kNoPosition, zone());
  RecordSafepoint(&empty_pointers, mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
}


void LCodeGen::RecordPosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
}


void LCodeGen::RecordAndUpdatePosition(int position) {
  if (position >= 0 && position != old_position_) {
    masm()->positions_recorder()->RecordPosition(position);
    old_position_ = position;
  }
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  ASSERT(ToRegister(instr->context()).is(esi));
  ASSERT(ToRegister(instr->result()).is(eax));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpConstructResult: {
      RegExpConstructResultStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::RegExpExec: {
      RegExpExecStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::NumberToString: {
      NumberToStringStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::StringCompare: {
      StringCompareStub stub;
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::TranscendentalCache: {
      TranscendentalCacheStub stub(instr->transcendental_type(),
                                   TranscendentalCacheStub::TAGGED);
      CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  // Record the address of the first unknown OSR value as the place to enter.
  if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
}


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  HValue* left = hmod->left();
  HValue* right = hmod->right();
  if (hmod->HasPowerOf2Divisor()) {
    // TODO(svenpanne) We should really do the strength reduction on the
    // Hydrogen level.
    Register left_reg = ToRegister(instr->left());
    ASSERT(left_reg.is(ToRegister(instr->result())));

    // Note: The code below even works when right contains kMinInt.
    int32_t divisor = Abs(right->GetInteger32Constant());

    Label left_is_not_negative, done;
    if (left->CanBeNegative()) {
      __ test(left_reg, Operand(left_reg));
      __ j(not_sign, &left_is_not_negative, Label::kNear);
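      // For a negative dividend compute -((-left) & (divisor - 1)): the mask
      // extracts the magnitude of the remainder and the surrounding negations
      // restore the sign of the dividend, matching truncated division.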
   1314       __ neg(left_reg);
   1315       __ and_(left_reg, divisor - 1);
   1316       __ neg(left_reg);
   1317       if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1318         DeoptimizeIf(zero, instr->environment());
   1319       }
   1320       __ jmp(&done, Label::kNear);
   1321     }
   1322 
   1323     __ bind(&left_is_not_negative);
   1324     __ and_(left_reg, divisor - 1);
   1325     __ bind(&done);
   1326 
   1327   } else if (hmod->fixed_right_arg().has_value) {
   1328     Register left_reg = ToRegister(instr->left());
   1329     ASSERT(left_reg.is(ToRegister(instr->result())));
   1330     Register right_reg = ToRegister(instr->right());
   1331 
   1332     int32_t divisor = hmod->fixed_right_arg().value;
   1333     ASSERT(IsPowerOf2(divisor));
   1334 
   1335     // Check if our assumption of a fixed right operand still holds.
   1336     __ cmp(right_reg, Immediate(divisor));
   1337     DeoptimizeIf(not_equal, instr->environment());
   1338 
   1339     Label left_is_not_negative, done;
   1340     if (left->CanBeNegative()) {
   1341       __ test(left_reg, Operand(left_reg));
   1342       __ j(not_sign, &left_is_not_negative, Label::kNear);
   1343       __ neg(left_reg);
   1344       __ and_(left_reg, divisor - 1);
   1345       __ neg(left_reg);
   1346       if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1347         DeoptimizeIf(zero, instr->environment());
   1348       }
   1349       __ jmp(&done, Label::kNear);
   1350     }
   1351 
   1352     __ bind(&left_is_not_negative);
   1353     __ and_(left_reg, divisor - 1);
   1354     __ bind(&done);
   1355 
   1356   } else {
   1357     Register left_reg = ToRegister(instr->left());
   1358     ASSERT(left_reg.is(eax));
   1359     Register right_reg = ToRegister(instr->right());
   1360     ASSERT(!right_reg.is(eax));
   1361     ASSERT(!right_reg.is(edx));
   1362     Register result_reg = ToRegister(instr->result());
   1363     ASSERT(result_reg.is(edx));
   1364 
   1365     Label done;
   1366     // Check for x % 0, idiv would signal a divide error. We have to
   1367     // deopt in this case because we can't return a NaN.
   1368     if (right->CanBeZero()) {
   1369       __ test(right_reg, Operand(right_reg));
   1370       DeoptimizeIf(zero, instr->environment());
   1371     }
   1372 
   1373     // Check for kMinInt % -1, idiv would signal a divide error. We
   1374     // have to deopt if we care about -0, because we can't return that.
   1375     if (left->RangeCanInclude(kMinInt) && right->RangeCanInclude(-1)) {
   1376       Label no_overflow_possible;
   1377       __ cmp(left_reg, kMinInt);
   1378       __ j(not_equal, &no_overflow_possible, Label::kNear);
   1379       __ cmp(right_reg, -1);
   1380       if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1381         DeoptimizeIf(equal, instr->environment());
   1382       } else {
   1383         __ j(not_equal, &no_overflow_possible, Label::kNear);
   1384         __ Set(result_reg, Immediate(0));
   1385         __ jmp(&done, Label::kNear);
   1386       }
   1387       __ bind(&no_overflow_possible);
   1388     }
   1389 
   1390     // Sign extend dividend in eax into edx:eax.
   1391     __ cdq();
   1392 
   1393     // If we care about -0, test if the dividend is <0 and the result is 0.
   1394     if (left->CanBeNegative() &&
   1395         hmod->CanBeZero() &&
   1396         hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1397       Label positive_left;
   1398       __ test(left_reg, Operand(left_reg));
   1399       __ j(not_sign, &positive_left, Label::kNear);
   1400       __ idiv(right_reg);
   1401       __ test(result_reg, Operand(result_reg));
   1402       DeoptimizeIf(zero, instr->environment());
   1403       __ jmp(&done, Label::kNear);
   1404       __ bind(&positive_left);
   1405     }
   1406     __ idiv(right_reg);
   1407     __ bind(&done);
   1408   }
   1409 }
   1410 
   1411 
   1412 void LCodeGen::DoDivI(LDivI* instr) {
   1413   if (!instr->is_flooring() && instr->hydrogen()->HasPowerOf2Divisor()) {
   1414     Register dividend = ToRegister(instr->left());
   1415     int32_t divisor = instr->hydrogen()->right()->GetInteger32Constant();
   1416     int32_t test_value = 0;
   1417     int32_t power = 0;
   1418 
   1419     if (divisor > 0) {
   1420       test_value = divisor - 1;
   1421       power = WhichPowerOf2(divisor);
   1422     } else {
   1423       // Check for (0 / -x) that will produce negative zero.
   1424       if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1425         __ test(dividend, Operand(dividend));
   1426         DeoptimizeIf(zero, instr->environment());
   1427       }
   1428       // Check for (kMinInt / -1).
   1429       if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
   1430         __ cmp(dividend, kMinInt);
   1431         DeoptimizeIf(zero, instr->environment());
   1432       }
   1433       test_value = -divisor - 1;
   1434       power = WhichPowerOf2(-divisor);
   1435     }
   1436 
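            // Truncating division by a power of two cannot use sar directly on
            // a negative dividend, because an arithmetic shift rounds toward
            // negative infinity rather than toward zero. Negate first instead,
            // e.g. -7 / 4: neg gives 7, sar 2 gives 1, neg gives -1 (sar alone
            // would give -2).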
   1437     if (test_value != 0) {
   1438       if (instr->hydrogen()->CheckFlag(
   1439           HInstruction::kAllUsesTruncatingToInt32)) {
   1440         Label done, negative;
   1441         __ cmp(dividend, 0);
   1442         __ j(less, &negative, Label::kNear);
   1443         __ sar(dividend, power);
   1444         if (divisor < 0) __ neg(dividend);
   1445         __ jmp(&done, Label::kNear);
   1446 
   1447         __ bind(&negative);
   1448         __ neg(dividend);
   1449         __ sar(dividend, power);
   1450         if (divisor > 0) __ neg(dividend);
   1451         __ bind(&done);
   1452         return;  // Don't fall through to "__ neg" below.
   1453       } else {
   1454         // Deoptimize if remainder is not 0.
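                // (test_value is |divisor| - 1, so the AND is nonzero exactly
                // when the division has a remainder.)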
   1455         __ test(dividend, Immediate(test_value));
   1456         DeoptimizeIf(not_zero, instr->environment());
   1457         __ sar(dividend, power);
   1458       }
   1459     }
   1460 
   1461     if (divisor < 0) __ neg(dividend);
   1462 
   1463     return;
   1464   }
   1465 
   1466   LOperand* right = instr->right();
   1467   ASSERT(ToRegister(instr->result()).is(eax));
   1468   ASSERT(ToRegister(instr->left()).is(eax));
   1469   ASSERT(!ToRegister(instr->right()).is(eax));
   1470   ASSERT(!ToRegister(instr->right()).is(edx));
   1471 
   1472   Register left_reg = eax;
   1473 
   1474   // Check for x / 0.
   1475   Register right_reg = ToRegister(right);
   1476   if (instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) {
   1477     __ test(right_reg, ToOperand(right));
   1478     DeoptimizeIf(zero, instr->environment());
   1479   }
   1480 
   1481   // Check for (0 / -x) that will produce negative zero.
   1482   if (instr->hydrogen_value()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1483     Label left_not_zero;
   1484     __ test(left_reg, Operand(left_reg));
   1485     __ j(not_zero, &left_not_zero, Label::kNear);
   1486     __ test(right_reg, ToOperand(right));
   1487     DeoptimizeIf(sign, instr->environment());
   1488     __ bind(&left_not_zero);
   1489   }
   1490 
   1491   // Check for (kMinInt / -1).
   1492   if (instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow)) {
   1493     Label left_not_min_int;
   1494     __ cmp(left_reg, kMinInt);
   1495     __ j(not_zero, &left_not_min_int, Label::kNear);
   1496     __ cmp(right_reg, -1);
   1497     DeoptimizeIf(zero, instr->environment());
   1498     __ bind(&left_not_min_int);
   1499   }
   1500 
   1501   // Sign extend to edx.
   1502   __ cdq();
   1503   __ idiv(right_reg);
   1504 
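          // For flooring division, step the truncated quotient in eax down by
          // one when there is a nonzero remainder whose sign differs from the
          // divisor's: the xor makes edx negative exactly in that case, and
          // sar by 31 turns edx into 0 or -1. Illustrative example: 7 / -2
          // leaves eax = -3, edx = 1; 1 ^ -2 is negative, so eax becomes
          // -4 == floor(-3.5).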
   1505   if (instr->is_flooring()) {
   1506     Label done;
   1507     __ test(edx, edx);
   1508     __ j(zero, &done, Label::kNear);
   1509     __ xor_(edx, right_reg);
   1510     __ sar(edx, 31);
   1511     __ add(eax, edx);
   1512     __ bind(&done);
   1513   } else if (!instr->hydrogen()->CheckFlag(
   1514       HInstruction::kAllUsesTruncatingToInt32)) {
   1515     // Deoptimize if remainder is not 0.
   1516     __ test(edx, Operand(edx));
   1517     DeoptimizeIf(not_zero, instr->environment());
   1518   }
   1519 }
   1520 
   1521 
   1522 void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
   1523   ASSERT(instr->right()->IsConstantOperand());
   1524 
   1525   Register dividend = ToRegister(instr->left());
   1526   int32_t divisor = ToInteger32(LConstantOperand::cast(instr->right()));
   1527   Register result = ToRegister(instr->result());
   1528 
   1529   switch (divisor) {
   1530   case 0:
   1531     DeoptimizeIf(no_condition, instr->environment());
   1532     return;
   1533 
   1534   case 1:
   1535     __ Move(result, dividend);
   1536     return;
   1537 
   1538   case -1:
   1539     __ Move(result, dividend);
   1540     __ neg(result);
   1541     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1542       DeoptimizeIf(zero, instr->environment());
   1543     }
   1544     if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
   1545       DeoptimizeIf(overflow, instr->environment());
   1546     }
   1547     return;
   1548   }
   1549 
   1550   uint32_t divisor_abs = abs(divisor);
   1551   if (IsPowerOf2(divisor_abs)) {
   1552     int32_t power = WhichPowerOf2(divisor_abs);
   1553     if (divisor < 0) {
   1554       // The input register (dividend) is clobbered.
   1555       // Tedious: negating the dividend (in result) overflows for kMinInt.
   1556       __ mov(result, dividend);
   1557       __ sar(dividend, 31);
   1558       __ neg(result);
   1559       if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1560         DeoptimizeIf(zero, instr->environment());
   1561       }
   1562       __ shl(dividend, 32 - power);
   1563       __ sar(result, power);
   1564       __ not_(dividend);
   1565       // Mask the result; this repairs the neg overflow when dividend is kMinInt.
   1566       __ and_(result, dividend);
   1567     } else {
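              // For a positive power-of-two divisor, sar alone is flooring
              // division: an arithmetic right shift rounds toward negative
              // infinity.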
   1568       __ Move(result, dividend);
   1569       __ sar(result, power);
   1570     }
   1571   } else {
   1572     ASSERT(ToRegister(instr->left()).is(eax));
   1573     ASSERT(ToRegister(instr->result()).is(edx));
   1574     Register scratch = ToRegister(instr->temp());
   1575 
   1576     // Find b such that 2^b < divisor_abs < 2^(b+1).
   1577     unsigned b = 31 - CompilerIntrinsics::CountLeadingZeros(divisor_abs);
   1578     unsigned shift = 32 + b;  // One extra bit of precision (effectively).
   1579     double multiplier_f =
   1580         static_cast<double>(static_cast<uint64_t>(1) << shift) / divisor_abs;
   1581     int64_t multiplier;
   1582     if (multiplier_f - floor(multiplier_f) < 0.5) {
   1583       multiplier = static_cast<int64_t>(floor(multiplier_f));
   1584     } else {
   1585       multiplier = static_cast<int64_t>(floor(multiplier_f)) + 1;
   1586     }
   1587     // The multiplier is a uint32.
   1588     ASSERT(multiplier > 0 &&
   1589            multiplier < (static_cast<int64_t>(1) << 32));
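            // Illustrative example: divisor_abs == 3 gives b == 1, shift == 33
            // and multiplier == round(2^33 / 3) == 0xAAAAAAAB, so
            // (dividend * multiplier) >> 33 approximates dividend / 3.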
   1590     __ mov(scratch, dividend);
   1591     if (divisor < 0 &&
   1592         instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1593       __ test(dividend, dividend);
   1594       DeoptimizeIf(zero, instr->environment());
   1595     }
   1596     __ mov(edx, static_cast<int32_t>(multiplier));
   1597     __ imul(edx);
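            // imul is a signed multiply, so a multiplier with its top bit set
            // was treated as multiplier - 2^32; adding the dividend back into
            // the high word of the product compensates.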
   1598     if (static_cast<int32_t>(multiplier) < 0) {
   1599       __ add(edx, scratch);
   1600     }
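            // setcc below requires a byte register (eax, ebx, ecx or edx). If
            // the scratch register is not one, swap the low product word out
            // of eax so eax can serve as the byte scratch.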
   1601     Register reg_lo = eax;
   1602     Register reg_byte_scratch = scratch;
   1603     if (!reg_byte_scratch.is_byte_register()) {
   1604       __ xchg(reg_lo, reg_byte_scratch);
   1605       reg_lo = scratch;
   1606       reg_byte_scratch = eax;
   1607     }
   1608     if (divisor < 0) {
   1609       __ xor_(reg_byte_scratch, reg_byte_scratch);
   1610       __ cmp(reg_lo, 0x40000000);
   1611       __ setcc(above, reg_byte_scratch);
   1612       __ neg(edx);
   1613       __ sub(edx, reg_byte_scratch);
   1614     } else {
   1615       __ xor_(reg_byte_scratch, reg_byte_scratch);
   1616       __ cmp(reg_lo, 0xC0000000);
   1617       __ setcc(above_equal, reg_byte_scratch);
   1618       __ add(edx, reg_byte_scratch);
   1619     }
   1620     __ sar(edx, shift - 32);
   1621   }
   1622 }
   1623 
   1624 
   1625 void LCodeGen::DoMulI(LMulI* instr) {
   1626   Register left = ToRegister(instr->left());
   1627   LOperand* right = instr->right();
   1628 
   1629   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1630     __ mov(ToRegister(instr->temp()), left);
   1631   }
   1632 
   1633   if (right->IsConstantOperand()) {
   1634     // Try strength reductions on the multiplication.
   1635     // All replacement instructions are at most as long as the imul
   1636     // and have better latency.
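            // e.g. x * 5 becomes lea(left, [left + left*4]) and x * 4 becomes
            // shl(left, 2).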
   1637     int constant = ToInteger32(LConstantOperand::cast(right));
   1638     if (constant == -1) {
   1639       __ neg(left);
   1640     } else if (constant == 0) {
   1641       __ xor_(left, Operand(left));
   1642     } else if (constant == 2) {
   1643       __ add(left, Operand(left));
   1644     } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
   1645       // If we know that the multiplication can't overflow, it's safe to
   1646       // use instructions that don't set the overflow flag for the
   1647       // multiplication.
   1648       switch (constant) {
   1649         case 1:
   1650           // Do nothing.
   1651           break;
   1652         case 3:
   1653           __ lea(left, Operand(left, left, times_2, 0));
   1654           break;
   1655         case 4:
   1656           __ shl(left, 2);
   1657           break;
   1658         case 5:
   1659           __ lea(left, Operand(left, left, times_4, 0));
   1660           break;
   1661         case 8:
   1662           __ shl(left, 3);
   1663           break;
   1664         case 9:
   1665           __ lea(left, Operand(left, left, times_8, 0));
   1666           break;
   1667         case 16:
   1668           __ shl(left, 4);
   1669           break;
   1670         default:
   1671           __ imul(left, left, constant);
   1672           break;
   1673       }
   1674     } else {
   1675       __ imul(left, left, constant);
   1676     }
   1677   } else {
   1678     if (instr->hydrogen()->representation().IsSmi()) {
   1679       __ SmiUntag(left);
   1680     }
   1681     __ imul(left, ToOperand(right));
   1682   }
   1683 
   1684   if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
   1685     DeoptimizeIf(overflow, instr->environment());
   1686   }
   1687 
   1688   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1689     // Bail out if the result is supposed to be negative zero.
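            // (e.g. 0 * -5 is -0 in JavaScript, which is not representable as
            // an int32.)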
   1690     Label done;
   1691     __ test(left, Operand(left));
   1692     __ j(not_zero, &done, Label::kNear);
   1693     if (right->IsConstantOperand()) {
   1694       if (ToInteger32(LConstantOperand::cast(right)) < 0) {
   1695         DeoptimizeIf(no_condition, instr->environment());
   1696       } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
   1697         __ cmp(ToRegister(instr->temp()), Immediate(0));
   1698         DeoptimizeIf(less, instr->environment());
   1699       }
   1700     } else {
   1701       // Test the non-zero operand for negative sign.
   1702       __ or_(ToRegister(instr->temp()), ToOperand(right));
   1703       DeoptimizeIf(sign, instr->environment());
   1704     }
   1705     __ bind(&done);
   1706   }
   1707 }
   1708 
   1709 
   1710 void LCodeGen::DoBitI(LBitI* instr) {
   1711   LOperand* left = instr->left();
   1712   LOperand* right = instr->right();
   1713   ASSERT(left->Equals(instr->result()));
   1714   ASSERT(left->IsRegister());
   1715 
   1716   if (right->IsConstantOperand()) {
   1717     int32_t right_operand =
   1718         ToRepresentation(LConstantOperand::cast(right),
   1719                          instr->hydrogen()->representation());
   1720     switch (instr->op()) {
   1721       case Token::BIT_AND:
   1722         __ and_(ToRegister(left), right_operand);
   1723         break;
   1724       case Token::BIT_OR:
   1725         __ or_(ToRegister(left), right_operand);
   1726         break;
   1727       case Token::BIT_XOR:
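                // x ^ ~0 flips every bit, so emit not_ instead of xor with an
                // all-ones immediate.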
   1728         if (right_operand == int32_t(~0)) {
   1729           __ not_(ToRegister(left));
   1730         } else {
   1731           __ xor_(ToRegister(left), right_operand);
   1732         }
   1733         break;
   1734       default:
   1735         UNREACHABLE();
   1736         break;
   1737     }
   1738   } else {
   1739     switch (instr->op()) {
   1740       case Token::BIT_AND:
   1741         __ and_(ToRegister(left), ToOperand(right));
   1742         break;
   1743       case Token::BIT_OR:
   1744         __ or_(ToRegister(left), ToOperand(right));
   1745         break;
   1746       case Token::BIT_XOR:
   1747         __ xor_(ToRegister(left), ToOperand(right));
   1748         break;
   1749       default:
   1750         UNREACHABLE();
   1751         break;
   1752     }
   1753   }
   1754 }
   1755 
   1756 
   1757 void LCodeGen::DoShiftI(LShiftI* instr) {
   1758   LOperand* left = instr->left();
   1759   LOperand* right = instr->right();
   1760   ASSERT(left->Equals(instr->result()));
   1761   ASSERT(left->IsRegister());
   1762   if (right->IsRegister()) {
   1763     ASSERT(ToRegister(right).is(ecx));
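            // Variable-count shifts on ia32 take their count in cl, hence the
            // fixed ecx register.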
   1764 
   1765     switch (instr->op()) {
   1766       case Token::ROR:
   1767         __ ror_cl(ToRegister(left));
   1768         if (instr->can_deopt()) {
   1769           __ test(ToRegister(left), Immediate(0x80000000));
   1770           DeoptimizeIf(not_zero, instr->environment());
   1771         }
   1772         break;
   1773       case Token::SAR:
   1774         __ sar_cl(ToRegister(left));
   1775         break;
   1776       case Token::SHR:
   1777         __ shr_cl(ToRegister(left));
   1778         if (instr->can_deopt()) {
   1779           __ test(ToRegister(left), Immediate(0x80000000));
   1780           DeoptimizeIf(not_zero, instr->environment());
   1781         }
   1782         break;
   1783       case Token::SHL:
   1784         __ shl_cl(ToRegister(left));
   1785         break;
   1786       default:
   1787         UNREACHABLE();
   1788         break;
   1789     }
   1790   } else {
   1791     int value = ToInteger32(LConstantOperand::cast(right));
   1792     uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
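            // JavaScript shift counts use only the low five bits, matching the
            // behavior of the hardware 32-bit shift instructions.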
   1793     switch (instr->op()) {
   1794       case Token::ROR:
   1795         if (shift_count == 0 && instr->can_deopt()) {
   1796           __ test(ToRegister(left), Immediate(0x80000000));
   1797           DeoptimizeIf(not_zero, instr->environment());
   1798         } else {
   1799           __ ror(ToRegister(left), shift_count);
   1800         }
   1801         break;
   1802       case Token::SAR:
   1803         if (shift_count != 0) {
   1804           __ sar(ToRegister(left), shift_count);
   1805         }
   1806         break;
   1807       case Token::SHR:
   1808         if (shift_count == 0 && instr->can_deopt()) {
   1809           __ test(ToRegister(left), Immediate(0x80000000));
   1810           DeoptimizeIf(not_zero, instr->environment());
   1811         } else {
   1812           __ shr(ToRegister(left), shift_count);
   1813         }
   1814         break;
   1815       case Token::SHL:
   1816         if (shift_count != 0) {
   1817           if (instr->hydrogen_value()->representation().IsSmi() &&
   1818               instr->can_deopt()) {
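                        // Shift one bit less than requested and let SmiTag
                        // perform the final doubling, so that the overflow
                        // check below covers the smi tagging step as well.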
   1819             if (shift_count != 1) {
   1820               __ shl(ToRegister(left), shift_count - 1);
   1821             }
   1822             __ SmiTag(ToRegister(left));
   1823             DeoptimizeIf(overflow, instr->environment());
   1824           } else {
   1825             __ shl(ToRegister(left), shift_count);
   1826           }
   1827         }
   1828         break;
   1829       default:
   1830         UNREACHABLE();
   1831         break;
   1832     }
   1833   }
   1834 }
   1835 
   1836 
   1837 void LCodeGen::DoSubI(LSubI* instr) {
   1838   LOperand* left = instr->left();
   1839   LOperand* right = instr->right();
   1840   ASSERT(left->Equals(instr->result()));
   1841 
   1842   if (right->IsConstantOperand()) {
   1843     __ sub(ToOperand(left),
   1844            ToImmediate(right, instr->hydrogen()->representation()));
   1845   } else {
   1846     __ sub(ToRegister(left), ToOperand(right));
   1847   }
   1848   if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
   1849     DeoptimizeIf(overflow, instr->environment());
   1850   }
   1851 }
   1852 
   1853 
   1854 void LCodeGen::DoConstantI(LConstantI* instr) {
   1855   __ Set(ToRegister(instr->result()), Immediate(instr->value()));
   1856 }
   1857 
   1858 
   1859 void LCodeGen::DoConstantS(LConstantS* instr) {
   1860   __ Set(ToRegister(instr->result()), Immediate(instr->value()));
   1861 }
   1862 
   1863 
   1864 void LCodeGen::DoConstantD(LConstantD* instr) {
   1865   double v = instr->value();
   1866   uint64_t int_val = BitCast<uint64_t, double>(v);
   1867   int32_t lower = static_cast<int32_t>(int_val);
   1868   int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
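          // e.g. 1.0 has the IEEE 754 bit pattern 0x3FF0000000000000, giving
          // lower == 0 and upper == 0x3FF00000.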
   1869 
   1870   if (!CpuFeatures::IsSafeForSnapshot(SSE2)) {
   1871     __ push(Immediate(upper));
   1872     __ push(Immediate(lower));
   1873     X87Mov(ToX87Register(instr->result()), Operand(esp, 0));
   1874     __ add(Operand(esp), Immediate(kDoubleSize));
   1875   } else {
   1876     CpuFeatureScope scope1(masm(), SSE2);
   1877     ASSERT(instr->result()->IsDoubleRegister());
   1878     XMMRegister res = ToDoubleRegister(instr->result());
   1879     if (int_val == 0) {
   1880       __ xorps(res, res);
   1881     } else {
   1882       Register temp = ToRegister(instr->temp());
   1883       if (CpuFeatures::IsSupported(SSE4_1)) {
   1884         CpuFeatureScope scope2(masm(), SSE4_1);
   1885         if (lower != 0) {
   1886           __ Set(temp, Immediate(lower));
   1887           __ movd(res, Operand(temp));
   1888           __ Set(temp, Immediate(upper));
   1889           __ pinsrd(res, Operand(temp), 1);
   1890         } else {
   1891           __ xorps(res, res);
   1892           __ Set(temp, Immediate(upper));
   1893           __ pinsrd(res, Operand(temp), 1);
   1894         }
   1895       } else {
   1896         __ Set(temp, Immediate(upper));
   1897         __ movd(res, Operand(temp));
   1898         __ psllq(res, 32);
   1899         if (lower != 0) {
   1900           __ Set(temp, Immediate(lower));
   1901           __ movd(xmm0, Operand(temp));
   1902           __ por(res, xmm0);
   1903         }
   1904       }
   1905     }
   1906   }
   1907 }
   1908 
   1909 
   1910 void LCodeGen::DoConstantE(LConstantE* instr) {
   1911   __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value()));
   1912 }
   1913 
   1914 
   1915 void LCodeGen::DoConstantT(LConstantT* instr) {
   1916   Register reg = ToRegister(instr->result());
   1917   Handle<Object> handle = instr->value();
   1918   AllowDeferredHandleDereference smi_check;
   1919   __ LoadObject(reg, handle);
   1920 }
   1921 
   1922 
   1923 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
   1924   Register result = ToRegister(instr->result());
   1925   Register map = ToRegister(instr->value());
   1926   __ EnumLength(result, map);
   1927 }
   1928 
   1929 
   1930 void LCodeGen::DoElementsKind(LElementsKind* instr) {
   1931   Register result = ToRegister(instr->result());
   1932   Register input = ToRegister(instr->value());
   1933 
   1934   // Load map into |result|.
   1935   __ mov(result, FieldOperand(input, HeapObject::kMapOffset));
   1936   // Load the map's "bit field 2" into |result|. We only need the first byte,
   1937   // but the following masking takes care of that anyway.
   1938   __ mov(result, FieldOperand(result, Map::kBitField2Offset));
   1939   // Retrieve elements_kind from bit field 2.
   1940   __ and_(result, Map::kElementsKindMask);
   1941   __ shr(result, Map::kElementsKindShift);
   1942 }
   1943 
   1944 
   1945 void LCodeGen::DoValueOf(LValueOf* instr) {
   1946   Register input = ToRegister(instr->value());
   1947   Register result = ToRegister(instr->result());
   1948   Register map = ToRegister(instr->temp());
   1949   ASSERT(input.is(result));
   1950 
   1951   Label done;
   1952 
   1953   if (!instr->hydrogen()->value()->IsHeapObject()) {
   1954     // If the object is a smi, return the object.
   1955     __ JumpIfSmi(input, &done, Label::kNear);
   1956   }
   1957 
   1958   // If the object is not a value type, return the object.
   1959   __ CmpObjectType(input, JS_VALUE_TYPE, map);
   1960   __ j(not_equal, &done, Label::kNear);
   1961   __ mov(result, FieldOperand(input, JSValue::kValueOffset));
   1962 
   1963   __ bind(&done);
   1964 }
   1965 
   1966 
   1967 void LCodeGen::DoDateField(LDateField* instr) {
   1968   Register object = ToRegister(instr->date());
   1969   Register result = ToRegister(instr->result());
   1970   Register scratch = ToRegister(instr->temp());
   1971   Smi* index = instr->index();
   1972   Label runtime, done;
   1973   ASSERT(object.is(result));
   1974   ASSERT(object.is(eax));
   1975 
   1976   __ test(object, Immediate(kSmiTagMask));
   1977   DeoptimizeIf(zero, instr->environment());
   1978   __ CmpObjectType(object, JS_DATE_TYPE, scratch);
   1979   DeoptimizeIf(not_equal, instr->environment());
   1980 
   1981   if (index->value() == 0) {
   1982     __ mov(result, FieldOperand(object, JSDate::kValueOffset));
   1983   } else {
   1984     if (index->value() < JSDate::kFirstUncachedField) {
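              // Cached date fields are valid only while the isolate's date
              // cache stamp is unchanged; on a stamp mismatch, fall through to
              // the runtime call below.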
   1985       ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
   1986       __ mov(scratch, Operand::StaticVariable(stamp));
   1987       __ cmp(scratch, FieldOperand(object, JSDate::kCacheStampOffset));
   1988       __ j(not_equal, &runtime, Label::kNear);
   1989       __ mov(result, FieldOperand(object, JSDate::kValueOffset +
   1990                                           kPointerSize * index->value()));
   1991       __ jmp(&done);
   1992     }
   1993     __ bind(&runtime);
   1994     __ PrepareCallCFunction(2, scratch);
   1995     __ mov(Operand(esp, 0), object);
   1996     __ mov(Operand(esp, 1 * kPointerSize), Immediate(index));
   1997     __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
   1998     __ bind(&done);
   1999   }
   2000 }
   2001 
   2002 
   2003 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
   2004   Register string = ToRegister(instr->string());
   2005   Register index = ToRegister(instr->index());
   2006   Register value = ToRegister(instr->value());
   2007   String::Encoding encoding = instr->encoding();
   2008 
   2009   if (FLAG_debug_code) {
   2010     __ push(value);
   2011     __ mov(value, FieldOperand(string, HeapObject::kMapOffset));
   2012     __ movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
   2013 
   2014     __ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
   2015     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
   2016     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
   2017     __ cmp(value, Immediate(encoding == String::ONE_BYTE_ENCODING
   2018                                 ? one_byte_seq_type : two_byte_seq_type));
   2019     __ Check(equal, kUnexpectedStringType);
   2020     __ pop(value);
   2021   }
   2022 
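          // One-byte strings store one byte per character and two-byte strings
          // two, hence the times_1 / times_2 index scaling.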
   2023   if (encoding == String::ONE_BYTE_ENCODING) {
   2024     __ mov_b(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
   2025              value);
   2026   } else {
   2027     __ mov_w(FieldOperand(string, index, times_2, SeqString::kHeaderSize),
   2028              value);
   2029   }
   2030 }
   2031 
   2032 
   2033 void LCodeGen::DoThrow(LThrow* instr) {
   2034   __ push(ToOperand(instr->value()));
   2035   ASSERT(ToRegister(instr->context()).is(esi));
   2036   CallRuntime(Runtime::kThrow, 1, instr);
   2037 
   2038   if (FLAG_debug_code) {
   2039     Comment("Unreachable code.");
   2040     __ int3();
   2041   }
   2042 }
   2043 
   2044 
   2045 void LCodeGen::DoAddI(LAddI* instr) {
   2046   LOperand* left = instr->left();
   2047   LOperand* right = instr->right();
   2048 
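          // When the result is allocated to a different register, lea adds
          // without clobbering either input. lea does not set the CPU flags,
          // so no overflow check is emitted on this path; the add path below
          // checks when required.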
   2049   if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
   2050     if (right->IsConstantOperand()) {
   2051       int32_t offset = ToRepresentation(LConstantOperand::cast(right),
   2052                                         instr->hydrogen()->representation());
   2053       __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
   2054     } else {
   2055       Operand address(ToRegister(left), ToRegister(right), times_1, 0);
   2056       __ lea(ToRegister(instr->result()), address);
   2057     }
   2058   } else {
   2059     if (right->IsConstantOperand()) {
   2060       __ add(ToOperand(left),
   2061              ToImmediate(right, instr->hydrogen()->representation()));
   2062     } else {
   2063       __ add(ToRegister(left), ToOperand(right));
   2064     }
   2065     if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
   2066       DeoptimizeIf(overflow, instr->environment());
   2067     }
   2068   }
   2069 }
   2070 
   2071 
   2072 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
   2073   CpuFeatureScope scope(masm(), SSE2);
   2074   LOperand* left = instr->left();
   2075   LOperand* right = instr->right();
   2076   ASSERT(left->Equals(instr->result()));
   2077   HMathMinMax::Operation operation = instr->hydrogen()->operation();
   2078   if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
   2079     Label return_left;
   2080     Condition condition = (operation == HMathMinMax::kMathMin)
   2081         ? less_equal
   2082         : greater_equal;
   2083     if (right->IsConstantOperand()) {
   2084       Operand left_op = ToOperand(left);
   2085       Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()),
   2086                                         instr->hydrogen()->representation());
   2087       __ cmp(left_op, immediate);
   2088       __ j(condition, &return_left, Label::kNear);
   2089       __ mov(left_op, immediate);
   2090     } else {
   2091       Register left_reg = ToRegister(left);
   2092       Operand right_op = ToOperand(right);
   2093       __ cmp(left_reg, right_op);
   2094       __ j(condition, &return_left, Label::kNear);
   2095       __ mov(left_reg, right_op);
   2096     }
   2097     __ bind(&return_left);
   2098   } else {
   2099     ASSERT(instr->hydrogen()->representation().IsDouble());
   2100     Label check_nan_left, check_zero, return_left, return_right;
   2101     Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
   2102     XMMRegister left_reg = ToDoubleRegister(left);
   2103     XMMRegister right_reg = ToDoubleRegister(right);
   2104     __ ucomisd(left_reg, right_reg);
   2105     __ j(parity_even, &check_nan_left, Label::kNear);  // At least one NaN.
   2106     __ j(equal, &check_zero, Label::kNear);  // left == right.
   2107     __ j(condition, &return_left, Label::kNear);
   2108     __ jmp(&return_right, Label::kNear);
   2109 
   2110     __ bind(&check_zero);
   2111     XMMRegister xmm_scratch = xmm0;
   2112     __ xorps(xmm_scratch, xmm_scratch);
   2113     __ ucomisd(left_reg, xmm_scratch);
   2114     __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.
   2115     // At this point, both left and right are either 0 or -0.
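            // min(+0, -0) must be -0, so OR the sign bits together;
            // max(+0, -0) must be +0, which adding the two zeros yields.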
   2116     if (operation == HMathMinMax::kMathMin) {
   2117       __ orpd(left_reg, right_reg);
   2118     } else {
   2119       // Since we operate on +0 and/or -0, addsd and andpd have the same effect.
   2120       __ addsd(left_reg, right_reg);
   2121     }
   2122     __ jmp(&return_left, Label::kNear);
   2123 
   2124     __ bind(&check_nan_left);
   2125     __ ucomisd(left_reg, left_reg);  // NaN check.
   2126     __ j(parity_even, &return_left, Label::kNear);  // left == NaN.
   2127     __ bind(&return_right);
   2128     __ movsd(left_reg, right_reg);
   2129 
   2130     __ bind(&return_left);
   2131   }
   2132 }
   2133 
   2134 
   2135 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
   2136   if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
   2137     CpuFeatureScope scope(masm(), SSE2);
   2138     XMMRegister left = ToDoubleRegister(instr->left());
   2139     XMMRegister right = ToDoubleRegister(instr->right());
   2140     XMMRegister result = ToDoubleRegister(instr->result());
   2141     // Modulo uses a fixed result register.
   2142     ASSERT(instr->op() == Token::MOD || left.is(result));
   2143     switch (instr->op()) {
   2144       case Token::ADD:
   2145         __ addsd(left, right);
   2146         break;
   2147       case Token::SUB:
   2148         __ subsd(left, right);
   2149         break;
   2150       case Token::MUL:
   2151         __ mulsd(left, right);
   2152         break;
   2153       case Token::DIV:
   2154         __ divsd(left, right);
   2155         // Don't delete this mov. It may improve performance on some CPUs
   2156         // when there is a mulsd depending on the result.
   2157         __ movaps(left, left);
   2158         break;
   2159       case Token::MOD: {
   2160         // Pass two doubles as arguments on the stack.
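                // (Four 32-bit argument slots: each double occupies two.)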
   2161         __ PrepareCallCFunction(4, eax);
   2162         __ movdbl(Operand(esp, 0 * kDoubleSize), left);
   2163         __ movdbl(Operand(esp, 1 * kDoubleSize), right);
   2164         __ CallCFunction(
   2165             ExternalReference::double_fp_operation(Token::MOD, isolate()),
   2166             4);
   2167 
   2168         // Return value is in st(0) on ia32.
   2169         // Store it into the (fixed) result register.
   2170         __ sub(Operand(esp), Immediate(kDoubleSize));
   2171         __ fstp_d(Operand(esp, 0));
   2172         __ movdbl(result, Operand(esp, 0));
   2173         __ add(Operand(esp), Immediate(kDoubleSize));
   2174         break;
   2175       }
   2176       default:
   2177         UNREACHABLE();
   2178         break;
   2179     }
   2180   } else {
   2181     X87Register left = ToX87Register(instr->left());
   2182     X87Register right = ToX87Register(instr->right());
   2183     X87Register result = ToX87Register(instr->result());
   2184     X87PrepareBinaryOp(left, right, result);
   2185     switch (instr->op()) {
   2186       case Token::MUL:
   2187         __ fmul_i(1);
   2188         break;
   2189       default:
   2190         UNREACHABLE();
   2191         break;
   2192     }
   2193   }
   2194 }
   2195 
   2196 
   2197 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
   2198   ASSERT(ToRegister(instr->context()).is(esi));
   2199   ASSERT(ToRegister(instr->left()).is(edx));
   2200   ASSERT(ToRegister(instr->right()).is(eax));
   2201   ASSERT(ToRegister(instr->result()).is(eax));
   2202 
   2203   BinaryOpStub stub(instr->op(), NO_OVERWRITE);
   2204   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
   2205   __ nop();  // Signals no inlined code.
   2206 }
   2207 
   2208 
   2209 int LCodeGen::GetNextEmittedBlock() const {
   2210   for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
   2211     if (!chunk_->GetLabel(i)->HasReplacement()) return i;
   2212   }
   2213   return -1;
   2214 }
   2215 
   2216 
   2217 template<class InstrType>
   2218 void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
   2219   int left_block = instr->TrueDestination(chunk_);
   2220   int right_block = instr->FalseDestination(chunk_);
   2221 
   2222   int next_block = GetNextEmittedBlock();
   2223 
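          // Emit at most one jump: fall through whenever a destination is the
          // next block to be generated, inverting the condition if necessary.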
   2224   if (right_block == left_block || cc == no_condition) {
   2225     EmitGoto(left_block);
   2226   } else if (left_block == next_block) {
   2227     __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
   2228   } else if (right_block == next_block) {
   2229     __ j(cc, chunk_->GetAssemblyLabel(left_block));
   2230   } else {
   2231     __ j(cc, chunk_->GetAssemblyLabel(left_block));
   2232     __ jmp(chunk_->GetAssemblyLabel(right_block));
   2233   }
   2234 }
   2235 
   2236 
   2237 template<class InstrType>
   2238 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
   2239   int false_block = instr->FalseDestination(chunk_);
   2240   if (cc == no_condition) {
   2241     __ jmp(chunk_->GetAssemblyLabel(false_block));
   2242   } else {
   2243     __ j(cc, chunk_->GetAssemblyLabel(false_block));
   2244   }
   2245 }
   2246 
   2247 
   2248 void LCodeGen::DoIsNumberAndBranch(LIsNumberAndBranch* instr) {
   2249   Representation r = instr->hydrogen()->value()->representation();
   2250   if (r.IsSmiOrInteger32() || r.IsDouble()) {
   2251     EmitBranch(instr, no_condition);
   2252   } else {
   2253     ASSERT(r.IsTagged());
   2254     Register reg = ToRegister(instr->value());
   2255     HType type = instr->hydrogen()->value()->type();
   2256     if (type.IsTaggedNumber()) {
   2257       EmitBranch(instr, no_condition);
   2258     }
   2259     __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
   2260     __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
   2261            factory()->heap_number_map());
   2262     EmitBranch(instr, equal);
   2263   }
   2264 }
   2265 
   2266 
   2267 void LCodeGen::DoBranch(LBranch* instr) {
   2268   Representation r = instr->hydrogen()->value()->representation();
   2269   if (r.IsSmiOrInteger32()) {
   2270     ASSERT(!info()->IsStub());
   2271     Register reg = ToRegister(instr->value());
   2272     __ test(reg, Operand(reg));
   2273     EmitBranch(instr, not_zero);
   2274   } else if (r.IsDouble()) {
   2275     ASSERT(!info()->IsStub());
   2276     CpuFeatureScope scope(masm(), SSE2);
   2277     XMMRegister reg = ToDoubleRegister(instr->value());
   2278     __ xorps(xmm0, xmm0);
   2279     __ ucomisd(reg, xmm0);
   2280     EmitBranch(instr, not_equal);
   2281   } else {
   2282     ASSERT(r.IsTagged());
   2283     Register reg = ToRegister(instr->value());
   2284     HType type = instr->hydrogen()->value()->type();
   2285     if (type.IsBoolean()) {
   2286       ASSERT(!info()->IsStub());
   2287       __ cmp(reg, factory()->true_value());
   2288       EmitBranch(instr, equal);
   2289     } else if (type.IsSmi()) {
   2290       ASSERT(!info()->IsStub());
   2291       __ test(reg, Operand(reg));
   2292       EmitBranch(instr, not_equal);
   2293     } else if (type.IsJSArray()) {
   2294       ASSERT(!info()->IsStub());
   2295       EmitBranch(instr, no_condition);
   2296     } else if (type.IsHeapNumber()) {
   2297       ASSERT(!info()->IsStub());
   2298       CpuFeatureScope scope(masm(), SSE2);
   2299       __ xorps(xmm0, xmm0);
   2300       __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
   2301       EmitBranch(instr, not_equal);
   2302     } else if (type.IsString()) {
   2303       ASSERT(!info()->IsStub());
   2304       __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
   2305       EmitBranch(instr, not_equal);
   2306     } else {
   2307       ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
   2308       if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
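              // Only the value classes recorded by type feedback are checked
              // inline; any other value falls through to the deoptimization at
              // the end.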
   2309 
   2310       if (expected.Contains(ToBooleanStub::UNDEFINED)) {
   2311         // undefined -> false.
   2312         __ cmp(reg, factory()->undefined_value());
   2313         __ j(equal, instr->FalseLabel(chunk_));
   2314       }
   2315       if (expected.Contains(ToBooleanStub::BOOLEAN)) {
   2316         // true -> true.
   2317         __ cmp(reg, factory()->true_value());
   2318         __ j(equal, instr->TrueLabel(chunk_));
   2319         // false -> false.
   2320         __ cmp(reg, factory()->false_value());
   2321         __ j(equal, instr->FalseLabel(chunk_));
   2322       }
   2323       if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
   2324         // 'null' -> false.
   2325         __ cmp(reg, factory()->null_value());
   2326         __ j(equal, instr->FalseLabel(chunk_));
   2327       }
   2328 
   2329       if (expected.Contains(ToBooleanStub::SMI)) {
   2330         // Smis: 0 -> false, all other -> true.
   2331         __ test(reg, Operand(reg));
   2332         __ j(equal, instr->FalseLabel(chunk_));
   2333         __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
   2334       } else if (expected.NeedsMap()) {
   2335         // If we need a map later and have a Smi -> deopt.
   2336         __ test(reg, Immediate(kSmiTagMask));
   2337         DeoptimizeIf(zero, instr->environment());
   2338       }
   2339 
   2340       Register map = no_reg;  // Keep the compiler happy.
   2341       if (expected.NeedsMap()) {
   2342         map = ToRegister(instr->temp());
   2343         ASSERT(!map.is(reg));
   2344         __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
   2345 
   2346         if (expected.CanBeUndetectable()) {
   2347           // Undetectable -> false.
   2348           __ test_b(FieldOperand(map, Map::kBitFieldOffset),
   2349                     1 << Map::kIsUndetectable);
   2350           __ j(not_zero, instr->FalseLabel(chunk_));
   2351         }
   2352       }
   2353 
   2354       if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
   2355         // spec object -> true.
   2356         __ CmpInstanceType(map, FIRST_SPEC_OBJECT_TYPE);
   2357         __ j(above_equal, instr->TrueLabel(chunk_));
   2358       }
   2359 
   2360       if (expected.Contains(ToBooleanStub::STRING)) {
   2361         // String value -> false iff empty.
   2362         Label not_string;
   2363         __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
   2364         __ j(above_equal, &not_string, Label::kNear);
   2365         __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
   2366         __ j(not_zero, instr->TrueLabel(chunk_));
   2367         __ jmp(instr->FalseLabel(chunk_));
   2368         __ bind(&not_string);
   2369       }
   2370 
   2371       if (expected.Contains(ToBooleanStub::SYMBOL)) {
   2372         // Symbol value -> true.
   2373         __ CmpInstanceType(map, SYMBOL_TYPE);
   2374         __ j(equal, instr->TrueLabel(chunk_));
   2375       }
   2376 
   2377       if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
   2378         // heap number -> false iff +0, -0, or NaN.
   2379         Label not_heap_number;
   2380         __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
   2381                factory()->heap_number_map());
   2382         __ j(not_equal, &not_heap_number, Label::kNear);
   2383         if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
   2384           CpuFeatureScope scope(masm(), SSE2);
   2385           __ xorps(xmm0, xmm0);
   2386           __ ucomisd(xmm0, FieldOperand(reg, HeapNumber::kValueOffset));
   2387         } else {
   2388           __ fldz();
   2389           __ fld_d(FieldOperand(reg, HeapNumber::kValueOffset));
   2390           __ FCmp();
   2391         }
   2392         __ j(zero, instr->FalseLabel(chunk_));
   2393         __ jmp(instr->TrueLabel(chunk_));
   2394         __ bind(&not_heap_number);
   2395       }
   2396 
   2397       if (!expected.IsGeneric()) {
   2398         // We've seen something for the first time -> deopt.
   2399         // This can only happen if we are not generic already.
   2400         DeoptimizeIf(no_condition, instr->environment());
   2401       }
   2402     }
   2403   }
   2404 }
   2405 
   2406 
   2407 void LCodeGen::EmitGoto(int block) {
   2408   if (!IsNextEmittedBlock(block)) {
   2409     __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
   2410   }
   2411 }
   2412 
   2413 
   2414 void LCodeGen::DoGoto(LGoto* instr) {
   2415   EmitGoto(instr->block_id());
   2416 }
   2417 
   2418 
   2419 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
   2420   Condition cond = no_condition;
   2421   switch (op) {
   2422     case Token::EQ:
   2423     case Token::EQ_STRICT:
   2424       cond = equal;
   2425       break;
   2426     case Token::LT:
   2427       cond = is_unsigned ? below : less;
   2428       break;
   2429     case Token::GT:
   2430       cond = is_unsigned ? above : greater;
   2431       break;
   2432     case Token::LTE:
   2433       cond = is_unsigned ? below_equal : less_equal;
   2434       break;
   2435     case Token::GTE:
   2436       cond = is_unsigned ? above_equal : greater_equal;
   2437       break;
   2438     case Token::IN:
   2439     case Token::INSTANCEOF:
   2440     default:
   2441       UNREACHABLE();
   2442   }
   2443   return cond;
   2444 }
   2445 
   2446 
   2447 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
   2448   LOperand* left = instr->left();
   2449   LOperand* right = instr->right();
   2450   Condition cc = TokenToCondition(instr->op(), instr->is_double());
   2451 
   2452   if (left->IsConstantOperand() && right->IsConstantOperand()) {
   2453     // We can statically evaluate the comparison.
   2454     double left_val = ToDouble(LConstantOperand::cast(left));
   2455     double right_val = ToDouble(LConstantOperand::cast(right));
   2456     int next_block = EvalComparison(instr->op(), left_val, right_val) ?
   2457         instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
   2458     EmitGoto(next_block);
   2459   } else {
   2460     if (instr->is_double()) {
   2461       CpuFeatureScope scope(masm(), SSE2);
   2462       // Don't base result on EFLAGS when a NaN is involved. Instead
   2463       // jump to the false block.
   2464       __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
   2465       __ j(parity_even, instr->FalseLabel(chunk_));
   2466     } else {
   2467       if (right->IsConstantOperand()) {
   2468         __ cmp(ToOperand(left),
   2469                ToImmediate(right, instr->hydrogen()->representation()));
   2470       } else if (left->IsConstantOperand()) {
   2471         __ cmp(ToOperand(right),
   2472                ToImmediate(left, instr->hydrogen()->representation()));
   2473         // We transposed the operands. Reverse the condition.
   2474         cc = ReverseCondition(cc);
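                // (e.g. 5 < x is evaluated as x > 5.)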
   2475       } else {
   2476         __ cmp(ToRegister(left), ToOperand(right));
   2477       }
   2478     }
   2479     EmitBranch(instr, cc);
   2480   }
   2481 }
   2482 
   2483 
   2484 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
   2485   Register left = ToRegister(instr->left());
   2486 
   2487   if (instr->right()->IsConstantOperand()) {
   2488     Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
   2489     __ CmpObject(left, right);
   2490   } else {
   2491     Operand right = ToOperand(instr->right());
   2492     __ cmp(left, right);
   2493   }
   2494   EmitBranch(instr, equal);
   2495 }
   2496 
   2497 
   2498 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
   2499   if (instr->hydrogen()->representation().IsTagged()) {
   2500     Register input_reg = ToRegister(instr->object());
   2501     __ cmp(input_reg, factory()->the_hole_value());
   2502     EmitBranch(instr, equal);
   2503     return;
   2504   }
   2505 
   2506   bool use_sse2 = CpuFeatures::IsSupported(SSE2);
   2507   if (use_sse2) {
   2508     CpuFeatureScope scope(masm(), SSE2);
   2509     XMMRegister input_reg = ToDoubleRegister(instr->object());
   2510     __ ucomisd(input_reg, input_reg);
   2511     EmitFalseBranch(instr, parity_odd);
   2512   } else {
   2513     // Put the value on top of the x87 stack.
   2514     X87Register src = ToX87Register(instr->object());
   2515     X87LoadForUsage(src);
   2516     __ fld(0);
   2517     __ fld(0);
   2518     __ FCmp();
   2519     Label ok;
   2520     __ j(parity_even, &ok);
   2521     __ fstp(0);
   2522     EmitFalseBranch(instr, no_condition);
   2523     __ bind(&ok);
   2524   }
   2525 
   2526 
   2527   __ sub(esp, Immediate(kDoubleSize));
   2528   if (use_sse2) {
   2529     CpuFeatureScope scope(masm(), SSE2);
   2530     XMMRegister input_reg = ToDoubleRegister(instr->object());
   2531     __ movdbl(MemOperand(esp, 0), input_reg);
   2532   } else {
   2533     __ fstp_d(MemOperand(esp, 0));
   2534   }
   2535 
   2536   __ add(esp, Immediate(kDoubleSize));
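          // The double was stored in the two words below esp; its upper word,
          // which identifies the hole NaN, is at esp - 4.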
   2537   int offset = sizeof(kHoleNanUpper32);
   2538   __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
   2539   EmitBranch(instr, equal);
   2540 }
   2541 
   2542 
   2543 Condition LCodeGen::EmitIsObject(Register input,
   2544                                  Register temp1,
   2545                                  Label* is_not_object,
   2546                                  Label* is_object) {
   2547   __ JumpIfSmi(input, is_not_object);
   2548 
   2549   __ cmp(input, isolate()->factory()->null_value());
   2550   __ j(equal, is_object);
   2551 
   2552   __ mov(temp1, FieldOperand(input, HeapObject::kMapOffset));
   2553   // Undetectable objects behave like undefined.
   2554   __ test_b(FieldOperand(temp1, Map::kBitFieldOffset),
   2555             1 << Map::kIsUndetectable);
   2556   __ j(not_zero, is_not_object);
   2557 
   2558   __ movzx_b(temp1, FieldOperand(temp1, Map::kInstanceTypeOffset));
   2559   __ cmp(temp1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
   2560   __ j(below, is_not_object);
   2561   __ cmp(temp1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
   2562   return below_equal;
   2563 }
   2564 
   2565 
   2566 void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
   2567   Register reg = ToRegister(instr->value());
   2568   Register temp = ToRegister(instr->temp());
   2569 
   2570   Condition true_cond = EmitIsObject(
   2571       reg, temp, instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
   2572 
   2573   EmitBranch(instr, true_cond);
   2574 }
   2575 
   2576 
   2577 Condition LCodeGen::EmitIsString(Register input,
   2578                                  Register temp1,
   2579                                  Label* is_not_string,
   2580                                  SmiCheck check_needed = INLINE_SMI_CHECK) {
   2581   if (check_needed == INLINE_SMI_CHECK) {
   2582     __ JumpIfSmi(input, is_not_string);
   2583   }
   2584 
   2585   Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
   2586 
   2587   return cond;
   2588 }
   2589 
   2590 
   2591 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
   2592   Register reg = ToRegister(instr->value());
   2593   Register temp = ToRegister(instr->temp());
   2594 
   2595   SmiCheck check_needed =
   2596       instr->hydrogen()->value()->IsHeapObject()
   2597           ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   2598 
   2599   Condition true_cond = EmitIsString(
   2600       reg, temp, instr->FalseLabel(chunk_), check_needed);
   2601 
   2602   EmitBranch(instr, true_cond);
   2603 }
   2604 
   2605 
   2606 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
   2607   Operand input = ToOperand(instr->value());
   2608 
   2609   __ test(input, Immediate(kSmiTagMask));
   2610   EmitBranch(instr, zero);
   2611 }
   2612 
   2613 
   2614 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
   2615   Register input = ToRegister(instr->value());
   2616   Register temp = ToRegister(instr->temp());
   2617 
   2618   if (!instr->hydrogen()->value()->IsHeapObject()) {
   2619     STATIC_ASSERT(kSmiTag == 0);
   2620     __ JumpIfSmi(input, instr->FalseLabel(chunk_));
   2621   }
   2622   __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
   2623   __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
   2624             1 << Map::kIsUndetectable);
   2625   EmitBranch(instr, not_zero);
   2626 }
   2627 
   2628 
   2629 static Condition ComputeCompareCondition(Token::Value op) {
   2630   switch (op) {
   2631     case Token::EQ_STRICT:
   2632     case Token::EQ:
   2633       return equal;
   2634     case Token::LT:
   2635       return less;
   2636     case Token::GT:
   2637       return greater;
   2638     case Token::LTE:
   2639       return less_equal;
   2640     case Token::GTE:
   2641       return greater_equal;
   2642     default:
   2643       UNREACHABLE();
   2644       return no_condition;
   2645   }
   2646 }
   2647 
   2648 
   2649 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
   2650   Token::Value op = instr->op();
   2651 
   2652   Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
   2653   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   2654 
   2655   Condition condition = ComputeCompareCondition(op);
   2656   __ test(eax, Operand(eax));
   2657 
   2658   EmitBranch(instr, condition);
   2659 }
   2660 
   2661 
   2662 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
   2663   InstanceType from = instr->from();
   2664   InstanceType to = instr->to();
   2665   if (from == FIRST_TYPE) return to;
   2666   ASSERT(from == to || to == LAST_TYPE);
   2667   return from;
   2668 }
   2669 
   2670 
   2671 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
   2672   InstanceType from = instr->from();
   2673   InstanceType to = instr->to();
   2674   if (from == to) return equal;
   2675   if (to == LAST_TYPE) return above_equal;
   2676   if (from == FIRST_TYPE) return below_equal;
   2677   UNREACHABLE();
   2678   return equal;
   2679 }
   2680 
   2681 
   2682 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
   2683   Register input = ToRegister(instr->value());
   2684   Register temp = ToRegister(instr->temp());
   2685 
   2686   if (!instr->hydrogen()->value()->IsHeapObject()) {
   2687     __ JumpIfSmi(input, instr->FalseLabel(chunk_));
   2688   }
   2689 
   2690   __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
   2691   EmitBranch(instr, BranchCondition(instr->hydrogen()));
   2692 }
   2693 
   2694 
   2695 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
   2696   Register input = ToRegister(instr->value());
   2697   Register result = ToRegister(instr->result());
   2698 
   2699   __ AssertString(input);
   2700 
   2701   __ mov(result, FieldOperand(input, String::kHashFieldOffset));
   2702   __ IndexFromHash(result, result);
   2703 }
   2704 
   2705 
   2706 void LCodeGen::DoHasCachedArrayIndexAndBranch(
   2707     LHasCachedArrayIndexAndBranch* instr) {
   2708   Register input = ToRegister(instr->value());
   2709 
   2710   __ test(FieldOperand(input, String::kHashFieldOffset),
   2711           Immediate(String::kContainsCachedArrayIndexMask));
   2712   EmitBranch(instr, equal);
   2713 }
   2714 
   2715 
   2716 // Branches to a label or falls through with the answer in the z flag.  Trashes
   2717 // the temp registers, but not the input.
   2718 void LCodeGen::EmitClassOfTest(Label* is_true,
   2719                                Label* is_false,
   2720                                Handle<String> class_name,
   2721                                Register input,
   2722                                Register temp,
   2723                                Register temp2) {
   2724   ASSERT(!input.is(temp));
   2725   ASSERT(!input.is(temp2));
   2726   ASSERT(!temp.is(temp2));
   2727   __ JumpIfSmi(input, is_false);
   2728 
   2729   if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
   2730     // Assuming the following assertions, we can use the same compares to test
   2731     // for both being a function type and being in the object type range.
   2732     STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
   2733     STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
   2734                   FIRST_SPEC_OBJECT_TYPE + 1);
   2735     STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
   2736                   LAST_SPEC_OBJECT_TYPE - 1);
   2737     STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
   2738     __ CmpObjectType(input, FIRST_SPEC_OBJECT_TYPE, temp);
   2739     __ j(below, is_false);
   2740     __ j(equal, is_true);
   2741     __ CmpInstanceType(temp, LAST_SPEC_OBJECT_TYPE);
   2742     __ j(equal, is_true);
   2743   } else {
   2744     // Faster code path to avoid two compares: subtract lower bound from the
   2745     // actual type and do an unsigned compare with the width of the type range.
   2746     __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
   2747     __ movzx_b(temp2, FieldOperand(temp, Map::kInstanceTypeOffset));
   2748     __ sub(Operand(temp2), Immediate(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
   2749     __ cmp(Operand(temp2), Immediate(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
   2750                                      FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
   2751     __ j(above, is_false);
   2752   }
   2753 
   2754   // Now we are in the FIRST_ to LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
   2755   // Check if the constructor in the map is a function.
   2756   __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
   2757   // Objects with a non-function constructor have class 'Object'.
   2758   __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
   2759   if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
   2760     __ j(not_equal, is_true);
   2761   } else {
   2762     __ j(not_equal, is_false);
   2763   }
   2764 
   2765   // temp now contains the constructor function. Grab the
   2766   // instance class name from there.
   2767   __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
   2768   __ mov(temp, FieldOperand(temp,
   2769                             SharedFunctionInfo::kInstanceClassNameOffset));
   2770   // The class name we are testing against is internalized since it's a literal.
   2771   // The name in the constructor is internalized because of the way the context
   2772   // is booted.  This routine isn't expected to work for random API-created
   2773   // classes and it doesn't have to because you can't access it with natives
   2774   // syntax.  Since both sides are internalized it is sufficient to use an
   2775   // identity comparison.
   2776   __ cmp(temp, class_name);
   2777   // End with the answer in the z flag.
   2778 }
   2779 
   2780 
   2781 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
   2782   Register input = ToRegister(instr->value());
   2783   Register temp = ToRegister(instr->temp());
   2784   Register temp2 = ToRegister(instr->temp2());
   2785 
   2786   Handle<String> class_name = instr->hydrogen()->class_name();
   2787 
   2788   EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
   2789       class_name, input, temp, temp2);
   2790 
   2791   EmitBranch(instr, equal);
   2792 }
   2793 
   2794 
   2795 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
   2796   Register reg = ToRegister(instr->value());
   2797   __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
   2798   EmitBranch(instr, equal);
   2799 }
   2800 
   2801 
   2802 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
   2803   // Object and function are in fixed registers defined by the stub.
   2804   ASSERT(ToRegister(instr->context()).is(esi));
   2805   InstanceofStub stub(InstanceofStub::kArgsInRegisters);
   2806   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
   2807 
   2808   Label true_value, done;
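          // The stub returns zero (in eax) when the object is an instance.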
   2809   __ test(eax, Operand(eax));
   2810   __ j(zero, &true_value, Label::kNear);
   2811   __ mov(ToRegister(instr->result()), factory()->false_value());
   2812   __ jmp(&done, Label::kNear);
   2813   __ bind(&true_value);
   2814   __ mov(ToRegister(instr->result()), factory()->true_value());
   2815   __ bind(&done);
   2816 }
   2817 
   2818 
   2819 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
   2820   class DeferredInstanceOfKnownGlobal: public LDeferredCode {
   2821    public:
   2822     DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
   2823                                   LInstanceOfKnownGlobal* instr)
   2824         : LDeferredCode(codegen), instr_(instr) { }
   2825     virtual void Generate() {
   2826       codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
   2827     }
   2828     virtual LInstruction* instr() { return instr_; }
   2829     Label* map_check() { return &map_check_; }
   2830    private:
   2831     LInstanceOfKnownGlobal* instr_;
   2832     Label map_check_;
   2833   };
   2834 
   2835   DeferredInstanceOfKnownGlobal* deferred;
   2836   deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
   2837 
   2838   Label done, false_result;
   2839   Register object = ToRegister(instr->value());
   2840   Register temp = ToRegister(instr->temp());
   2841 
   2842   // A Smi is not an instance of anything.
   2843   __ JumpIfSmi(object, &false_result);
   2844 
   2845   // This is the inlined call site instanceof cache. The two occurrences of the
   2846   // hole value will be patched to the last map/result pair generated by the
   2847   // instanceof stub.
   2848   Label cache_miss;
   2849   Register map = ToRegister(instr->temp());
   2850   __ mov(map, FieldOperand(object, HeapObject::kMapOffset));
   2851   __ bind(deferred->map_check());  // Label for calculating code patching.
   2852   Handle<Cell> cache_cell = factory()->NewCell(factory()->the_hole_value());
   2853   __ cmp(map, Operand::ForCell(cache_cell));  // Patched to cached map.
   2854   __ j(not_equal, &cache_miss, Label::kNear);
   2855   __ mov(eax, factory()->the_hole_value());  // Patched to either true or false.
   2856   __ jmp(&done);
   2857 
   2858   // The inlined call site cache did not match. Check for null and string
   2859   // before calling the deferred code.
   2860   __ bind(&cache_miss);
   2861   // Null is not an instance of anything.
   2862   __ cmp(object, factory()->null_value());
   2863   __ j(equal, &false_result);
   2864 
   2865   // String values are not instances of anything.
   2866   Condition is_string = masm_->IsObjectStringType(object, temp, temp);
   2867   __ j(is_string, &false_result);
   2868 
   2869   // Go to the deferred code.
   2870   __ jmp(deferred->entry());
   2871 
   2872   __ bind(&false_result);
   2873   __ mov(ToRegister(instr->result()), factory()->false_value());
   2874 
   2875   // Here the result holds either the true or the false object. The
   2876   // deferred code likewise produces one of these two objects.
   2877   __ bind(deferred->exit());
   2878   __ bind(&done);
   2879 }
   2880 
   2881 
   2882 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
   2883                                                Label* map_check) {
   2884   PushSafepointRegistersScope scope(this);
   2885 
   2886   InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
   2887   flags = static_cast<InstanceofStub::Flags>(
   2888       flags | InstanceofStub::kArgsInRegisters);
   2889   flags = static_cast<InstanceofStub::Flags>(
   2890       flags | InstanceofStub::kCallSiteInlineCheck);
   2891   flags = static_cast<InstanceofStub::Flags>(
   2892       flags | InstanceofStub::kReturnTrueFalseObject);
   2893   InstanceofStub stub(flags);
   2894 
   2895   // Get the temp register reserved by the instruction. This needs to be a
   2896   // register which is pushed last by PushSafepointRegisters, because the
   2897   // top of the stack is used to pass the offset from the map check to
   2898   // the stub.
   2899   Register temp = ToRegister(instr->temp());
   2900   ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
   2901   __ LoadHeapObject(InstanceofStub::right(), instr->function());
   2902   static const int kAdditionalDelta = 13;
   2903   int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
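          // Note (a sketch, not a byte-exact account): delta is the distance from
          // the patched cmp emitted at map_check to the return address of the stub
          // call below; kAdditionalDelta covers the mov and safepoint-slot store
          // emitted in between. The stub subtracts delta from its return address
          // to locate and patch the inlined cache.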
   2904   __ mov(temp, Immediate(delta));
   2905   __ StoreToSafepointRegisterSlot(temp, temp);
   2906   CallCodeGeneric(stub.GetCode(isolate()),
   2907                   RelocInfo::CODE_TARGET,
   2908                   instr,
   2909                   RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
   2910   // Get the deoptimization index of the LLazyBailout-environment that
   2911   // corresponds to this instruction.
   2912   LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
   2913   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
   2914 
   2915   // Put the result value into the eax slot and restore all registers.
   2916   __ StoreToSafepointRegisterSlot(eax, eax);
   2917 }
   2918 
   2919 
   2920 void LCodeGen::DoInstanceSize(LInstanceSize* instr) {
   2921   Register object = ToRegister(instr->object());
   2922   Register result = ToRegister(instr->result());
   2923   __ mov(result, FieldOperand(object, HeapObject::kMapOffset));
   2924   __ movzx_b(result, FieldOperand(result, Map::kInstanceSizeOffset));
   2925 }
   2926 
   2927 
   2928 void LCodeGen::DoCmpT(LCmpT* instr) {
   2929   Token::Value op = instr->op();
   2930 
   2931   Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
   2932   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   2933 
   2934   Condition condition = ComputeCompareCondition(op);
   2935   Label true_value, done;
   2936   __ test(eax, Operand(eax));
   2937   __ j(condition, &true_value, Label::kNear);
   2938   __ mov(ToRegister(instr->result()), factory()->false_value());
   2939   __ jmp(&done, Label::kNear);
   2940   __ bind(&true_value);
   2941   __ mov(ToRegister(instr->result()), factory()->true_value());
   2942   __ bind(&done);
   2943 }
   2944 
   2945 
   2946 void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
   2947   int extra_value_count = dynamic_frame_alignment ? 2 : 1;
   2948 
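          // Assumed stack layout at this point: esp[0] holds the return address,
          // the next parameter_count words hold the arguments, one extra word
          // holds the receiver, and with dynamic frame alignment a further word
          // holds the kAlignmentZapValue padding marker (checked below when
          // FLAG_debug_code is set).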
   2949   if (instr->has_constant_parameter_count()) {
   2950     int parameter_count = ToInteger32(instr->constant_parameter_count());
   2951     if (dynamic_frame_alignment && FLAG_debug_code) {
   2952       __ cmp(Operand(esp,
   2953                      (parameter_count + extra_value_count) * kPointerSize),
   2954              Immediate(kAlignmentZapValue));
   2955       __ Assert(equal, kExpectedAlignmentMarker);
   2956     }
   2957     __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
   2958   } else {
   2959     Register reg = ToRegister(instr->parameter_count());
   2960     // The argument count parameter is a smi.
   2961     __ SmiUntag(reg);
   2962     Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
   2963     if (dynamic_frame_alignment && FLAG_debug_code) {
   2964       ASSERT(extra_value_count == 2);
   2965       __ cmp(Operand(esp, reg, times_pointer_size,
   2966                      extra_value_count * kPointerSize),
   2967              Immediate(kAlignmentZapValue));
   2968       __ Assert(equal, kExpectedAlignmentMarker);
   2969     }
   2970 
   2971     // Emit code to restore the stack based on instr->parameter_count().
   2972     __ pop(return_addr_reg);  // Save the return address.
   2973     if (dynamic_frame_alignment) {
   2974       __ inc(reg);  // One more word for the alignment padding.
   2975     }
   2976     __ shl(reg, kPointerSizeLog2);
   2977     __ add(esp, reg);
   2978     __ jmp(return_addr_reg);
   2979   }
   2980 }
   2981 
   2982 
   2983 void LCodeGen::DoReturn(LReturn* instr) {
   2984   if (FLAG_trace && info()->IsOptimizing()) {
   2985     // Preserve the return value on the stack and rely on the runtime call
   2986     // to return the value in the same register.  We're leaving the code
   2987     // managed by the register allocator and tearing down the frame, so it's
   2988     // safe to write to the context register.
   2989     __ push(eax);
   2990     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   2991     __ CallRuntime(Runtime::kTraceExit, 1);
   2992   }
   2993   if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(SSE2)) {
   2994     ASSERT(NeedsEagerFrame());
   2995     CpuFeatureScope scope(masm(), SSE2);
   2996     BitVector* doubles = chunk()->allocated_double_registers();
   2997     BitVector::Iterator save_iterator(doubles);
   2998     int count = 0;
   2999     while (!save_iterator.Done()) {
   3000       __ movdbl(XMMRegister::FromAllocationIndex(save_iterator.Current()),
   3001                 MemOperand(esp, count * kDoubleSize));
   3002       save_iterator.Advance();
   3003       count++;
   3004     }
   3005   }
   3006   if (dynamic_frame_alignment_) {
   3007     // Fetch the state of the dynamic frame alignment.
   3008     __ mov(edx, Operand(ebp,
   3009       JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
   3010   }
   3011   int no_frame_start = -1;
   3012   if (NeedsEagerFrame()) {
   3013     __ mov(esp, ebp);
   3014     __ pop(ebp);
   3015     no_frame_start = masm_->pc_offset();
   3016   }
   3017   if (dynamic_frame_alignment_) {
   3018     Label no_padding;
   3019     __ cmp(edx, Immediate(kNoAlignmentPadding));
   3020     __ j(equal, &no_padding);
   3021 
   3022     EmitReturn(instr, true);
   3023     __ bind(&no_padding);
   3024   }
   3025 
   3026   EmitReturn(instr, false);
   3027   if (no_frame_start != -1) {
   3028     info()->AddNoFrameRange(no_frame_start, masm_->pc_offset());
   3029   }
   3030 }
   3031 
   3032 
   3033 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
   3034   Register result = ToRegister(instr->result());
   3035   __ mov(result, Operand::ForCell(instr->hydrogen()->cell()));
   3036   if (instr->hydrogen()->RequiresHoleCheck()) {
   3037     __ cmp(result, factory()->the_hole_value());
   3038     DeoptimizeIf(equal, instr->environment());
   3039   }
   3040 }
   3041 
   3042 
   3043 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
   3044   ASSERT(ToRegister(instr->context()).is(esi));
   3045   ASSERT(ToRegister(instr->global_object()).is(edx));
   3046   ASSERT(ToRegister(instr->result()).is(eax));
   3047 
   3048   __ mov(ecx, instr->name());
   3049   RelocInfo::Mode mode = instr->for_typeof() ? RelocInfo::CODE_TARGET :
   3050                                                RelocInfo::CODE_TARGET_CONTEXT;
   3051   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
   3052   CallCode(ic, mode, instr);
   3053 }
   3054 
   3055 
   3056 void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
   3057   Register value = ToRegister(instr->value());
   3058   Handle<PropertyCell> cell_handle = instr->hydrogen()->cell();
   3059 
   3060   // If the cell we are storing to contains the hole, it could have
   3061   // been deleted from the property dictionary. In that case, we need
   3062   // to update the property details in the property dictionary to mark
   3063   // it as no longer deleted, so we deoptimize.
   3064   if (instr->hydrogen()->RequiresHoleCheck()) {
   3065     __ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value());
   3066     DeoptimizeIf(equal, instr->environment());
   3067   }
   3068 
   3069   // Store the value.
   3070   __ mov(Operand::ForCell(cell_handle), value);
   3071   // Cells are always rescanned, so no write barrier here.
   3072 }
   3073 
   3074 
   3075 void LCodeGen::DoStoreGlobalGeneric(LStoreGlobalGeneric* instr) {
   3076   ASSERT(ToRegister(instr->context()).is(esi));
   3077   ASSERT(ToRegister(instr->global_object()).is(edx));
   3078   ASSERT(ToRegister(instr->value()).is(eax));
   3079 
   3080   __ mov(ecx, instr->name());
   3081   Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
   3082       ? isolate()->builtins()->StoreIC_Initialize_Strict()
   3083       : isolate()->builtins()->StoreIC_Initialize();
   3084   CallCode(ic, RelocInfo::CODE_TARGET_CONTEXT, instr);
   3085 }
   3086 
   3087 
   3088 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
   3089   Register context = ToRegister(instr->context());
   3090   Register result = ToRegister(instr->result());
   3091   __ mov(result, ContextOperand(context, instr->slot_index()));
   3092 
   3093   if (instr->hydrogen()->RequiresHoleCheck()) {
   3094     __ cmp(result, factory()->the_hole_value());
   3095     if (instr->hydrogen()->DeoptimizesOnHole()) {
   3096       DeoptimizeIf(equal, instr->environment());
   3097     } else {
   3098       Label is_not_hole;
   3099       __ j(not_equal, &is_not_hole, Label::kNear);
   3100       __ mov(result, factory()->undefined_value());
   3101       __ bind(&is_not_hole);
   3102     }
   3103   }
   3104 }
   3105 
   3106 
   3107 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
   3108   Register context = ToRegister(instr->context());
   3109   Register value = ToRegister(instr->value());
   3110 
   3111   Label skip_assignment;
   3112 
   3113   Operand target = ContextOperand(context, instr->slot_index());
   3114   if (instr->hydrogen()->RequiresHoleCheck()) {
   3115     __ cmp(target, factory()->the_hole_value());
   3116     if (instr->hydrogen()->DeoptimizesOnHole()) {
   3117       DeoptimizeIf(equal, instr->environment());
   3118     } else {
   3119       __ j(not_equal, &skip_assignment, Label::kNear);
   3120     }
   3121   }
   3122 
   3123   __ mov(target, value);
   3124   if (instr->hydrogen()->NeedsWriteBarrier()) {
   3125     SmiCheck check_needed =
   3126         instr->hydrogen()->value()->IsHeapObject()
   3127             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   3128     Register temp = ToRegister(instr->temp());
   3129     int offset = Context::SlotOffset(instr->slot_index());
   3130     __ RecordWriteContextSlot(context,
   3131                               offset,
   3132                               value,
   3133                               temp,
   3134                               GetSaveFPRegsMode(),
   3135                               EMIT_REMEMBERED_SET,
   3136                               check_needed);
   3137   }
   3138 
   3139   __ bind(&skip_assignment);
   3140 }
   3141 
   3142 
   3143 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
   3144   HObjectAccess access = instr->hydrogen()->access();
   3145   int offset = access.offset();
   3146 
   3147   if (access.IsExternalMemory()) {
   3148     Register result = ToRegister(instr->result());
   3149     if (instr->object()->IsConstantOperand()) {
   3150       ExternalReference external_reference = ToExternalReference(
   3151           LConstantOperand::cast(instr->object()));
   3152       __ mov(result, MemOperand::StaticVariable(external_reference));
   3153     } else {
   3154       __ mov(result, MemOperand(ToRegister(instr->object()), offset));
   3155     }
   3156     return;
   3157   }
   3158 
   3159   Register object = ToRegister(instr->object());
   3160   if (FLAG_track_double_fields &&
   3161       instr->hydrogen()->representation().IsDouble()) {
   3162     if (CpuFeatures::IsSupported(SSE2)) {
   3163       CpuFeatureScope scope(masm(), SSE2);
   3164       XMMRegister result = ToDoubleRegister(instr->result());
   3165       __ movdbl(result, FieldOperand(object, offset));
   3166     } else {
   3167       X87Mov(ToX87Register(instr->result()), FieldOperand(object, offset));
   3168     }
   3169     return;
   3170   }
   3171 
   3172   Register result = ToRegister(instr->result());
   3173   if (access.IsInobject()) {
   3174     __ mov(result, FieldOperand(object, offset));
   3175   } else {
   3176     __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
   3177     __ mov(result, FieldOperand(result, offset));
   3178   }
   3179 }
   3180 
   3181 
   3182 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
   3183   ASSERT(!operand->IsDoubleRegister());
   3184   if (operand->IsConstantOperand()) {
   3185     Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
   3186     AllowDeferredHandleDereference smi_check;
   3187     if (object->IsSmi()) {
   3188       __ Push(Handle<Smi>::cast(object));
   3189     } else {
   3190       __ PushHeapObject(Handle<HeapObject>::cast(object));
   3191     }
   3192   } else if (operand->IsRegister()) {
   3193     __ push(ToRegister(operand));
   3194   } else {
   3195     __ push(ToOperand(operand));
   3196   }
   3197 }
   3198 
   3199 
   3200 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
   3201   ASSERT(ToRegister(instr->context()).is(esi));
   3202   ASSERT(ToRegister(instr->object()).is(edx));
   3203   ASSERT(ToRegister(instr->result()).is(eax));
   3204 
   3205   __ mov(ecx, instr->name());
   3206   Handle<Code> ic = isolate()->builtins()->LoadIC_Initialize();
   3207   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   3208 }
   3209 
   3210 
   3211 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
   3212   Register function = ToRegister(instr->function());
   3213   Register temp = ToRegister(instr->temp());
   3214   Register result = ToRegister(instr->result());
   3215 
   3216   // Check that the function really is a function.
   3217   __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
   3218   DeoptimizeIf(not_equal, instr->environment());
   3219 
   3220   // Check whether the function has an instance prototype.
   3221   Label non_instance;
   3222   __ test_b(FieldOperand(result, Map::kBitFieldOffset),
   3223             1 << Map::kHasNonInstancePrototype);
   3224   __ j(not_zero, &non_instance, Label::kNear);
   3225 
   3226   // Get the prototype or initial map from the function.
   3227   __ mov(result,
   3228          FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   3229 
   3230   // Check that the function has a prototype or an initial map.
   3231   __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
   3232   DeoptimizeIf(equal, instr->environment());
   3233 
   3234   // If the function does not have an initial map, we're done.
   3235   Label done;
   3236   __ CmpObjectType(result, MAP_TYPE, temp);
   3237   __ j(not_equal, &done, Label::kNear);
   3238 
   3239   // Get the prototype from the initial map.
   3240   __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
   3241   __ jmp(&done, Label::kNear);
   3242 
   3243   // Non-instance prototype: Fetch prototype from constructor field
   3244   // in the function's map.
   3245   __ bind(&non_instance);
   3246   __ mov(result, FieldOperand(result, Map::kConstructorOffset));
   3247 
   3248   // All done.
   3249   __ bind(&done);
   3250 }
   3251 
   3252 
   3253 void LCodeGen::DoLoadExternalArrayPointer(
   3254     LLoadExternalArrayPointer* instr) {
   3255   Register result = ToRegister(instr->result());
   3256   Register input = ToRegister(instr->object());
   3257   __ mov(result, FieldOperand(input,
   3258                               ExternalArray::kExternalPointerOffset));
   3259 }
   3260 
   3261 
   3262 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
   3263   Register arguments = ToRegister(instr->arguments());
   3264   Register result = ToRegister(instr->result());
   3265   if (instr->length()->IsConstantOperand() &&
   3266       instr->index()->IsConstantOperand()) {
   3267     int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   3268     int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
   3269     int index = (const_length - const_index) + 1;
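            // E.g. with const_length == 2 and const_index == 0 (the first
            // argument), the load is from arguments[3 * kPointerSize]: past two
            // words of frame bookkeeping and the second argument (see below).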
   3270     __ mov(result, Operand(arguments, index * kPointerSize));
   3271   } else {
   3272     Register length = ToRegister(instr->length());
   3273     Operand index = ToOperand(instr->index());
   3274     // There are two words between the frame pointer and the last argument.
   3275     // Subtracting the index from length accounts for one; adding one more accounts for the other.
   3276     __ sub(length, index);
   3277     __ mov(result, Operand(arguments, length, times_4, kPointerSize));
   3278   }
   3279 }
   3280 
   3281 
   3282 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
   3283   ElementsKind elements_kind = instr->elements_kind();
   3284   LOperand* key = instr->key();
   3285   if (!key->IsConstantOperand() &&
   3286       ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
   3287                                   elements_kind)) {
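            // The smi tag cannot be folded into the operand's scale factor for
            // this element kind (the scale would have to be one half), so untag
            // the key in place and index with the raw integer value.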
   3288     __ SmiUntag(ToRegister(key));
   3289   }
   3290   Operand operand(BuildFastArrayOperand(
   3291       instr->elements(),
   3292       key,
   3293       instr->hydrogen()->key()->representation(),
   3294       elements_kind,
   3295       0,
   3296       instr->additional_index()));
   3297   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
   3298     if (CpuFeatures::IsSupported(SSE2)) {
   3299       CpuFeatureScope scope(masm(), SSE2);
   3300       XMMRegister result(ToDoubleRegister(instr->result()));
   3301       __ movss(result, operand);
   3302       __ cvtss2sd(result, result);
   3303     } else {
   3304       X87Mov(ToX87Register(instr->result()), operand, kX87FloatOperand);
   3305     }
   3306   } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
   3307     if (CpuFeatures::IsSupported(SSE2)) {
   3308       CpuFeatureScope scope(masm(), SSE2);
   3309       __ movdbl(ToDoubleRegister(instr->result()), operand);
   3310     } else {
   3311       X87Mov(ToX87Register(instr->result()), operand);
   3312     }
   3313   } else {
   3314     Register result(ToRegister(instr->result()));
   3315     switch (elements_kind) {
   3316       case EXTERNAL_BYTE_ELEMENTS:
   3317         __ movsx_b(result, operand);
   3318         break;
   3319       case EXTERNAL_PIXEL_ELEMENTS:
   3320       case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
   3321         __ movzx_b(result, operand);
   3322         break;
   3323       case EXTERNAL_SHORT_ELEMENTS:
   3324         __ movsx_w(result, operand);
   3325         break;
   3326       case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
   3327         __ movzx_w(result, operand);
   3328         break;
   3329       case EXTERNAL_INT_ELEMENTS:
   3330         __ mov(result, operand);
   3331         break;
   3332       case EXTERNAL_UNSIGNED_INT_ELEMENTS:
   3333         __ mov(result, operand);
   3334         if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
   3335           __ test(result, Operand(result));
   3336           DeoptimizeIf(negative, instr->environment());
   3337         }
   3338         break;
   3339       case EXTERNAL_FLOAT_ELEMENTS:
   3340       case EXTERNAL_DOUBLE_ELEMENTS:
   3341       case FAST_SMI_ELEMENTS:
   3342       case FAST_ELEMENTS:
   3343       case FAST_DOUBLE_ELEMENTS:
   3344       case FAST_HOLEY_SMI_ELEMENTS:
   3345       case FAST_HOLEY_ELEMENTS:
   3346       case FAST_HOLEY_DOUBLE_ELEMENTS:
   3347       case DICTIONARY_ELEMENTS:
   3348       case NON_STRICT_ARGUMENTS_ELEMENTS:
   3349         UNREACHABLE();
   3350         break;
   3351     }
   3352   }
   3353 }
   3354 
   3355 
   3356 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
   3357   if (instr->hydrogen()->RequiresHoleCheck()) {
   3358     int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
   3359         sizeof(kHoleNanLower32);
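            // This operand addresses only the upper 32 bits of the double, which
            // are sufficient to identify the hole NaN pattern.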
   3360     Operand hole_check_operand = BuildFastArrayOperand(
   3361         instr->elements(), instr->key(),
   3362         instr->hydrogen()->key()->representation(),
   3363         FAST_DOUBLE_ELEMENTS,
   3364         offset,
   3365         instr->additional_index());
   3366     __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
   3367     DeoptimizeIf(equal, instr->environment());
   3368   }
   3369 
   3370   Operand double_load_operand = BuildFastArrayOperand(
   3371       instr->elements(),
   3372       instr->key(),
   3373       instr->hydrogen()->key()->representation(),
   3374       FAST_DOUBLE_ELEMENTS,
   3375       FixedDoubleArray::kHeaderSize - kHeapObjectTag,
   3376       instr->additional_index());
   3377   if (CpuFeatures::IsSupported(SSE2)) {
   3378     CpuFeatureScope scope(masm(), SSE2);
   3379     XMMRegister result = ToDoubleRegister(instr->result());
   3380     __ movdbl(result, double_load_operand);
   3381   } else {
   3382     X87Mov(ToX87Register(instr->result()), double_load_operand);
   3383   }
   3384 }
   3385 
   3386 
   3387 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
   3388   Register result = ToRegister(instr->result());
   3389 
   3390   // Load the result.
   3391   __ mov(result,
   3392          BuildFastArrayOperand(instr->elements(),
   3393                                instr->key(),
   3394                                instr->hydrogen()->key()->representation(),
   3395                                FAST_ELEMENTS,
   3396                                FixedArray::kHeaderSize - kHeapObjectTag,
   3397                                instr->additional_index()));
   3398 
   3399   // Check for the hole value.
   3400   if (instr->hydrogen()->RequiresHoleCheck()) {
   3401     if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
   3402       __ test(result, Immediate(kSmiTagMask));
   3403       DeoptimizeIf(not_equal, instr->environment());
   3404     } else {
   3405       __ cmp(result, factory()->the_hole_value());
   3406       DeoptimizeIf(equal, instr->environment());
   3407     }
   3408   }
   3409 }
   3410 
   3411 
   3412 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
   3413   if (instr->is_external()) {
   3414     DoLoadKeyedExternalArray(instr);
   3415   } else if (instr->hydrogen()->representation().IsDouble()) {
   3416     DoLoadKeyedFixedDoubleArray(instr);
   3417   } else {
   3418     DoLoadKeyedFixedArray(instr);
   3419   }
   3420 }
   3421 
   3422 
   3423 Operand LCodeGen::BuildFastArrayOperand(
   3424     LOperand* elements_pointer,
   3425     LOperand* key,
   3426     Representation key_representation,
   3427     ElementsKind elements_kind,
   3428     uint32_t offset,
   3429     uint32_t additional_index) {
   3430   Register elements_pointer_reg = ToRegister(elements_pointer);
   3431   int element_shift_size = ElementsKindToShiftSize(elements_kind);
   3432   int shift_size = element_shift_size;
   3433   if (key->IsConstantOperand()) {
   3434     int constant_value = ToInteger32(LConstantOperand::cast(key));
   3435     if (constant_value & 0xF0000000) {
   3436       Abort(kArrayIndexConstantValueTooBig);
   3437     }
   3438     return Operand(elements_pointer_reg,
   3439                    ((constant_value + additional_index) << shift_size)
   3440                        + offset);
   3441   } else {
   3442     // Take the tag bit into account while computing the shift size.
   3443     if (key_representation.IsSmi() && (shift_size >= 1)) {
   3444       shift_size -= kSmiTagSize;
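              // E.g. for FAST_ELEMENTS (shift size 2) a smi key already holds
              // value << 1, so scaling by times_2 still addresses value * 4.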
   3445     }
   3446     ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
   3447     return Operand(elements_pointer_reg,
   3448                    ToRegister(key),
   3449                    scale_factor,
   3450                    offset + (additional_index << element_shift_size));
   3451   }
   3452 }
   3453 
   3454 
   3455 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
   3456   ASSERT(ToRegister(instr->context()).is(esi));
   3457   ASSERT(ToRegister(instr->object()).is(edx));
   3458   ASSERT(ToRegister(instr->key()).is(ecx));
   3459 
   3460   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
   3461   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   3462 }
   3463 
   3464 
   3465 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
   3466   Register result = ToRegister(instr->result());
   3467 
   3468   if (instr->hydrogen()->from_inlined()) {
   3469     __ lea(result, Operand(esp, -2 * kPointerSize));
   3470   } else {
   3471     // Check for arguments adapter frame.
   3472     Label done, adapted;
   3473     __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   3474     __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
   3475     __ cmp(Operand(result),
   3476            Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   3477     __ j(equal, &adapted, Label::kNear);
   3478 
   3479     // No arguments adaptor frame.
   3480     __ mov(result, Operand(ebp));
   3481     __ jmp(&done, Label::kNear);
   3482 
   3483     // Arguments adaptor frame present.
   3484     __ bind(&adapted);
   3485     __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   3486 
   3487     // The result is the frame pointer of this frame if not adapted, and of
   3488     // the real frame below the adaptor frame if adapted.
   3489     __ bind(&done);
   3490   }
   3491 }
   3492 
   3493 
   3494 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
   3495   Operand elem = ToOperand(instr->elements());
   3496   Register result = ToRegister(instr->result());
   3497 
   3498   Label done;
   3499 
   3500   // If there is no arguments adaptor frame, the number of arguments is fixed.
   3501   __ cmp(ebp, elem);
   3502   __ mov(result, Immediate(scope()->num_parameters()));
   3503   __ j(equal, &done, Label::kNear);
   3504 
   3505   // Arguments adaptor frame present. Get argument length from there.
   3506   __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   3507   __ mov(result, Operand(result,
   3508                          ArgumentsAdaptorFrameConstants::kLengthOffset));
   3509   __ SmiUntag(result);
   3510 
   3511   // Argument length is in result register.
   3512   __ bind(&done);
   3513 }
   3514 
   3515 
   3516 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
   3517   Register receiver = ToRegister(instr->receiver());
   3518   Register function = ToRegister(instr->function());
   3519   Register scratch = ToRegister(instr->temp());
   3520 
   3521   // If the receiver is null or undefined, we have to pass the global
   3522   // object as a receiver to normal functions. Values have to be
   3523   // passed unchanged to builtins and strict-mode functions.
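          // E.g. f.apply(null, args) on a non-strict, non-native function f must
          // see the global receiver instead of null, while a strict-mode f
          // receives null unchanged.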
   3524   Label global_object, receiver_ok;
   3525 
   3526   // Do not transform the receiver to object for strict mode
   3527   // functions.
   3528   __ mov(scratch,
   3529          FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
   3530   __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
   3531             1 << SharedFunctionInfo::kStrictModeBitWithinByte);
   3532   __ j(not_equal, &receiver_ok);  // A near jump is not sufficient here!
   3533 
   3534   // Do not transform the receiver to object for builtins.
   3535   __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
   3536             1 << SharedFunctionInfo::kNativeBitWithinByte);
   3537   __ j(not_equal, &receiver_ok);
   3538 
   3539   // Normal function. Replace undefined or null with global receiver.
   3540   __ cmp(receiver, factory()->null_value());
   3541   __ j(equal, &global_object, Label::kNear);
   3542   __ cmp(receiver, factory()->undefined_value());
   3543   __ j(equal, &global_object, Label::kNear);
   3544 
   3545   // The receiver should be a JS object.
   3546   __ test(receiver, Immediate(kSmiTagMask));
   3547   DeoptimizeIf(equal, instr->environment());
   3548   __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
   3549   DeoptimizeIf(below, instr->environment());
   3550   __ jmp(&receiver_ok, Label::kNear);
   3551 
   3552   __ bind(&global_object);
   3553   // TODO(kmillikin): We have a hydrogen value for the global object.  See
   3554   // if it's better to use it than to explicitly fetch it from the context
   3555   // here.
   3556   __ mov(receiver, Operand(ebp, StandardFrameConstants::kContextOffset));
   3557   __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_OBJECT_INDEX));
   3558   __ mov(receiver,
   3559          FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
   3560   __ bind(&receiver_ok);
   3561 }
   3562 
   3563 
   3564 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
   3565   Register receiver = ToRegister(instr->receiver());
   3566   Register function = ToRegister(instr->function());
   3567   Register length = ToRegister(instr->length());
   3568   Register elements = ToRegister(instr->elements());
   3569   ASSERT(receiver.is(eax));  // Used for parameter count.
   3570   ASSERT(function.is(edi));  // Required by InvokeFunction.
   3571   ASSERT(ToRegister(instr->result()).is(eax));
   3572 
   3573   // Copy the arguments to this function possibly from the
   3574   // adaptor frame below it.
   3575   const uint32_t kArgumentsLimit = 1 * KB;
   3576   __ cmp(length, kArgumentsLimit);
   3577   DeoptimizeIf(above, instr->environment());
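          // I.e. applying more than 1024 arguments bails out to deoptimized code
          // rather than attempting an oversized stack copy.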
   3578 
   3579   __ push(receiver);
   3580   __ mov(receiver, length);
   3581 
   3582   // Loop through the arguments pushing them onto the execution
   3583   // stack.
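          // The copy runs from the highest index down so that the first argument
          // ends up deepest on the stack, as the calling convention expects.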
   3584   Label invoke, loop;
   3585   // length is a small non-negative integer, due to the test above.
   3586   __ test(length, Operand(length));
   3587   __ j(zero, &invoke, Label::kNear);
   3588   __ bind(&loop);
   3589   __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
   3590   __ dec(length);
   3591   __ j(not_zero, &loop);
   3592 
   3593   // Invoke the function.
   3594   __ bind(&invoke);
   3595   ASSERT(instr->HasPointerMap());
   3596   LPointerMap* pointers = instr->pointer_map();
   3597   RecordPosition(pointers->position());
   3598   SafepointGenerator safepoint_generator(
   3599       this, pointers, Safepoint::kLazyDeopt);
   3600   ParameterCount actual(eax);
   3601   __ InvokeFunction(function, actual, CALL_FUNCTION,
   3602                     safepoint_generator, CALL_AS_METHOD);
   3603 }
   3604 
   3605 
   3606 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
   3607   __ int3();
   3608 }
   3609 
   3610 
   3611 void LCodeGen::DoPushArgument(LPushArgument* instr) {
   3612   LOperand* argument = instr->value();
   3613   EmitPushTaggedOperand(argument);
   3614 }
   3615 
   3616 
   3617 void LCodeGen::DoDrop(LDrop* instr) {
   3618   __ Drop(instr->count());
   3619 }
   3620 
   3621 
   3622 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   3623   Register result = ToRegister(instr->result());
   3624   __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
   3625 }
   3626 
   3627 
   3628 void LCodeGen::DoContext(LContext* instr) {
   3629   Register result = ToRegister(instr->result());
   3630   if (info()->IsOptimizing()) {
   3631     __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
   3632   } else {
   3633     // If there is no frame, the context must be in esi.
   3634     ASSERT(result.is(esi));
   3635   }
   3636 }
   3637 
   3638 
   3639 void LCodeGen::DoOuterContext(LOuterContext* instr) {
   3640   Register context = ToRegister(instr->context());
   3641   Register result = ToRegister(instr->result());
   3642   __ mov(result,
   3643          Operand(context, Context::SlotOffset(Context::PREVIOUS_INDEX)));
   3644 }
   3645 
   3646 
   3647 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
   3648   ASSERT(ToRegister(instr->context()).is(esi));
   3649   __ push(esi);  // The context is the first argument.
   3650   __ push(Immediate(instr->hydrogen()->pairs()));
   3651   __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
   3652   CallRuntime(Runtime::kDeclareGlobals, 3, instr);
   3653 }
   3654 
   3655 
   3656 void LCodeGen::DoGlobalObject(LGlobalObject* instr) {
   3657   Register context = ToRegister(instr->context());
   3658   Register result = ToRegister(instr->result());
   3659   __ mov(result,
   3660          Operand(context, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
   3661 }
   3662 
   3663 
   3664 void LCodeGen::DoGlobalReceiver(LGlobalReceiver* instr) {
   3665   Register global = ToRegister(instr->global());
   3666   Register result = ToRegister(instr->result());
   3667   __ mov(result, FieldOperand(global, GlobalObject::kGlobalReceiverOffset));
   3668 }
   3669 
   3670 
   3671 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
   3672                                  int formal_parameter_count,
   3673                                  int arity,
   3674                                  LInstruction* instr,
   3675                                  CallKind call_kind,
   3676                                  EDIState edi_state) {
   3677   bool dont_adapt_arguments =
   3678       formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
   3679   bool can_invoke_directly =
   3680       dont_adapt_arguments || formal_parameter_count == arity;
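          // Direct invocation skips the arguments adaptor; that is safe exactly
          // when the callee opts out of adaptation or the arity already matches.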
   3681 
   3682   LPointerMap* pointers = instr->pointer_map();
   3683   RecordPosition(pointers->position());
   3684 
   3685   if (can_invoke_directly) {
   3686     if (edi_state == EDI_UNINITIALIZED) {
   3687       __ LoadHeapObject(edi, function);
   3688     }
   3689 
   3690     // Change context.
   3691     __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
   3692 
   3693     // Set eax to the arguments count if adaptation is not needed. Assumes
   3694     // that eax is available to write to at this point.
   3695     if (dont_adapt_arguments) {
   3696       __ mov(eax, arity);
   3697     }
   3698 
   3699     // Invoke function directly.
   3700     __ SetCallKind(ecx, call_kind);
   3701     if (function.is_identical_to(info()->closure())) {
   3702       __ CallSelf();
   3703     } else {
   3704       __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
   3705     }
   3706     RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
   3707   } else {
   3708     // We need to adapt arguments.
   3709     SafepointGenerator generator(
   3710         this, pointers, Safepoint::kLazyDeopt);
   3711     ParameterCount count(arity);
   3712     ParameterCount expected(formal_parameter_count);
   3713     __ InvokeFunction(
   3714         function, expected, count, CALL_FUNCTION, generator, call_kind);
   3715   }
   3716 }
   3717 
   3718 
   3719 void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
   3720   ASSERT(ToRegister(instr->result()).is(eax));
   3721   CallKnownFunction(instr->hydrogen()->function(),
   3722                     instr->hydrogen()->formal_parameter_count(),
   3723                     instr->arity(),
   3724                     instr,
   3725                     CALL_AS_METHOD,
   3726                     EDI_UNINITIALIZED);
   3727 }
   3728 
   3729 
   3730 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
   3731   Register input_reg = ToRegister(instr->value());
   3732   __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
   3733          factory()->heap_number_map());
   3734   DeoptimizeIf(not_equal, instr->environment());
   3735 
   3736   Label slow, allocated, done;
   3737   Register tmp = input_reg.is(eax) ? ecx : eax;
   3738   Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;
   3739 
   3740   // Preserve the value of all registers.
   3741   PushSafepointRegistersScope scope(this);
   3742 
   3743   __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
   3744   // Check the sign of the argument. If the argument is positive, just
   3745   // return it. We do not need to patch the stack since |input| and
   3746   // |result| are the same register and |input| will be restored
   3747   // unchanged by popping safepoint registers.
   3748   __ test(tmp, Immediate(HeapNumber::kSignMask));
   3749   __ j(zero, &done);
   3750 
   3751   __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
   3752   __ jmp(&allocated, Label::kNear);
   3753 
   3754   // Slow case: Call the runtime system to do the number allocation.
   3755   __ bind(&slow);
   3756   CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
   3757                           instr, instr->context());
   3758   // Set the pointer to the new heap number in tmp.
   3759   if (!tmp.is(eax)) __ mov(tmp, eax);
   3760   // Restore input_reg after call to runtime.
   3761   __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
   3762 
   3763   __ bind(&allocated);
   3764   __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
   3765   __ and_(tmp2, ~HeapNumber::kSignMask);
   3766   __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
   3767   __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
   3768   __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
   3769   __ StoreToSafepointRegisterSlot(input_reg, tmp);
   3770 
   3771   __ bind(&done);
   3772 }
   3773 
   3774 
   3775 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
   3776   Register input_reg = ToRegister(instr->value());
   3777   __ test(input_reg, Operand(input_reg));
   3778   Label is_positive;
   3779   __ j(not_sign, &is_positive, Label::kNear);
   3780   __ neg(input_reg);  // Sets flags.
   3781   DeoptimizeIf(negative, instr->environment());
   3782   __ bind(&is_positive);
   3783 }
   3784 
   3785 
   3786 void LCodeGen::DoMathAbs(LMathAbs* instr) {
   3787   // Class for deferred case.
   3788   class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
   3789    public:
   3790     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
   3791         : LDeferredCode(codegen), instr_(instr) { }
   3792     virtual void Generate() {
   3793       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
   3794     }
   3795     virtual LInstruction* instr() { return instr_; }
   3796    private:
   3797     LMathAbs* instr_;
   3798   };
   3799 
   3800   ASSERT(instr->value()->Equals(instr->result()));
   3801   Representation r = instr->hydrogen()->value()->representation();
   3802 
   3803   CpuFeatureScope scope(masm(), SSE2);
   3804   if (r.IsDouble()) {
   3805     XMMRegister scratch = xmm0;
   3806     XMMRegister input_reg = ToDoubleRegister(instr->value());
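            // abs(x) as x AND (0 - x): for finite nonzero x the subtraction just
            // flips the sign bit, so the bitwise AND clears it; zeros map to +0.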
   3807     __ xorps(scratch, scratch);
   3808     __ subsd(scratch, input_reg);
   3809     __ pand(input_reg, scratch);
   3810   } else if (r.IsSmiOrInteger32()) {
   3811     EmitIntegerMathAbs(instr);
   3812   } else {  // Tagged case.
   3813     DeferredMathAbsTaggedHeapNumber* deferred =
   3814         new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
   3815     Register input_reg = ToRegister(instr->value());
   3816     // Smi check.
   3817     __ JumpIfNotSmi(input_reg, deferred->entry());
   3818     EmitIntegerMathAbs(instr);
   3819     __ bind(deferred->exit());
   3820   }
   3821 }
   3822 
   3823 
   3824 void LCodeGen::DoMathFloor(LMathFloor* instr) {
   3825   CpuFeatureScope scope(masm(), SSE2);
   3826   XMMRegister xmm_scratch = xmm0;
   3827   Register output_reg = ToRegister(instr->result());
   3828   XMMRegister input_reg = ToDoubleRegister(instr->value());
   3829 
   3830   if (CpuFeatures::IsSupported(SSE4_1)) {
   3831     CpuFeatureScope scope(masm(), SSE4_1);
   3832     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3833       // Deoptimize on negative zero.
   3834       Label non_zero;
   3835       __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
   3836       __ ucomisd(input_reg, xmm_scratch);
   3837       __ j(not_equal, &non_zero, Label::kNear);
   3838       __ movmskpd(output_reg, input_reg);
   3839       __ test(output_reg, Immediate(1));
   3840       DeoptimizeIf(not_zero, instr->environment());
   3841       __ bind(&non_zero);
   3842     }
   3843     __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
   3844     __ cvttsd2si(output_reg, Operand(xmm_scratch));
   3845     // Overflow is signalled with minint.
   3846     __ cmp(output_reg, 0x80000000u);
   3847     DeoptimizeIf(equal, instr->environment());
   3848   } else {
   3849     Label negative_sign, done;
   3850     // Deoptimize on unordered.
   3851     __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
   3852     __ ucomisd(input_reg, xmm_scratch);
   3853     DeoptimizeIf(parity_even, instr->environment());
   3854     __ j(below, &negative_sign, Label::kNear);
   3855 
   3856     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3857       // Check for negative zero.
   3858       Label positive_sign;
   3859       __ j(above, &positive_sign, Label::kNear);
   3860       __ movmskpd(output_reg, input_reg);
   3861       __ test(output_reg, Immediate(1));
   3862       DeoptimizeIf(not_zero, instr->environment());
   3863       __ Set(output_reg, Immediate(0));
   3864       __ jmp(&done, Label::kNear);
   3865       __ bind(&positive_sign);
   3866     }
   3867 
   3868     // Use truncating instruction (OK because input is positive).
   3869     __ cvttsd2si(output_reg, Operand(input_reg));
   3870     // Overflow is signalled with minint.
   3871     __ cmp(output_reg, 0x80000000u);
   3872     DeoptimizeIf(equal, instr->environment());
   3873     __ jmp(&done, Label::kNear);
   3874 
   3875     // Non-zero negative reaches here.
   3876     __ bind(&negative_sign);
   3877     // Truncate, then compare and compensate.
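            // E.g. x == -1.5: cvttsd2si yields -1; reconverting gives -1.0 != -1.5,
            // so subtracting 1 produces floor(-1.5) == -2.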
   3878     __ cvttsd2si(output_reg, Operand(input_reg));
   3879     __ cvtsi2sd(xmm_scratch, output_reg);
   3880     __ ucomisd(input_reg, xmm_scratch);
   3881     __ j(equal, &done, Label::kNear);
   3882     __ sub(output_reg, Immediate(1));
   3883     DeoptimizeIf(overflow, instr->environment());
   3884 
   3885     __ bind(&done);
   3886   }
   3887 }
   3888 
   3889 
   3890 void LCodeGen::DoMathRound(LMathRound* instr) {
   3891   CpuFeatureScope scope(masm(), SSE2);
   3892   Register output_reg = ToRegister(instr->result());
   3893   XMMRegister input_reg = ToDoubleRegister(instr->value());
   3894   XMMRegister xmm_scratch = xmm0;
   3895   XMMRegister input_temp = ToDoubleRegister(instr->temp());
   3896   ExternalReference one_half = ExternalReference::address_of_one_half();
   3897   ExternalReference minus_one_half =
   3898       ExternalReference::address_of_minus_one_half();
   3899 
   3900   Label done, round_to_zero, below_one_half, do_not_compensate;
   3901   __ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
   3902   __ ucomisd(xmm_scratch, input_reg);
   3903   __ j(above, &below_one_half);
   3904 
   3905   // CVTTSD2SI rounds towards zero; since 0.5 <= x, we use floor(0.5 + x).
   3906   __ addsd(xmm_scratch, input_reg);
   3907   __ cvttsd2si(output_reg, Operand(xmm_scratch));
   3908   // Overflow is signalled with minint.
   3909   __ cmp(output_reg, 0x80000000u);
   3910   __ RecordComment("D2I conversion overflow");
   3911   DeoptimizeIf(equal, instr->environment());
   3912   __ jmp(&done);
   3913 
   3914   __ bind(&below_one_half);
   3915   __ movdbl(xmm_scratch, Operand::StaticVariable(minus_one_half));
   3916   __ ucomisd(xmm_scratch, input_reg);
   3917   __ j(below_equal, &round_to_zero);
   3918 
   3919   // CVTTSD2SI rounds towards zero, so we use ceil(x - (-0.5)) and then
   3920   // compare and compensate.
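          // E.g. x == -1.6: x - (-0.5) == -1.1 truncates to -1; reconverting
          // gives -1.0 != -1.1, so subtracting 1 yields Math.round(-1.6) == -2.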
   3921   __ movsd(input_temp, input_reg);  // Do not alter input_reg.
   3922   __ subsd(input_temp, xmm_scratch);
   3923   __ cvttsd2si(output_reg, Operand(input_temp));
   3924   // Catch minint due to overflow, and to prevent overflow when compensating.
   3925   __ cmp(output_reg, 0x80000000u);
   3926   __ RecordComment("D2I conversion overflow");
   3927   DeoptimizeIf(equal, instr->environment());
   3928 
   3929   __ cvtsi2sd(xmm_scratch, output_reg);
   3930   __ ucomisd(xmm_scratch, input_temp);
   3931   __ j(equal, &done);
   3932   __ sub(output_reg, Immediate(1));
   3933   // No overflow because we already ruled out minint.
   3934   __ jmp(&done);
   3935 
   3936   __ bind(&round_to_zero);
   3937   // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
   3938   // we can ignore the difference between a result of -0 and +0.
   3939   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3940     // If the sign is positive, we return +0.
   3941     __ movmskpd(output_reg, input_reg);
   3942     __ test(output_reg, Immediate(1));
   3943     __ RecordComment("Minus zero");
   3944     DeoptimizeIf(not_zero, instr->environment());
   3945   }
   3946   __ Set(output_reg, Immediate(0));
   3947   __ bind(&done);
   3948 }
   3949 
   3950 
   3951 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
   3952   CpuFeatureScope scope(masm(), SSE2);
   3953   XMMRegister input_reg = ToDoubleRegister(instr->value());
   3954   ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
   3955   __ sqrtsd(input_reg, input_reg);
   3956 }
   3957 
   3958 
   3959 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
   3960   CpuFeatureScope scope(masm(), SSE2);
   3961   XMMRegister xmm_scratch = xmm0;
   3962   XMMRegister input_reg = ToDoubleRegister(instr->value());
   3963   Register scratch = ToRegister(instr->temp());
   3964   ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
   3965 
   3966   // Note that according to ECMA-262 15.8.2.13:
   3967   // Math.pow(-Infinity, 0.5) == Infinity
   3968   // Math.sqrt(-Infinity) == NaN
   3969   Label done, sqrt;
   3970   // Check base for -Infinity.  According to IEEE-754, single-precision
   3971   // -Infinity has the highest 9 bits set and the lowest 23 bits cleared.
   3972   __ mov(scratch, 0xFF800000);
   3973   __ movd(xmm_scratch, scratch);
   3974   __ cvtss2sd(xmm_scratch, xmm_scratch);
   3975   __ ucomisd(input_reg, xmm_scratch);
   3976   // Comparing -Infinity with NaN results in "unordered", which sets the
   3977   // zero flag as if both were equal.  However, it also sets the carry flag.
   3978   __ j(not_equal, &sqrt, Label::kNear);
   3979   __ j(carry, &sqrt, Label::kNear);
   3980   // If input is -Infinity, return Infinity.
   3981   __ xorps(input_reg, input_reg);
   3982   __ subsd(input_reg, xmm_scratch);
   3983   __ jmp(&done, Label::kNear);
   3984 
   3985   // Square root.
   3986   __ bind(&sqrt);
   3987   __ xorps(xmm_scratch, xmm_scratch);
   3988   __ addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
   3989   __ sqrtsd(input_reg, input_reg);
   3990   __ bind(&done);
   3991 }
   3992 
   3993 
   3994 void LCodeGen::DoPower(LPower* instr) {
   3995   Representation exponent_type = instr->hydrogen()->right()->representation();
   3996   // Having marked this as a call, we can use any registers.
   3997   // Just make sure that the input/output registers are the expected ones.
   3998   ASSERT(!instr->right()->IsDoubleRegister() ||
   3999          ToDoubleRegister(instr->right()).is(xmm1));
   4000   ASSERT(!instr->right()->IsRegister() ||
   4001          ToRegister(instr->right()).is(eax));
   4002   ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
   4003   ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
   4004 
   4005   if (exponent_type.IsSmi()) {
   4006     MathPowStub stub(MathPowStub::TAGGED);
   4007     __ CallStub(&stub);
   4008   } else if (exponent_type.IsTagged()) {
   4009     Label no_deopt;
   4010     __ JumpIfSmi(eax, &no_deopt);
   4011     __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
   4012     DeoptimizeIf(not_equal, instr->environment());
   4013     __ bind(&no_deopt);
   4014     MathPowStub stub(MathPowStub::TAGGED);
   4015     __ CallStub(&stub);
   4016   } else if (exponent_type.IsInteger32()) {
   4017     MathPowStub stub(MathPowStub::INTEGER);
   4018     __ CallStub(&stub);
   4019   } else {
   4020     ASSERT(exponent_type.IsDouble());
   4021     MathPowStub stub(MathPowStub::DOUBLE);
   4022     __ CallStub(&stub);
   4023   }
   4024 }
   4025 
   4026 
   4027 void LCodeGen::DoRandom(LRandom* instr) {
   4028   class DeferredDoRandom: public LDeferredCode {
   4029    public:
   4030     DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
   4031         : LDeferredCode(codegen), instr_(instr) { }
   4032     virtual void Generate() { codegen()->DoDeferredRandom(instr_); }
   4033     virtual LInstruction* instr() { return instr_; }
   4034    private:
   4035     LRandom* instr_;
   4036   };
   4037 
   4038   DeferredDoRandom* deferred = new(zone()) DeferredDoRandom(this, instr);
   4039 
   4040   CpuFeatureScope scope(masm(), SSE2);
   4041   // Having marked this instruction as a call, we can use any
   4042   // registers.
   4043   ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
   4044   ASSERT(ToRegister(instr->global_object()).is(eax));
   4045   // Assert that the register size is indeed the size of each seed.
   4046   static const int kSeedSize = sizeof(uint32_t);
   4047   STATIC_ASSERT(kPointerSize == kSeedSize);
   4048 
   4049   __ mov(eax, FieldOperand(eax, GlobalObject::kNativeContextOffset));
   4050   static const int kRandomSeedOffset =
   4051       FixedArray::kHeaderSize + Context::RANDOM_SEED_INDEX * kPointerSize;
   4052   __ mov(ebx, FieldOperand(eax, kRandomSeedOffset));
   4053   // ebx: ByteArray holding the native context's random seeds
   4054 
   4055   // Load state[0].
   4056   __ mov(ecx, FieldOperand(ebx, ByteArray::kHeaderSize));
   4057   // If state[0] == 0, call runtime to initialize seeds.
   4058   __ test(ecx, ecx);
   4059   __ j(zero, deferred->entry());
   4060   // Load state[1].
   4061   __ mov(eax, FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize));
   4062   // ecx: state[0]
   4063   // eax: state[1]
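          // Each update below is a 16-bit multiply-with-carry step: the low half
          // of the state word is the lagged value, the high half the carry.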
   4064 
   4065   // state[0] = 18273 * (state[0] & 0xFFFF) + (state[0] >> 16)
   4066   __ movzx_w(edx, ecx);
   4067   __ imul(edx, edx, 18273);
   4068   __ shr(ecx, 16);
   4069   __ add(ecx, edx);
   4070   // Save state[0].
   4071   __ mov(FieldOperand(ebx, ByteArray::kHeaderSize), ecx);
   4072 
   4073   // state[1] = 36969 * (state[1] & 0xFFFF) + (state[1] >> 16)
   4074   __ movzx_w(edx, eax);
   4075   __ imul(edx, edx, 36969);
   4076   __ shr(eax, 16);
   4077   __ add(eax, edx);
   4078   // Save state[1].
   4079   __ mov(FieldOperand(ebx, ByteArray::kHeaderSize + kSeedSize), eax);
   4080 
   4081   // Random bit pattern = (state[0] << 14) + (state[1] & 0x3FFFF)
   4082   __ shl(ecx, 14);
   4083   __ and_(eax, Immediate(0x3FFFF));
   4084   __ add(eax, ecx);
   4085 
   4086   __ bind(deferred->exit());
   4087   // Convert 32 random bits in eax to 0.(32 random bits) in a double
   4088   // by computing:
   4089   // (1.(20 0s)(32 random bits) x 2^20) - (1.0 x 2^20).
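          // Bit-level sketch: the double 2^20 has an all-zero mantissa, so XOR-ing
          // 32 random bits into its low mantissa bits yields 2^20 + bits/2^32;
          // subtracting 2^20 leaves bits/2^32 in [0, 1).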
   4090   __ mov(ebx, Immediate(0x49800000));  // 1.0 x 2^20 as single.
   4091   __ movd(xmm2, ebx);
   4092   __ movd(xmm1, eax);
   4093   __ cvtss2sd(xmm2, xmm2);
   4094   __ xorps(xmm1, xmm2);
   4095   __ subsd(xmm1, xmm2);
   4096 }
   4097 
   4098 
   4099 void LCodeGen::DoDeferredRandom(LRandom* instr) {
   4100   __ PrepareCallCFunction(1, ebx);
   4101   __ mov(Operand(esp, 0), eax);
   4102   __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
   4103   // Return value is in eax.
   4104 }
   4105 
   4106 
   4107 void LCodeGen::DoMathLog(LMathLog* instr) {
   4108   CpuFeatureScope scope(masm(), SSE2);
   4109   ASSERT(instr->value()->Equals(instr->result()));
   4110   XMMRegister input_reg = ToDoubleRegister(instr->value());
   4111   Label positive, done, zero;
   4112   __ xorps(xmm0, xmm0);
   4113   __ ucomisd(input_reg, xmm0);
   4114   __ j(above, &positive, Label::kNear);
   4115   __ j(equal, &zero, Label::kNear);
   4116   ExternalReference nan =
   4117       ExternalReference::address_of_canonical_non_hole_nan();
   4118   __ movdbl(input_reg, Operand::StaticVariable(nan));
   4119   __ jmp(&done, Label::kNear);
   4120   __ bind(&zero);
   4121   __ push(Immediate(0xFFF00000));
   4122   __ push(Immediate(0));
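          // The two pushes form the double -Infinity (0xFFF0000000000000) in
          // memory: low word at esp, high word at esp + 4 (little-endian).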
   4123   __ movdbl(input_reg, Operand(esp, 0));
   4124   __ add(Operand(esp), Immediate(kDoubleSize));
   4125   __ jmp(&done, Label::kNear);
   4126   __ bind(&positive);
   4127   __ fldln2();
   4128   __ sub(Operand(esp), Immediate(kDoubleSize));
   4129   __ movdbl(Operand(esp, 0), input_reg);
   4130   __ fld_d(Operand(esp, 0));
   4131   __ fyl2x();
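          // fyl2x computes st(1) * log2(st(0)); with ln 2 loaded by fldln2 this
          // evaluates ln(2) * log2(x) == ln(x).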
   4132   __ fstp_d(Operand(esp, 0));
   4133   __ movdbl(input_reg, Operand(esp, 0));
   4134   __ add(Operand(esp), Immediate(kDoubleSize));
   4135   __ bind(&done);
   4136 }
   4137 
   4138 
   4139 void LCodeGen::DoMathExp(LMathExp* instr) {
   4140   CpuFeatureScope scope(masm(), SSE2);
   4141   XMMRegister input = ToDoubleRegister(instr->value());
   4142   XMMRegister result = ToDoubleRegister(instr->result());
   4143   Register temp1 = ToRegister(instr->temp1());
   4144   Register temp2 = ToRegister(instr->temp2());
   4145 
   4146   MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2);
   4147 }
   4148 
   4149 
   4150 void LCodeGen::DoMathTan(LMathTan* instr) {
   4151   ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
   4152   // Set the context register to a GC-safe fake value. Clobbering it is
   4153   // OK because this instruction is marked as a call.
   4154   __ Set(esi, Immediate(0));
   4155   TranscendentalCacheStub stub(TranscendentalCache::TAN,
   4156                                TranscendentalCacheStub::UNTAGGED);
   4157   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
   4158 }
   4159 
   4160 
   4161 void LCodeGen::DoMathCos(LMathCos* instr) {
   4162   ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
   4163   // Set the context register to a GC-safe fake value. Clobbering it is
   4164   // OK because this instruction is marked as a call.
   4165   __ Set(esi, Immediate(0));
   4166   TranscendentalCacheStub stub(TranscendentalCache::COS,
   4167                                TranscendentalCacheStub::UNTAGGED);
   4168   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
   4169 }
   4170 
   4171 
   4172 void LCodeGen::DoMathSin(LMathSin* instr) {
   4173   ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
   4174   // Set the context register to a GC-safe fake value. Clobbering it is
   4175   // OK because this instruction is marked as a call.
   4176   __ Set(esi, Immediate(0));
   4177   TranscendentalCacheStub stub(TranscendentalCache::SIN,
   4178                                TranscendentalCacheStub::UNTAGGED);
   4179   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
   4180 }
   4181 
   4182 
   4183 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
   4184   ASSERT(ToRegister(instr->context()).is(esi));
   4185   ASSERT(ToRegister(instr->function()).is(edi));
   4186   ASSERT(instr->HasPointerMap());
   4187 
   4188   Handle<JSFunction> known_function = instr->hydrogen()->known_function();
   4189   if (known_function.is_null()) {
   4190     LPointerMap* pointers = instr->pointer_map();
   4191     RecordPosition(pointers->position());
   4192     SafepointGenerator generator(
   4193         this, pointers, Safepoint::kLazyDeopt);
   4194     ParameterCount count(instr->arity());
   4195     __ InvokeFunction(edi, count, CALL_FUNCTION, generator, CALL_AS_METHOD);
   4196   } else {
   4197     CallKnownFunction(known_function,
   4198                       instr->hydrogen()->formal_parameter_count(),
   4199                       instr->arity(),
   4200                       instr,
   4201                       CALL_AS_METHOD,
   4202                       EDI_CONTAINS_TARGET);
   4203   }
   4204 }
   4205 
   4206 
   4207 void LCodeGen::DoCallKeyed(LCallKeyed* instr) {
   4208   ASSERT(ToRegister(instr->context()).is(esi));
   4209   ASSERT(ToRegister(instr->key()).is(ecx));
   4210   ASSERT(ToRegister(instr->result()).is(eax));
   4211 
   4212   int arity = instr->arity();
   4213   Handle<Code> ic =
   4214       isolate()->stub_cache()->ComputeKeyedCallInitialize(arity);
   4215   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   4216 }
   4217 
   4218 
   4219 void LCodeGen::DoCallNamed(LCallNamed* instr) {
   4220   ASSERT(ToRegister(instr->context()).is(esi));
   4221   ASSERT(ToRegister(instr->result()).is(eax));
   4222 
   4223   int arity = instr->arity();
   4224   RelocInfo::Mode mode = RelocInfo::CODE_TARGET;
   4225   Handle<Code> ic =
   4226       isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
   4227   __ mov(ecx, instr->name());
   4228   CallCode(ic, mode, instr);
   4229 }
   4230 
   4231 
   4232 void LCodeGen::DoCallFunction(LCallFunction* instr) {
   4233   ASSERT(ToRegister(instr->context()).is(esi));
   4234   ASSERT(ToRegister(instr->function()).is(edi));
   4235   ASSERT(ToRegister(instr->result()).is(eax));
   4236 
   4237   int arity = instr->arity();
   4238   CallFunctionStub stub(arity, NO_CALL_FUNCTION_FLAGS);
   4239   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
   4240 }
   4241 
   4242 
   4243 void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
   4244   ASSERT(ToRegister(instr->context()).is(esi));
   4245   ASSERT(ToRegister(instr->result()).is(eax));
   4246 
   4247   int arity = instr->arity();
   4248   RelocInfo::Mode mode = RelocInfo::CODE_TARGET_CONTEXT;
   4249   Handle<Code> ic =
   4250       isolate()->stub_cache()->ComputeCallInitialize(arity, mode);
   4251   __ mov(ecx, instr->name());
   4252   CallCode(ic, mode, instr);
   4253 }
   4254 
   4255 
   4256 void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
   4257   ASSERT(ToRegister(instr->result()).is(eax));
   4258   CallKnownFunction(instr->hydrogen()->target(),
   4259                     instr->hydrogen()->formal_parameter_count(),
   4260                     instr->arity(),
   4261                     instr,
   4262                     CALL_AS_FUNCTION,
   4263                     EDI_UNINITIALIZED);
   4264 }
   4265 
   4266 
   4267 void LCodeGen::DoCallNew(LCallNew* instr) {
   4268   ASSERT(ToRegister(instr->context()).is(esi));
   4269   ASSERT(ToRegister(instr->constructor()).is(edi));
   4270   ASSERT(ToRegister(instr->result()).is(eax));
   4271 
   4272   // No cell in ebx for construct type feedback in optimized code
   4273   Handle<Object> undefined_value(isolate()->factory()->undefined_value());
   4274   __ mov(ebx, Immediate(undefined_value));
   4275   CallConstructStub stub(NO_CALL_FUNCTION_FLAGS);
   4276   __ Set(eax, Immediate(instr->arity()));
   4277   CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
   4278 }
   4279 
   4280 
   4281 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
   4282   ASSERT(ToRegister(instr->context()).is(esi));
   4283   ASSERT(ToRegister(instr->constructor()).is(edi));
   4284   ASSERT(ToRegister(instr->result()).is(eax));
   4285 
   4286   __ Set(eax, Immediate(instr->arity()));
   4287   __ mov(ebx, instr->hydrogen()->property_cell());
   4288   ElementsKind kind = instr->hydrogen()->elements_kind();
   4289   AllocationSiteOverrideMode override_mode =
   4290       (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
   4291           ? DISABLE_ALLOCATION_SITES
   4292           : DONT_OVERRIDE;
   4293   ContextCheckMode context_mode = CONTEXT_CHECK_NOT_REQUIRED;
   4294 
   4295   if (instr->arity() == 0) {
   4296     ArrayNoArgumentConstructorStub stub(kind, context_mode, override_mode);
   4297     CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
   4298   } else if (instr->arity() == 1) {
   4299     Label done;
   4300     if (IsFastPackedElementsKind(kind)) {
   4301       Label packed_case;
    4302       // Packed kinds stay packed only for a zero length argument,
    4303       // so look at the first (length) argument on the stack.
   4304       __ mov(ecx, Operand(esp, 0));
   4305       __ test(ecx, ecx);
   4306       __ j(zero, &packed_case);
   4307 
   4308       ElementsKind holey_kind = GetHoleyElementsKind(kind);
   4309       ArraySingleArgumentConstructorStub stub(holey_kind, context_mode,
   4310                                               override_mode);
   4311       CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
   4312       __ jmp(&done);
   4313       __ bind(&packed_case);
   4314     }
   4315 
   4316     ArraySingleArgumentConstructorStub stub(kind, context_mode, override_mode);
   4317     CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
   4318     __ bind(&done);
   4319   } else {
   4320     ArrayNArgumentsConstructorStub stub(kind, context_mode, override_mode);
   4321     CallCode(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL, instr);
   4322   }
   4323 }
   4324 
   4325 
   4326 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
   4327   CallRuntime(instr->function(), instr->arity(), instr);
   4328 }
   4329 
   4330 
   4331 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
   4332   Register result = ToRegister(instr->result());
   4333   Register base = ToRegister(instr->base_object());
   4334   __ lea(result, Operand(base, instr->offset()));
   4335 }
   4336 
   4337 
   4338 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
   4339   Representation representation = instr->representation();
   4340 
   4341   HObjectAccess access = instr->hydrogen()->access();
   4342   int offset = access.offset();
   4343 
   4344   if (access.IsExternalMemory()) {
   4345     ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
   4346     MemOperand operand = instr->object()->IsConstantOperand()
   4347         ? MemOperand::StaticVariable(
   4348             ToExternalReference(LConstantOperand::cast(instr->object())))
   4349         : MemOperand(ToRegister(instr->object()), offset);
   4350     if (instr->value()->IsConstantOperand()) {
   4351       LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
   4352       __ mov(operand, Immediate(ToInteger32(operand_value)));
   4353     } else {
   4354       Register value = ToRegister(instr->value());
   4355       __ mov(operand, value);
   4356     }
   4357     return;
   4358   }
   4359 
   4360   Register object = ToRegister(instr->object());
   4361   Handle<Map> transition = instr->transition();
   4362 
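           // With field representation tracking, a constant that contradicts the
           // field's recorded representation can never be stored, so deoptimize
           // unconditionally in that case.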
   4363   if (FLAG_track_fields && representation.IsSmi()) {
   4364     if (instr->value()->IsConstantOperand()) {
   4365       LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
   4366       if (!IsSmi(operand_value)) {
   4367         DeoptimizeIf(no_condition, instr->environment());
   4368       }
   4369     }
   4370   } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
   4371     if (instr->value()->IsConstantOperand()) {
   4372       LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
   4373       if (IsInteger32(operand_value)) {
   4374         DeoptimizeIf(no_condition, instr->environment());
   4375       }
   4376     } else {
   4377       if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   4378         Register value = ToRegister(instr->value());
   4379         __ test(value, Immediate(kSmiTagMask));
   4380         DeoptimizeIf(zero, instr->environment());
   4381       }
   4382     }
   4383   } else if (FLAG_track_double_fields && representation.IsDouble()) {
   4384     ASSERT(transition.is_null());
   4385     ASSERT(access.IsInobject());
   4386     ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
   4387     if (CpuFeatures::IsSupported(SSE2)) {
   4388       CpuFeatureScope scope(masm(), SSE2);
   4389       XMMRegister value = ToDoubleRegister(instr->value());
   4390       __ movdbl(FieldOperand(object, offset), value);
   4391     } else {
   4392       X87Register value = ToX87Register(instr->value());
   4393       X87Mov(FieldOperand(object, offset), value);
   4394     }
   4395     return;
   4396   }
   4397 
   4398   if (!transition.is_null()) {
   4399     if (!instr->hydrogen()->NeedsWriteBarrierForMap()) {
   4400       __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
   4401     } else {
   4402       Register temp = ToRegister(instr->temp());
   4403       Register temp_map = ToRegister(instr->temp_map());
   4404       __ mov(temp_map, transition);
   4405       __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
   4406       // Update the write barrier for the map field.
   4407       __ RecordWriteField(object,
   4408                           HeapObject::kMapOffset,
   4409                           temp_map,
   4410                           temp,
   4411                           GetSaveFPRegsMode(),
   4412                           OMIT_REMEMBERED_SET,
   4413                           OMIT_SMI_CHECK);
   4414     }
   4415   }
   4416 
   4417   // Do the store.
   4418   SmiCheck check_needed =
   4419       instr->hydrogen()->value()->IsHeapObject()
   4420           ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   4421 
   4422   Register write_register = object;
   4423   if (!access.IsInobject()) {
   4424     write_register = ToRegister(instr->temp());
   4425     __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
   4426   }
   4427 
   4428   if (instr->value()->IsConstantOperand()) {
   4429     LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
   4430     if (operand_value->IsRegister()) {
   4431       __ mov(FieldOperand(write_register, offset), ToRegister(operand_value));
   4432     } else {
   4433       Handle<Object> handle_value = ToHandle(operand_value);
   4434       ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
   4435       __ mov(FieldOperand(write_register, offset), handle_value);
   4436     }
   4437   } else {
   4438     __ mov(FieldOperand(write_register, offset), ToRegister(instr->value()));
   4439   }
   4440 
   4441   if (instr->hydrogen()->NeedsWriteBarrier()) {
   4442     Register value = ToRegister(instr->value());
   4443     Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
   4444     // Update the write barrier for the object for in-object properties.
   4445     __ RecordWriteField(write_register,
   4446                         offset,
   4447                         value,
   4448                         temp,
   4449                         GetSaveFPRegsMode(),
   4450                         EMIT_REMEMBERED_SET,
   4451                         check_needed);
   4452   }
   4453 }
   4454 
   4455 
   4456 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
   4457   ASSERT(ToRegister(instr->context()).is(esi));
   4458   ASSERT(ToRegister(instr->object()).is(edx));
   4459   ASSERT(ToRegister(instr->value()).is(eax));
   4460 
   4461   __ mov(ecx, instr->name());
   4462   Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
   4463       ? isolate()->builtins()->StoreIC_Initialize_Strict()
   4464       : isolate()->builtins()->StoreIC_Initialize();
   4465   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   4466 }
   4467 
   4468 
   4469 void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
   4470   if (FLAG_debug_code && check->hydrogen()->skip_check()) {
   4471     Label done;
   4472     __ j(NegateCondition(cc), &done, Label::kNear);
   4473     __ int3();
   4474     __ bind(&done);
   4475   } else {
   4476     DeoptimizeIf(cc, check->environment());
   4477   }
   4478 }
   4479 
   4480 
   4481 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
   4482   if (instr->hydrogen()->skip_check() && !FLAG_debug_code) return;
   4483 
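           // The two branches below compare in opposite operand orders (length
           // vs. index, then index vs. length), so the deopt conditions are
           // mirrored accordingly.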
   4484   if (instr->index()->IsConstantOperand()) {
   4485     Immediate immediate =
   4486         ToImmediate(LConstantOperand::cast(instr->index()),
   4487                     instr->hydrogen()->length()->representation());
   4488     __ cmp(ToOperand(instr->length()), immediate);
   4489     Condition condition =
   4490         instr->hydrogen()->allow_equality() ? below : below_equal;
   4491     ApplyCheckIf(condition, instr);
   4492   } else {
   4493     __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
   4494     Condition condition =
   4495         instr->hydrogen()->allow_equality() ? above : above_equal;
   4496     ApplyCheckIf(condition, instr);
   4497   }
   4498 }
   4499 
   4500 
   4501 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
   4502   ElementsKind elements_kind = instr->elements_kind();
   4503   LOperand* key = instr->key();
   4504   if (!key->IsConstantOperand() &&
   4505       ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
   4506                                   elements_kind)) {
   4507     __ SmiUntag(ToRegister(key));
   4508   }
   4509   Operand operand(BuildFastArrayOperand(
   4510       instr->elements(),
   4511       key,
   4512       instr->hydrogen()->key()->representation(),
   4513       elements_kind,
   4514       0,
   4515       instr->additional_index()));
   4516   if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
   4517     if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
   4518       CpuFeatureScope scope(masm(), SSE2);
   4519       __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
   4520       __ movss(operand, xmm0);
   4521     } else {
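               // Without SSE2 the value is on the x87 stack: fld(0) duplicates it
               // so that fstp_s can store-and-pop without losing the original.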
   4522       __ fld(0);
   4523       __ fstp_s(operand);
   4524     }
   4525   } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
   4526     if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
   4527       CpuFeatureScope scope(masm(), SSE2);
   4528       __ movdbl(operand, ToDoubleRegister(instr->value()));
   4529     } else {
   4530       X87Mov(operand, ToX87Register(instr->value()));
   4531     }
   4532   } else {
   4533     Register value = ToRegister(instr->value());
   4534     switch (elements_kind) {
   4535       case EXTERNAL_PIXEL_ELEMENTS:
   4536       case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
   4537       case EXTERNAL_BYTE_ELEMENTS:
   4538         __ mov_b(operand, value);
   4539         break;
   4540       case EXTERNAL_SHORT_ELEMENTS:
   4541       case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
   4542         __ mov_w(operand, value);
   4543         break;
   4544       case EXTERNAL_INT_ELEMENTS:
   4545       case EXTERNAL_UNSIGNED_INT_ELEMENTS:
   4546         __ mov(operand, value);
   4547         break;
   4548       case EXTERNAL_FLOAT_ELEMENTS:
   4549       case EXTERNAL_DOUBLE_ELEMENTS:
   4550       case FAST_SMI_ELEMENTS:
   4551       case FAST_ELEMENTS:
   4552       case FAST_DOUBLE_ELEMENTS:
   4553       case FAST_HOLEY_SMI_ELEMENTS:
   4554       case FAST_HOLEY_ELEMENTS:
   4555       case FAST_HOLEY_DOUBLE_ELEMENTS:
   4556       case DICTIONARY_ELEMENTS:
   4557       case NON_STRICT_ARGUMENTS_ELEMENTS:
   4558         UNREACHABLE();
   4559         break;
   4560     }
   4561   }
   4562 }
   4563 
   4564 
   4565 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
   4566   ExternalReference canonical_nan_reference =
   4567       ExternalReference::address_of_canonical_non_hole_nan();
   4568   Operand double_store_operand = BuildFastArrayOperand(
   4569       instr->elements(),
   4570       instr->key(),
   4571       instr->hydrogen()->key()->representation(),
   4572       FAST_DOUBLE_ELEMENTS,
   4573       FixedDoubleArray::kHeaderSize - kHeapObjectTag,
   4574       instr->additional_index());
   4575 
   4576   if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
   4577     CpuFeatureScope scope(masm(), SSE2);
   4578     XMMRegister value = ToDoubleRegister(instr->value());
   4579 
   4580     if (instr->NeedsCanonicalization()) {
   4581       Label have_value;
   4582 
   4583       __ ucomisd(value, value);
    4584       __ j(parity_odd, &have_value);  // Not NaN, so no canonicalization.
   4585 
   4586       __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
   4587       __ bind(&have_value);
   4588     }
   4589 
   4590     __ movdbl(double_store_operand, value);
   4591   } else {
   4592     // Can't use SSE2 in the serializer
   4593     if (instr->hydrogen()->IsConstantHoleStore()) {
   4594       // This means we should store the (double) hole. No floating point
   4595       // registers required.
   4596       double nan_double = FixedDoubleArray::hole_nan_as_double();
   4597       uint64_t int_val = BitCast<uint64_t, double>(nan_double);
   4598       int32_t lower = static_cast<int32_t>(int_val);
   4599       int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
   4600 
   4601       __ mov(double_store_operand, Immediate(lower));
   4602       Operand double_store_operand2 = BuildFastArrayOperand(
   4603           instr->elements(),
   4604           instr->key(),
   4605           instr->hydrogen()->key()->representation(),
   4606           FAST_DOUBLE_ELEMENTS,
   4607           FixedDoubleArray::kHeaderSize - kHeapObjectTag + kPointerSize,
   4608           instr->additional_index());
   4609       __ mov(double_store_operand2, Immediate(upper));
   4610     } else {
   4611       Label no_special_nan_handling;
   4612       X87Register value = ToX87Register(instr->value());
   4613       X87Fxch(value);
   4614 
   4615       if (instr->NeedsCanonicalization()) {
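                 // FCmp consumes two values, so push two copies first; only NaN
                 // compares unordered with itself, so parity_odd below means the
                 // value needs no canonicalization.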
   4616         __ fld(0);
   4617         __ fld(0);
   4618         __ FCmp();
   4619 
   4620         __ j(parity_odd, &no_special_nan_handling);
   4621         __ sub(esp, Immediate(kDoubleSize));
   4622         __ fst_d(MemOperand(esp, 0));
   4623         __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
   4624                Immediate(kHoleNanUpper32));
   4625         __ add(esp, Immediate(kDoubleSize));
   4626         Label canonicalize;
   4627         __ j(not_equal, &canonicalize);
   4628         __ jmp(&no_special_nan_handling);
   4629         __ bind(&canonicalize);
   4630         __ fstp(0);
   4631         __ fld_d(Operand::StaticVariable(canonical_nan_reference));
   4632       }
   4633 
   4634       __ bind(&no_special_nan_handling);
   4635       __ fst_d(double_store_operand);
   4636     }
   4637   }
   4638 }
   4639 
   4640 
   4641 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
   4642   Register elements = ToRegister(instr->elements());
   4643   Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
   4644 
   4645   Operand operand = BuildFastArrayOperand(
   4646       instr->elements(),
   4647       instr->key(),
   4648       instr->hydrogen()->key()->representation(),
   4649       FAST_ELEMENTS,
   4650       FixedArray::kHeaderSize - kHeapObjectTag,
   4651       instr->additional_index());
   4652   if (instr->value()->IsRegister()) {
   4653     __ mov(operand, ToRegister(instr->value()));
   4654   } else {
   4655     LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
   4656     if (IsSmi(operand_value)) {
   4657       Immediate immediate = ToImmediate(operand_value, Representation::Smi());
   4658       __ mov(operand, immediate);
   4659     } else {
   4660       ASSERT(!IsInteger32(operand_value));
   4661       Handle<Object> handle_value = ToHandle(operand_value);
   4662       __ mov(operand, handle_value);
   4663     }
   4664   }
   4665 
   4666   if (instr->hydrogen()->NeedsWriteBarrier()) {
   4667     ASSERT(instr->value()->IsRegister());
   4668     Register value = ToRegister(instr->value());
   4669     ASSERT(!instr->key()->IsConstantOperand());
   4670     SmiCheck check_needed =
   4671         instr->hydrogen()->value()->IsHeapObject()
   4672           ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   4673     // Compute address of modified element and store it into key register.
   4674     __ lea(key, operand);
   4675     __ RecordWrite(elements,
   4676                    key,
   4677                    value,
   4678                    GetSaveFPRegsMode(),
   4679                    EMIT_REMEMBERED_SET,
   4680                    check_needed);
   4681   }
   4682 }
   4683 
   4684 
   4685 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
    4686   // By cases: external array, fast double, or fast (tagged) elements.
   4687   if (instr->is_external()) {
   4688     DoStoreKeyedExternalArray(instr);
   4689   } else if (instr->hydrogen()->value()->representation().IsDouble()) {
   4690     DoStoreKeyedFixedDoubleArray(instr);
   4691   } else {
   4692     DoStoreKeyedFixedArray(instr);
   4693   }
   4694 }
   4695 
   4696 
   4697 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
   4698   ASSERT(ToRegister(instr->context()).is(esi));
   4699   ASSERT(ToRegister(instr->object()).is(edx));
   4700   ASSERT(ToRegister(instr->key()).is(ecx));
   4701   ASSERT(ToRegister(instr->value()).is(eax));
   4702 
   4703   Handle<Code> ic = (instr->strict_mode_flag() == kStrictMode)
   4704       ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
   4705       : isolate()->builtins()->KeyedStoreIC_Initialize();
   4706   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   4707 }
   4708 
   4709 
   4710 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
   4711   Register object = ToRegister(instr->object());
   4712   Register temp = ToRegister(instr->temp());
   4713   __ TestJSArrayForAllocationMemento(object, temp);
   4714   DeoptimizeIf(equal, instr->environment());
   4715 }
   4716 
   4717 
   4718 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
   4719   Register object_reg = ToRegister(instr->object());
   4720 
   4721   Handle<Map> from_map = instr->original_map();
   4722   Handle<Map> to_map = instr->transitioned_map();
   4723   ElementsKind from_kind = instr->from_kind();
   4724   ElementsKind to_kind = instr->to_kind();
   4725 
   4726   Label not_applicable;
   4727   bool is_simple_map_transition =
   4728       IsSimpleMapChangeTransition(from_kind, to_kind);
   4729   Label::Distance branch_distance =
   4730       is_simple_map_transition ? Label::kNear : Label::kFar;
   4731   __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
   4732   __ j(not_equal, &not_applicable, branch_distance);
   4733   if (is_simple_map_transition) {
   4734     Register new_map_reg = ToRegister(instr->new_map_temp());
   4735     Handle<Map> map = instr->hydrogen()->transitioned_map();
   4736     __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
   4737            Immediate(map));
   4738     // Write barrier.
   4739     ASSERT_NE(instr->temp(), NULL);
   4740     __ RecordWriteForMap(object_reg, to_map, new_map_reg,
   4741                          ToRegister(instr->temp()),
   4742                          kDontSaveFPRegs);
   4743   } else {
   4744     PushSafepointRegistersScope scope(this);
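             // The stub expects the object in eax; route it through the stack if
             // it currently lives in a different register.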
   4745     if (!object_reg.is(eax)) {
   4746       __ push(object_reg);
   4747     }
   4748     LoadContextFromDeferred(instr->context());
   4749     if (!object_reg.is(eax)) {
   4750       __ pop(eax);
   4751     }
   4752     __ mov(ebx, to_map);
   4753     TransitionElementsKindStub stub(from_kind, to_kind);
   4754     __ CallStub(&stub);
   4755     RecordSafepointWithRegisters(
   4756         instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   4757   }
   4758   __ bind(&not_applicable);
   4759 }
   4760 
   4761 
   4762 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
   4763   class DeferredStringCharCodeAt: public LDeferredCode {
   4764    public:
   4765     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
   4766         : LDeferredCode(codegen), instr_(instr) { }
   4767     virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
   4768     virtual LInstruction* instr() { return instr_; }
   4769    private:
   4770     LStringCharCodeAt* instr_;
   4771   };
   4772 
   4773   DeferredStringCharCodeAt* deferred =
   4774       new(zone()) DeferredStringCharCodeAt(this, instr);
   4775 
   4776   StringCharLoadGenerator::Generate(masm(),
   4777                                     factory(),
   4778                                     ToRegister(instr->string()),
   4779                                     ToRegister(instr->index()),
   4780                                     ToRegister(instr->result()),
   4781                                     deferred->entry());
   4782   __ bind(deferred->exit());
   4783 }
   4784 
   4785 
   4786 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
   4787   Register string = ToRegister(instr->string());
   4788   Register result = ToRegister(instr->result());
   4789 
   4790   // TODO(3095996): Get rid of this. For now, we need to make the
   4791   // result register contain a valid pointer because it is already
   4792   // contained in the register pointer map.
   4793   __ Set(result, Immediate(0));
   4794 
   4795   PushSafepointRegistersScope scope(this);
   4796   __ push(string);
   4797   // Push the index as a smi. This is safe because of the checks in
   4798   // DoStringCharCodeAt above.
   4799   STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
   4800   if (instr->index()->IsConstantOperand()) {
   4801     Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()),
   4802                                       Representation::Smi());
   4803     __ push(immediate);
   4804   } else {
   4805     Register index = ToRegister(instr->index());
   4806     __ SmiTag(index);
   4807     __ push(index);
   4808   }
   4809   CallRuntimeFromDeferred(Runtime::kStringCharCodeAt, 2,
   4810                           instr, instr->context());
   4811   __ AssertSmi(eax);
   4812   __ SmiUntag(eax);
   4813   __ StoreToSafepointRegisterSlot(result, eax);
   4814 }
   4815 
   4816 
   4817 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
   4818   class DeferredStringCharFromCode: public LDeferredCode {
   4819    public:
   4820     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
   4821         : LDeferredCode(codegen), instr_(instr) { }
   4822     virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
   4823     virtual LInstruction* instr() { return instr_; }
   4824    private:
   4825     LStringCharFromCode* instr_;
   4826   };
   4827 
   4828   DeferredStringCharFromCode* deferred =
   4829       new(zone()) DeferredStringCharFromCode(this, instr);
   4830 
   4831   ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
   4832   Register char_code = ToRegister(instr->char_code());
   4833   Register result = ToRegister(instr->result());
   4834   ASSERT(!char_code.is(result));
   4835 
   4836   __ cmp(char_code, String::kMaxOneByteCharCode);
   4837   __ j(above, deferred->entry());
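           // Fast path: look the char code up in the single-character string
           // cache; a miss loads undefined and jumps to the deferred code.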
   4838   __ Set(result, Immediate(factory()->single_character_string_cache()));
   4839   __ mov(result, FieldOperand(result,
   4840                               char_code, times_pointer_size,
   4841                               FixedArray::kHeaderSize));
   4842   __ cmp(result, factory()->undefined_value());
   4843   __ j(equal, deferred->entry());
   4844   __ bind(deferred->exit());
   4845 }
   4846 
   4847 
   4848 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
   4849   Register char_code = ToRegister(instr->char_code());
   4850   Register result = ToRegister(instr->result());
   4851 
   4852   // TODO(3095996): Get rid of this. For now, we need to make the
   4853   // result register contain a valid pointer because it is already
   4854   // contained in the register pointer map.
   4855   __ Set(result, Immediate(0));
   4856 
   4857   PushSafepointRegistersScope scope(this);
   4858   __ SmiTag(char_code);
   4859   __ push(char_code);
   4860   CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
   4861   __ StoreToSafepointRegisterSlot(result, eax);
   4862 }
   4863 
   4864 
   4865 void LCodeGen::DoStringAdd(LStringAdd* instr) {
   4866   EmitPushTaggedOperand(instr->left());
   4867   EmitPushTaggedOperand(instr->right());
   4868   StringAddStub stub(instr->hydrogen()->flags());
   4869   CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
   4870 }
   4871 
   4872 
   4873 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
   4874   if (CpuFeatures::IsSupported(SSE2)) {
   4875     CpuFeatureScope scope(masm(), SSE2);
   4876     LOperand* input = instr->value();
   4877     ASSERT(input->IsRegister() || input->IsStackSlot());
   4878     LOperand* output = instr->result();
   4879     ASSERT(output->IsDoubleRegister());
   4880     __ cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
   4881   } else {
   4882     UNREACHABLE();
   4883   }
   4884 }
   4885 
   4886 
   4887 void LCodeGen::DoInteger32ToSmi(LInteger32ToSmi* instr) {
   4888   Register input = ToRegister(instr->value());
   4889   __ SmiTag(input);
   4890   if (!instr->hydrogen()->value()->HasRange() ||
   4891       !instr->hydrogen()->value()->range()->IsInSmiRange()) {
   4892     DeoptimizeIf(overflow, instr->environment());
   4893   }
   4894 }
   4895 
   4896 
   4897 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
   4898   CpuFeatureScope scope(masm(), SSE2);
   4899   LOperand* input = instr->value();
   4900   LOperand* output = instr->result();
   4901   LOperand* temp = instr->temp();
   4902 
   4903   __ LoadUint32(ToDoubleRegister(output),
   4904                 ToRegister(input),
   4905                 ToDoubleRegister(temp));
   4906 }
   4907 
   4908 
   4909 void LCodeGen::DoUint32ToSmi(LUint32ToSmi* instr) {
   4910   Register input = ToRegister(instr->value());
   4911   if (!instr->hydrogen()->value()->HasRange() ||
   4912       !instr->hydrogen()->value()->range()->IsInSmiRange()) {
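             // A uint32 value fits in a 31-bit Smi only if it is below 2^30,
             // i.e. if bits 30 and 31 are both clear.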
   4913     __ test(input, Immediate(0xc0000000));
   4914     DeoptimizeIf(not_zero, instr->environment());
   4915   }
   4916   __ SmiTag(input);
   4917 }
   4918 
   4919 
   4920 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
   4921   class DeferredNumberTagI: public LDeferredCode {
   4922    public:
   4923     DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
   4924         : LDeferredCode(codegen), instr_(instr) { }
   4925     virtual void Generate() {
   4926       codegen()->DoDeferredNumberTagI(instr_, instr_->value(), SIGNED_INT32);
   4927     }
   4928     virtual LInstruction* instr() { return instr_; }
   4929    private:
   4930     LNumberTagI* instr_;
   4931   };
   4932 
   4933   LOperand* input = instr->value();
   4934   ASSERT(input->IsRegister() && input->Equals(instr->result()));
   4935   Register reg = ToRegister(input);
   4936 
   4937   DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
   4938   __ SmiTag(reg);
   4939   __ j(overflow, deferred->entry());
   4940   __ bind(deferred->exit());
   4941 }
   4942 
   4943 
   4944 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
   4945   class DeferredNumberTagU: public LDeferredCode {
   4946    public:
   4947     DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
   4948         : LDeferredCode(codegen), instr_(instr) { }
   4949     virtual void Generate() {
   4950       codegen()->DoDeferredNumberTagI(instr_, instr_->value(), UNSIGNED_INT32);
   4951     }
   4952     virtual LInstruction* instr() { return instr_; }
   4953    private:
   4954     LNumberTagU* instr_;
   4955   };
   4956 
   4957   LOperand* input = instr->value();
   4958   ASSERT(input->IsRegister() && input->Equals(instr->result()));
   4959   Register reg = ToRegister(input);
   4960 
   4961   DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
   4962   __ cmp(reg, Immediate(Smi::kMaxValue));
   4963   __ j(above, deferred->entry());
   4964   __ SmiTag(reg);
   4965   __ bind(deferred->exit());
   4966 }
   4967 
   4968 
   4969 void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
   4970                                     LOperand* value,
   4971                                     IntegerSignedness signedness) {
   4972   Label slow;
   4973   Register reg = ToRegister(value);
   4974   Register tmp = reg.is(eax) ? ecx : eax;
   4975 
   4976   // Preserve the value of all registers.
   4977   PushSafepointRegistersScope scope(this);
   4978 
   4979   Label done;
   4980 
   4981   if (signedness == SIGNED_INT32) {
   4982     // There was overflow, so bits 30 and 31 of the original integer
   4983     // disagree. Try to allocate a heap number in new space and store
   4984     // the value in there. If that fails, call the runtime system.
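             // After SmiUntag the sign bit still comes from bit 30, so xoring
             // with 0x80000000 restores the original int32 value.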
   4985     __ SmiUntag(reg);
   4986     __ xor_(reg, 0x80000000);
   4987     if (CpuFeatures::IsSupported(SSE2)) {
   4988       CpuFeatureScope feature_scope(masm(), SSE2);
   4989       __ cvtsi2sd(xmm0, Operand(reg));
   4990     } else {
   4991       __ push(reg);
   4992       __ fild_s(Operand(esp, 0));
   4993       __ pop(reg);
   4994     }
   4995   } else {
   4996     if (CpuFeatures::IsSupported(SSE2)) {
   4997       CpuFeatureScope feature_scope(masm(), SSE2);
   4998       __ LoadUint32(xmm0, reg,
   4999                     ToDoubleRegister(LNumberTagU::cast(instr)->temp()));
   5000     } else {
   5001       // There's no fild variant for unsigned values, so zero-extend to a 64-bit
   5002       // int manually.
   5003       __ push(Immediate(0));
   5004       __ push(reg);
   5005       __ fild_d(Operand(esp, 0));
   5006       __ pop(reg);
   5007       __ pop(reg);
   5008     }
   5009   }
   5010 
   5011   if (FLAG_inline_new) {
   5012     __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
   5013     __ jmp(&done, Label::kNear);
   5014   }
   5015 
   5016   // Slow case: Call the runtime system to do the number allocation.
   5017   __ bind(&slow);
   5018 
   5019   // TODO(3095996): Put a valid pointer value in the stack slot where the result
   5020   // register is stored, as this register is in the pointer map, but contains an
   5021   // integer value.
   5022   __ StoreToSafepointRegisterSlot(reg, Immediate(0));
   5023   // NumberTagI and NumberTagD use the context from the frame, rather than
   5024   // the environment's HContext or HInlinedContext value.
   5025   // They only call Runtime::kAllocateHeapNumber.
   5026   // The corresponding HChange instructions are added in a phase that does
   5027   // not have easy access to the local context.
   5028   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   5029   __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   5030   RecordSafepointWithRegisters(
   5031       instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   5032   if (!reg.is(eax)) __ mov(reg, eax);
   5033 
    5034   // Done. Store the double value (from xmm0, or from the x87 stack when
    5035   // SSE2 is unavailable) into the allocated heap number.
   5036   __ bind(&done);
   5037   if (CpuFeatures::IsSupported(SSE2)) {
   5038     CpuFeatureScope feature_scope(masm(), SSE2);
   5039     __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), xmm0);
   5040   } else {
   5041     __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
   5042   }
   5043   __ StoreToSafepointRegisterSlot(reg, reg);
   5044 }
   5045 
   5046 
   5047 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
   5048   class DeferredNumberTagD: public LDeferredCode {
   5049    public:
   5050     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
   5051         : LDeferredCode(codegen), instr_(instr) { }
   5052     virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
   5053     virtual LInstruction* instr() { return instr_; }
   5054    private:
   5055     LNumberTagD* instr_;
   5056   };
   5057 
   5058   Register reg = ToRegister(instr->result());
   5059 
   5060   bool use_sse2 = CpuFeatures::IsSupported(SSE2);
   5061   if (!use_sse2) {
    5062     // Put the value on top of the x87 stack.
   5063     X87Register src = ToX87Register(instr->value());
   5064     X87LoadForUsage(src);
   5065   }
   5066 
   5067   DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
   5068   if (FLAG_inline_new) {
   5069     Register tmp = ToRegister(instr->temp());
   5070     __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
   5071   } else {
   5072     __ jmp(deferred->entry());
   5073   }
   5074   __ bind(deferred->exit());
   5075   if (use_sse2) {
   5076     CpuFeatureScope scope(masm(), SSE2);
   5077     XMMRegister input_reg = ToDoubleRegister(instr->value());
   5078     __ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
   5079   } else {
   5080     __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
   5081   }
   5082 }
   5083 
   5084 
   5085 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
   5086   // TODO(3095996): Get rid of this. For now, we need to make the
   5087   // result register contain a valid pointer because it is already
   5088   // contained in the register pointer map.
   5089   Register reg = ToRegister(instr->result());
   5090   __ Set(reg, Immediate(0));
   5091 
   5092   PushSafepointRegistersScope scope(this);
   5093   // NumberTagI and NumberTagD use the context from the frame, rather than
   5094   // the environment's HContext or HInlinedContext value.
   5095   // They only call Runtime::kAllocateHeapNumber.
   5096   // The corresponding HChange instructions are added in a phase that does
   5097   // not have easy access to the local context.
   5098   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   5099   __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   5100   RecordSafepointWithRegisters(
   5101       instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   5102   __ StoreToSafepointRegisterSlot(reg, eax);
   5103 }
   5104 
   5105 
   5106 void LCodeGen::DoSmiTag(LSmiTag* instr) {
   5107   LOperand* input = instr->value();
   5108   ASSERT(input->IsRegister() && input->Equals(instr->result()));
   5109   ASSERT(!instr->hydrogen_value()->CheckFlag(HValue::kCanOverflow));
   5110   __ SmiTag(ToRegister(input));
   5111 }
   5112 
   5113 
   5114 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
   5115   LOperand* input = instr->value();
   5116   Register result = ToRegister(input);
   5117   ASSERT(input->IsRegister() && input->Equals(instr->result()));
   5118   if (instr->needs_check()) {
   5119     __ test(result, Immediate(kSmiTagMask));
   5120     DeoptimizeIf(not_zero, instr->environment());
   5121   } else {
   5122     __ AssertSmi(result);
   5123   }
   5124   __ SmiUntag(result);
   5125 }
   5126 
   5127 
   5128 void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
   5129                                       Register temp_reg,
   5130                                       X87Register res_reg,
   5131                                       bool can_convert_undefined_to_nan,
   5132                                       bool deoptimize_on_minus_zero,
   5133                                       LEnvironment* env,
   5134                                       NumberUntagDMode mode) {
   5135   Label load_smi, done;
   5136 
   5137   X87PrepareToWrite(res_reg);
   5138   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
   5139     // Smi check.
   5140     __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
   5141 
   5142     // Heap number map check.
   5143     __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
   5144            factory()->heap_number_map());
   5145     if (!can_convert_undefined_to_nan) {
   5146       DeoptimizeIf(not_equal, env);
   5147     } else {
   5148       Label heap_number, convert;
   5149       __ j(equal, &heap_number, Label::kNear);
   5150 
   5151       // Convert undefined (or hole) to NaN.
   5152       __ cmp(input_reg, factory()->undefined_value());
   5153       DeoptimizeIf(not_equal, env);
   5154 
   5155       __ bind(&convert);
   5156       ExternalReference nan =
   5157           ExternalReference::address_of_canonical_non_hole_nan();
   5158       __ fld_d(Operand::StaticVariable(nan));
   5159       __ jmp(&done, Label::kNear);
   5160 
   5161       __ bind(&heap_number);
   5162     }
   5163     // Heap number to x87 conversion.
   5164     __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
   5165     if (deoptimize_on_minus_zero) {
   5166       __ fldz();
   5167       __ FCmp();
   5168       __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
   5169       __ j(not_zero, &done, Label::kNear);
   5170 
   5171       // Use general purpose registers to check if we have -0.0
   5172       __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
   5173       __ test(temp_reg, Immediate(HeapNumber::kSignMask));
   5174       __ j(zero, &done, Label::kNear);
   5175 
   5176       // Pop FPU stack before deoptimizing.
   5177       __ fstp(0);
   5178       DeoptimizeIf(not_zero, env);
   5179     }
   5180     __ jmp(&done, Label::kNear);
   5181   } else {
   5182     ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
   5183   }
   5184 
   5185   __ bind(&load_smi);
   5186   __ SmiUntag(input_reg);  // Untag smi before converting to float.
   5187   __ push(input_reg);
   5188   __ fild_s(Operand(esp, 0));
   5189   __ pop(input_reg);
   5190   __ SmiTag(input_reg);  // Retag smi.
   5191   __ bind(&done);
   5192   X87CommitWrite(res_reg);
   5193 }
   5194 
   5195 
   5196 void LCodeGen::EmitNumberUntagD(Register input_reg,
   5197                                 Register temp_reg,
   5198                                 XMMRegister result_reg,
   5199                                 bool can_convert_undefined_to_nan,
   5200                                 bool deoptimize_on_minus_zero,
   5201                                 LEnvironment* env,
   5202                                 NumberUntagDMode mode) {
   5203   Label load_smi, done;
   5204 
   5205   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
   5206     // Smi check.
   5207     __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
   5208 
   5209     // Heap number map check.
   5210     __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
   5211            factory()->heap_number_map());
   5212     if (!can_convert_undefined_to_nan) {
   5213       DeoptimizeIf(not_equal, env);
   5214     } else {
   5215       Label heap_number, convert;
   5216       __ j(equal, &heap_number, Label::kNear);
   5217 
   5218       // Convert undefined (and hole) to NaN.
   5219       __ cmp(input_reg, factory()->undefined_value());
   5220       DeoptimizeIf(not_equal, env);
   5221 
   5222       __ bind(&convert);
   5223       ExternalReference nan =
   5224           ExternalReference::address_of_canonical_non_hole_nan();
   5225       __ movdbl(result_reg, Operand::StaticVariable(nan));
   5226       __ jmp(&done, Label::kNear);
   5227 
   5228       __ bind(&heap_number);
   5229     }
   5230     // Heap number to XMM conversion.
   5231     __ movdbl(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
   5232     if (deoptimize_on_minus_zero) {
   5233       XMMRegister xmm_scratch = xmm0;
   5234       __ xorps(xmm_scratch, xmm_scratch);
   5235       __ ucomisd(result_reg, xmm_scratch);
   5236       __ j(not_zero, &done, Label::kNear);
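               // The value compared equal to zero; movmskpd copies its sign bit
               // into bit 0 of temp_reg, so a set bit identifies -0.0.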
   5237       __ movmskpd(temp_reg, result_reg);
   5238       __ test_b(temp_reg, 1);
   5239       DeoptimizeIf(not_zero, env);
   5240     }
   5241     __ jmp(&done, Label::kNear);
   5242   } else {
   5243     ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
   5244   }
   5245 
   5246   // Smi to XMM conversion
   5247   __ bind(&load_smi);
   5248   __ SmiUntag(input_reg);  // Untag smi before converting to float.
   5249   __ cvtsi2sd(result_reg, Operand(input_reg));
   5250   __ SmiTag(input_reg);  // Retag smi.
   5251   __ bind(&done);
   5252 }
   5253 
   5254 
   5255 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
   5256   Label done, heap_number;
   5257   Register input_reg = ToRegister(instr->value());
   5258 
   5259   // Heap number map check.
   5260   __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
   5261          factory()->heap_number_map());
   5262 
   5263   if (instr->truncating()) {
   5264     __ j(equal, &heap_number, Label::kNear);
   5265     // Check for undefined. Undefined is converted to zero for truncating
   5266     // conversions.
   5267     __ cmp(input_reg, factory()->undefined_value());
   5268     __ RecordComment("Deferred TaggedToI: cannot truncate");
   5269     DeoptimizeIf(not_equal, instr->environment());
   5270     __ mov(input_reg, 0);
   5271     __ jmp(&done, Label::kNear);
   5272 
   5273     __ bind(&heap_number);
   5274     if (CpuFeatures::IsSupported(SSE3)) {
   5275       CpuFeatureScope scope(masm(), SSE3);
   5276       Label convert;
   5277       // Use more powerful conversion when sse3 is available.
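               // (fisttp truncates regardless of the FPU rounding mode and pops
               // the x87 stack, so no control-word changes are needed.)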
   5278       // Load x87 register with heap number.
   5279       __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
   5280       // Get exponent alone and check for too-big exponent.
   5281       __ mov(input_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
   5282       __ and_(input_reg, HeapNumber::kExponentMask);
   5283       const uint32_t kTooBigExponent =
   5284           (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
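               // Exponents of 63 or more cannot be represented in the signed
               // 64-bit result of fisttp_d, so deoptimize for those.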
   5285       __ cmp(Operand(input_reg), Immediate(kTooBigExponent));
   5286       __ j(less, &convert, Label::kNear);
   5287       // Pop FPU stack before deoptimizing.
   5288       __ fstp(0);
   5289       __ RecordComment("Deferred TaggedToI: exponent too big");
   5290       DeoptimizeIf(no_condition, instr->environment());
   5291 
   5292       // Reserve space for 64 bit answer.
   5293       __ bind(&convert);
   5294       __ sub(Operand(esp), Immediate(kDoubleSize));
   5295       // Do conversion, which cannot fail because we checked the exponent.
   5296       __ fisttp_d(Operand(esp, 0));
   5297       __ mov(input_reg, Operand(esp, 0));  // Low word of answer is the result.
   5298       __ add(Operand(esp), Immediate(kDoubleSize));
   5299     } else if (CpuFeatures::IsSupported(SSE2)) {
   5300       CpuFeatureScope scope(masm(), SSE2);
   5301       XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
   5302       __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
   5303       __ cvttsd2si(input_reg, Operand(xmm0));
   5304       __ cmp(input_reg, 0x80000000u);
   5305       __ j(not_equal, &done);
    5306       // Check if the input was 0x80000000 (kMinInt).
    5307       // If not, the conversion overflowed and we deoptimize.
   5308       ExternalReference min_int = ExternalReference::address_of_min_int();
   5309       __ movdbl(xmm_temp, Operand::StaticVariable(min_int));
   5310       __ ucomisd(xmm_temp, xmm0);
   5311       DeoptimizeIf(not_equal, instr->environment());
   5312       DeoptimizeIf(parity_even, instr->environment());  // NaN.
   5313     } else {
   5314       UNREACHABLE();
   5315     }
   5316   } else if (CpuFeatures::IsSupported(SSE2)) {
   5317     CpuFeatureScope scope(masm(), SSE2);
   5318     // Deoptimize if we don't have a heap number.
   5319     __ RecordComment("Deferred TaggedToI: not a heap number");
   5320     DeoptimizeIf(not_equal, instr->environment());
   5321 
   5322     XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
   5323     __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
   5324     __ cvttsd2si(input_reg, Operand(xmm0));
   5325     __ cvtsi2sd(xmm_temp, Operand(input_reg));
   5326     __ ucomisd(xmm0, xmm_temp);
   5327     __ RecordComment("Deferred TaggedToI: lost precision");
   5328     DeoptimizeIf(not_equal, instr->environment());
   5329     __ RecordComment("Deferred TaggedToI: NaN");
   5330     DeoptimizeIf(parity_even, instr->environment());  // NaN.
   5331     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   5332       __ test(input_reg, Operand(input_reg));
   5333       __ j(not_zero, &done);
   5334       __ movmskpd(input_reg, xmm0);
   5335       __ and_(input_reg, 1);
   5336       __ RecordComment("Deferred TaggedToI: minus zero");
   5337       DeoptimizeIf(not_zero, instr->environment());
   5338     }
   5339   } else {
   5340     UNREACHABLE();
   5341   }
   5342   __ bind(&done);
   5343 }
   5344 
   5345 
   5346 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
   5347   class DeferredTaggedToI: public LDeferredCode {
   5348    public:
   5349     DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
   5350         : LDeferredCode(codegen), instr_(instr) { }
   5351     virtual void Generate() { codegen()->DoDeferredTaggedToI(instr_); }
   5352     virtual LInstruction* instr() { return instr_; }
   5353    private:
   5354     LTaggedToI* instr_;
   5355   };
   5356 
   5357   LOperand* input = instr->value();
   5358   ASSERT(input->IsRegister());
   5359   Register input_reg = ToRegister(input);
   5360   ASSERT(input_reg.is(ToRegister(instr->result())));
   5361 
   5362   DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
   5363 
   5364   __ JumpIfNotSmi(input_reg, deferred->entry());
   5365   __ SmiUntag(input_reg);
   5366   __ bind(deferred->exit());
   5367 }
   5368 
   5369 
   5370 void LCodeGen::DoDeferredTaggedToINoSSE2(LTaggedToINoSSE2* instr) {
   5371   Label done, heap_number;
   5372   Register result_reg = ToRegister(instr->result());
   5373   Register input_reg = ToRegister(instr->value());
   5374 
   5375   // Heap number map check.
   5376   __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
   5377          factory()->heap_number_map());
   5378   if (instr->truncating()) {
   5379     __ j(equal, &heap_number, Label::kNear);
   5380     // Check for undefined. Undefined is converted to zero for truncating
   5381     // conversions.
   5382     __ cmp(input_reg, factory()->undefined_value());
   5383     __ RecordComment("Deferred TaggedToI: cannot truncate");
   5384     DeoptimizeIf(not_equal, instr->environment());
   5385     __ xor_(result_reg, result_reg);
   5386     __ jmp(&done, Label::kFar);
   5387     __ bind(&heap_number);
   5388   } else {
   5389     // Deoptimize if we don't have a heap number.
   5390     DeoptimizeIf(not_equal, instr->environment());
   5391   }
   5392 
   5393   // Surprisingly, all of this crazy bit manipulation is considerably
   5394   // faster than using the built-in x86 CPU conversion functions (about 6x).
   5395   Label right_exponent, adjust_bias, zero_result;
   5396   Register scratch = ToRegister(instr->scratch());
   5397   Register scratch2 = ToRegister(instr->scratch2());
   5398   // Get exponent word.
   5399   __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
   5400   // Get exponent alone in scratch2.
   5401   __ mov(scratch2, scratch);
   5402   __ and_(scratch2, HeapNumber::kExponentMask);
   5403   __ shr(scratch2, HeapNumber::kExponentShift);
   5404   if (instr->truncating()) {
   5405     __ j(zero, &zero_result);
   5406   } else {
   5407     __ j(not_zero, &adjust_bias);
   5408     __ test(scratch, Immediate(HeapNumber::kMantissaMask));
   5409     DeoptimizeIf(not_zero, instr->environment());
   5410     __ cmp(FieldOperand(input_reg, HeapNumber::kMantissaOffset), Immediate(0));
   5411     DeoptimizeIf(not_equal, instr->environment());
   5412     __ bind(&adjust_bias);
   5413   }
   5414   __ sub(scratch2, Immediate(HeapNumber::kExponentBias));
   5415   if (!instr->truncating()) {
   5416     DeoptimizeIf(negative, instr->environment());
   5417   } else {
   5418     __ j(negative, &zero_result);
   5419   }
   5420 
   5421   // Get the second half of the double. For some exponents we don't
   5422   // actually need this because the bits get shifted out again, but
   5423   // it's probably slower to test than just to do it.
   5424   Register scratch3 = ToRegister(instr->scratch3());
   5425   __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
   5426   __ xor_(result_reg, result_reg);
   5427 
   5428   const uint32_t non_int32_exponent = 31;
   5429   __ cmp(scratch2, Immediate(non_int32_exponent));
   5430   // If we have a match of the int32 exponent then skip some logic.
   5431   __ j(equal, &right_exponent, Label::kNear);
    5432   // If the number doesn't fit in an int32, deopt.
   5433   DeoptimizeIf(greater, instr->environment());
   5434 
   5435   // Exponent word in scratch, exponent in scratch2.  We know that 0 <= exponent
   5436   // < 31.
   5437   __ mov(result_reg, Immediate(31));
   5438   __ sub(result_reg, scratch2);
   5439 
   5440   __ bind(&right_exponent);
   5441 
   5442   // Save off exponent for negative check later.
   5443   __ mov(scratch2, scratch);
   5444 
   5445   // Here result_reg is the shift, scratch is the exponent word.
   5446   // Get the top bits of the mantissa.
   5447   __ and_(scratch, HeapNumber::kMantissaMask);
   5448   // Put back the implicit 1.
   5449   __ or_(scratch, 1 << HeapNumber::kExponentShift);
   5450   // Shift up the mantissa bits to take up the space the exponent used to
    5451   // take. We have kExponentShift + 1 significant bits in the low end of the
   5452   // word.  Shift them to the top bits.
   5453   const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
   5454   __ shl(scratch, shift_distance);
   5455   if (!instr->truncating()) {
    5456     // If not truncating, a non-zero value in the low mantissa bits that are
    5457     // about to be shifted out means a non-integral value --> trigger a deopt.
   5458     __ test(scratch3, Immediate((1 << (32 - shift_distance)) - 1));
   5459     DeoptimizeIf(not_equal, instr->environment());
   5460   }
    5461   // Shift the low mantissa word down so that only its most significant
    5462   // bits remain, then merge them with the high mantissa bits.
   5463   __ shr(scratch3, 32 - shift_distance);
   5464   __ or_(scratch3, scratch);
   5465   if (!instr->truncating()) {
    5466     // If not truncating, a non-zero value in the bits that will be shifted
    5467     // away when adjusting the exponent means rounding --> deopt.
   5468     __ mov(scratch, 0x1);
   5469     ASSERT(result_reg.is(ecx));
   5470     __ shl_cl(scratch);
   5471     __ dec(scratch);
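            // scratch now holds (1 << shift_count) - 1, a mask of exactly the
            // bits that the shr_cl below will discard.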
   5472     __ test(scratch3, scratch);
   5473     DeoptimizeIf(not_equal, instr->environment());
   5474   }
   5475   // Move down according to the exponent.
   5476   ASSERT(result_reg.is(ecx));
   5477   __ shr_cl(scratch3);
   5478   // Now the unsigned 32-bit answer is in scratch3.  We need to move it to
   5479   // result_reg and we may need to fix the sign.
   5480   Label negative_result;
   5481   __ xor_(result_reg, result_reg);
   5482   __ cmp(scratch2, result_reg);
   5483   __ j(less, &negative_result, Label::kNear);
   5484   __ cmp(scratch3, result_reg);
   5485   __ mov(result_reg, scratch3);
   5486   // If the result is > MAX_INT, result doesn't fit in signed 32-bit --> deopt.
   5487   DeoptimizeIf(less, instr->environment());
   5488   __ jmp(&done, Label::kNear);
   5489   __ bind(&zero_result);
   5490   __ xor_(result_reg, result_reg);
   5491   __ jmp(&done, Label::kNear);
   5492   __ bind(&negative_result);
   5493   __ sub(result_reg, scratch3);
   5494   if (!instr->truncating()) {
   5495     // -0.0 triggers a deopt.
   5496     DeoptimizeIf(zero, instr->environment());
   5497   }
   5498   // If negating the value leaves a positive result, the magnitude was too
   5499   // large for an int32 --> deopt.
   5500   DeoptimizeIf(positive, instr->environment());
   5501   __ bind(&done);
   5502 }
   5503 
   5504 
   5505 void LCodeGen::DoTaggedToINoSSE2(LTaggedToINoSSE2* instr) {
   5506   class DeferredTaggedToINoSSE2: public LDeferredCode {
   5507    public:
   5508     DeferredTaggedToINoSSE2(LCodeGen* codegen, LTaggedToINoSSE2* instr)
   5509         : LDeferredCode(codegen), instr_(instr) { }
   5510     virtual void Generate() { codegen()->DoDeferredTaggedToINoSSE2(instr_); }
   5511     virtual LInstruction* instr() { return instr_; }
   5512    private:
   5513     LTaggedToINoSSE2* instr_;
   5514   };
   5515 
   5516   LOperand* input = instr->value();
   5517   ASSERT(input->IsRegister());
   5518   Register input_reg = ToRegister(input);
   5519   ASSERT(input_reg.is(ToRegister(instr->result())));
   5520 
   5521   DeferredTaggedToINoSSE2* deferred =
   5522       new(zone()) DeferredTaggedToINoSSE2(this, instr);
   5523 
   5524   // Smi check.
   5525   __ JumpIfNotSmi(input_reg, deferred->entry());
   5526   __ SmiUntag(input_reg);  // Untag smi.
   5527   __ bind(deferred->exit());
   5528 }
   5529 
   5530 
   5531 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   5532   LOperand* input = instr->value();
   5533   ASSERT(input->IsRegister());
   5534   LOperand* temp = instr->temp();
   5535   ASSERT(temp == NULL || temp->IsRegister());
   5536   LOperand* result = instr->result();
   5537   ASSERT(result->IsDoubleRegister());
   5538 
   5539   Register input_reg = ToRegister(input);
   5540   bool deoptimize_on_minus_zero =
   5541       instr->hydrogen()->deoptimize_on_minus_zero();
   5542   Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
   5543 
   5544   HValue* value = instr->hydrogen()->value();
   5545   NumberUntagDMode mode = value->representation().IsSmi()
   5546       ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
   5547 
   5548   if (CpuFeatures::IsSupported(SSE2)) {
   5549     CpuFeatureScope scope(masm(), SSE2);
   5550     XMMRegister result_reg = ToDoubleRegister(result);
   5551     EmitNumberUntagD(input_reg,
   5552                      temp_reg,
   5553                      result_reg,
   5554                      instr->hydrogen()->can_convert_undefined_to_nan(),
   5555                      deoptimize_on_minus_zero,
   5556                      instr->environment(),
   5557                      mode);
   5558   } else {
   5559     EmitNumberUntagDNoSSE2(input_reg,
   5560                            temp_reg,
   5561                            ToX87Register(instr->result()),
   5562                            instr->hydrogen()->can_convert_undefined_to_nan(),
   5563                            deoptimize_on_minus_zero,
   5564                            instr->environment(),
   5565                            mode);
   5566   }
   5567 }
   5568 
   5569 
   5570 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
   5571   LOperand* input = instr->value();
   5572   ASSERT(input->IsDoubleRegister());
   5573   LOperand* result = instr->result();
   5574   ASSERT(result->IsRegister());
   5575   CpuFeatureScope scope(masm(), SSE2);
   5576 
   5577   XMMRegister input_reg = ToDoubleRegister(input);
   5578   Register result_reg = ToRegister(result);
   5579 
   5580   __ cvttsd2si(result_reg, Operand(input_reg));
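          // cvttsd2si yields 0x80000000, the "integer indefinite" value, when the
          // input is NaN or out of int32 range; the fast-path check below uses it.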
   5581 
   5582   if (instr->truncating()) {
   5583     // Performs a truncating conversion of a floating point number as used by
   5584     // the JS bitwise operations.
   5585     Label fast_case_succeeded;
   5586     __ cmp(result_reg, 0x80000000u);
   5587     __ j(not_equal, &fast_case_succeeded);
   5588     __ sub(esp, Immediate(kDoubleSize));
   5589     __ movdbl(MemOperand(esp, 0), input_reg);
   5590     DoubleToIStub stub(esp, result_reg, 0, true);
   5591     __ call(stub.GetCode(isolate()), RelocInfo::CODE_TARGET);
   5592     __ add(esp, Immediate(kDoubleSize));
   5593     __ bind(&fast_case_succeeded);
   5594   } else {
   5595     Label done;
   5596     __ cvtsi2sd(xmm0, Operand(result_reg));
   5597     __ ucomisd(xmm0, input_reg);
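            // ucomisd performs an unordered compare; the parity flag is set
            // exactly when one of the operands is NaN.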
   5598     DeoptimizeIf(not_equal, instr->environment());
   5599     DeoptimizeIf(parity_even, instr->environment());  // NaN.
   5600     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   5601       // The integer converted back is equal to the original. We
   5602       // only have to test if we got -0 as an input.
   5603       __ test(result_reg, Operand(result_reg));
   5604       __ j(not_zero, &done, Label::kNear);
   5605       __ movmskpd(result_reg, input_reg);
   5606       // Bit 0 contains the sign of the double in input_reg.
   5607       // If input was positive, we are ok and return 0, otherwise
   5608       // deoptimize.
   5609       __ and_(result_reg, 1);
   5610       DeoptimizeIf(not_zero, instr->environment());
   5611     }
   5612     __ bind(&done);
   5613   }
   5614 }
   5615 
   5616 
   5617 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
   5618   LOperand* input = instr->value();
   5619   ASSERT(input->IsDoubleRegister());
   5620   LOperand* result = instr->result();
   5621   ASSERT(result->IsRegister());
   5622   CpuFeatureScope scope(masm(), SSE2);
   5623 
   5624   XMMRegister input_reg = ToDoubleRegister(input);
   5625   Register result_reg = ToRegister(result);
   5626 
   5627   Label done;
   5628   __ cvttsd2si(result_reg, Operand(input_reg));
   5629   __ cvtsi2sd(xmm0, Operand(result_reg));
   5630   __ ucomisd(xmm0, input_reg);
   5631   DeoptimizeIf(not_equal, instr->environment());
   5632   DeoptimizeIf(parity_even, instr->environment());  // NaN.
   5633 
   5634   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   5635     // The integer converted back is equal to the original. We
   5636     // only have to test if we got -0 as an input.
   5637     __ test(result_reg, Operand(result_reg));
   5638     __ j(not_zero, &done, Label::kNear);
   5639     __ movmskpd(result_reg, input_reg);
   5640     // Bit 0 contains the sign of the double in input_reg.
   5641     // If input was positive, we are ok and return 0, otherwise
   5642     // deoptimize.
   5643     __ and_(result_reg, 1);
   5644     DeoptimizeIf(not_zero, instr->environment());
   5645     __ bind(&done);
   5646   }
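          // SmiTag on ia32 is an "add reg, reg" (shift left by one), so overflow
          // is set when the value doesn't fit in 31 signed bits.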
   5647   __ SmiTag(result_reg);
   5648   DeoptimizeIf(overflow, instr->environment());
   5649 }
   5650 
   5651 
   5652 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
   5653   LOperand* input = instr->value();
   5654   __ test(ToOperand(input), Immediate(kSmiTagMask));
   5655   DeoptimizeIf(not_zero, instr->environment());
   5656 }
   5657 
   5658 
   5659 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
   5660   if (!instr->hydrogen()->value()->IsHeapObject()) {
   5661     LOperand* input = instr->value();
   5662     __ test(ToOperand(input), Immediate(kSmiTagMask));
   5663     DeoptimizeIf(zero, instr->environment());
   5664   }
   5665 }
   5666 
   5667 
   5668 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
   5669   Register input = ToRegister(instr->value());
   5670   Register temp = ToRegister(instr->temp());
   5671 
   5672   __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
   5673 
   5674   if (instr->hydrogen()->is_interval_check()) {
   5675     InstanceType first;
   5676     InstanceType last;
   5677     instr->hydrogen()->GetCheckInterval(&first, &last);
   5678 
   5679     __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
   5680             static_cast<int8_t>(first));
   5681 
   5682   // If there is only one type in the interval, check for equality.
   5683     if (first == last) {
   5684       DeoptimizeIf(not_equal, instr->environment());
   5685     } else {
   5686       DeoptimizeIf(below, instr->environment());
   5687       // Omit check for the last type.
   5688       if (last != LAST_TYPE) {
   5689         __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
   5690                 static_cast<int8_t>(last));
   5691         DeoptimizeIf(above, instr->environment());
   5692       }
   5693     }
   5694   } else {
   5695     uint8_t mask;
   5696     uint8_t tag;
   5697     instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
   5698 
   5699     if (IsPowerOf2(mask)) {
   5700       ASSERT(tag == 0 || IsPowerOf2(tag));
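              // A single-bit mask lets test_b decide directly: tag == 0 means the
              // bit must be clear; otherwise it must be set.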
   5701       __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
   5702       DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
   5703     } else {
   5704       __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
   5705       __ and_(temp, mask);
   5706       __ cmp(temp, tag);
   5707       DeoptimizeIf(not_equal, instr->environment());
   5708     }
   5709   }
   5710 }
   5711 
   5712 
   5713 void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
   5714   Handle<JSFunction> target = instr->hydrogen()->target();
   5715   if (instr->hydrogen()->target_in_new_space()) {
   5716     Register reg = ToRegister(instr->value());
   5717     Handle<Cell> cell = isolate()->factory()->NewCell(target);
   5718     __ cmp(reg, Operand::ForCell(cell));
   5719   } else {
   5720     Operand operand = ToOperand(instr->value());
   5721     __ cmp(operand, target);
   5722   }
   5723   DeoptimizeIf(not_equal, instr->environment());
   5724 }
   5725 
   5726 
   5727 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
   5728   {
   5729     PushSafepointRegistersScope scope(this);
   5730     __ push(object);
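            // Clear esi: the call needs no context, but the register must hold a
            // GC-safe value (zero is a smi) across the runtime call.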
   5731     __ xor_(esi, esi);
   5732     __ CallRuntimeSaveDoubles(Runtime::kMigrateInstance);
   5733     RecordSafepointWithRegisters(
   5734         instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
   5735 
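            // A smi result from the runtime call indicates that migration failed.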
   5736     __ test(eax, Immediate(kSmiTagMask));
   5737   }
   5738   DeoptimizeIf(zero, instr->environment());
   5739 }
   5740 
   5741 
   5742 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
   5743   class DeferredCheckMaps: public LDeferredCode {
   5744    public:
   5745     DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
   5746         : LDeferredCode(codegen), instr_(instr), object_(object) {
   5747       SetExit(check_maps());
   5748     }
   5749     virtual void Generate() {
   5750       codegen()->DoDeferredInstanceMigration(instr_, object_);
   5751     }
   5752     Label* check_maps() { return &check_maps_; }
   5753     virtual LInstruction* instr() { return instr_; }
   5754    private:
   5755     LCheckMaps* instr_;
   5756     Label check_maps_;
   5757     Register object_;
   5758   };
   5759 
   5760   if (instr->hydrogen()->CanOmitMapChecks()) return;
   5761 
   5762   LOperand* input = instr->value();
   5763   ASSERT(input->IsRegister());
   5764   Register reg = ToRegister(input);
   5765 
   5766   SmallMapList* map_set = instr->hydrogen()->map_set();
   5767 
   5768   DeferredCheckMaps* deferred = NULL;
   5769   if (instr->hydrogen()->has_migration_target()) {
   5770     deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
   5771     __ bind(deferred->check_maps());
   5772   }
   5773 
   5774   Label success;
   5775   for (int i = 0; i < map_set->length() - 1; i++) {
   5776     Handle<Map> map = map_set->at(i);
   5777     __ CompareMap(reg, map, &success);
   5778     __ j(equal, &success);
   5779   }
   5780 
   5781   Handle<Map> map = map_set->last();
   5782   __ CompareMap(reg, map, &success);
   5783   if (instr->hydrogen()->has_migration_target()) {
   5784     __ j(not_equal, deferred->entry());
   5785   } else {
   5786     DeoptimizeIf(not_equal, instr->environment());
   5787   }
   5788 
   5789   __ bind(&success);
   5790 }
   5791 
   5792 
   5793 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
   5794   CpuFeatureScope scope(masm(), SSE2);
   5795   XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
   5796   Register result_reg = ToRegister(instr->result());
   5797   __ ClampDoubleToUint8(value_reg, xmm0, result_reg);
   5798 }
   5799 
   5800 
   5801 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
   5802   ASSERT(instr->unclamped()->Equals(instr->result()));
   5803   Register value_reg = ToRegister(instr->result());
   5804   __ ClampUint8(value_reg);
   5805 }
   5806 
   5807 
   5808 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
   5809   CpuFeatureScope scope(masm(), SSE2);
   5810 
   5811   ASSERT(instr->unclamped()->Equals(instr->result()));
   5812   Register input_reg = ToRegister(instr->unclamped());
   5813   Label is_smi, done, heap_number;
   5814 
   5815   __ JumpIfSmi(input_reg, &is_smi);
   5816 
   5817   // Check for heap number
   5818   __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
   5819          factory()->heap_number_map());
   5820   __ j(equal, &heap_number, Label::kNear);
   5821 
   5822   // Check for undefined. Undefined is converted to zero for clamping
   5823   // conversions.
   5824   __ cmp(input_reg, factory()->undefined_value());
   5825   DeoptimizeIf(not_equal, instr->environment());
   5826   __ mov(input_reg, 0);
   5827   __ jmp(&done, Label::kNear);
   5828 
   5829   // Heap number
   5830   __ bind(&heap_number);
   5831   __ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
   5832   __ ClampDoubleToUint8(xmm0, xmm1, input_reg);
   5833   __ jmp(&done, Label::kNear);
   5834 
   5835   // smi
   5836   __ bind(&is_smi);
   5837   __ SmiUntag(input_reg);
   5838   __ ClampUint8(input_reg);
   5839   __ bind(&done);
   5840 }
   5841 
   5842 
   5843 void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
   5844   Register input_reg = ToRegister(instr->unclamped());
   5845   Register result_reg = ToRegister(instr->result());
   5846   Register scratch = ToRegister(instr->scratch());
   5847   Register scratch2 = ToRegister(instr->scratch2());
   5848   Register scratch3 = ToRegister(instr->scratch3());
   5849   Label is_smi, done, heap_number, valid_exponent,
   5850       largest_value, zero_result, maybe_nan_or_infinity;
   5851 
   5852   __ JumpIfSmi(input_reg, &is_smi);
   5853 
   5854   // Check for heap number
   5855   __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
   5856          factory()->heap_number_map());
   5857   __ j(equal, &heap_number, Label::kFar);
   5858 
   5859   // Check for undefined. Undefined is converted to zero for clamping
   5860   // conversions.
   5861   __ cmp(input_reg, factory()->undefined_value());
   5862   DeoptimizeIf(not_equal, instr->environment());
   5863   __ jmp(&zero_result);
   5864 
   5865   // Heap number
   5866   __ bind(&heap_number);
   5867 
   5868   // Surprisingly, all of the hand-crafted bit manipulations below are much
   5869   // faster than the x86 FPU built-in instructions, especially since "banker's
   5870   // rounding" would be very expensive to implement there.
   5871 
   5872   // Get exponent word.
   5873   __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
   5874   __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
   5875 
   5876   // Test for negative values --> clamp to zero
   5877   __ test(scratch, scratch);
   5878   __ j(negative, &zero_result);
   5879 
   5880   // Get exponent alone in scratch2.
   5881   __ mov(scratch2, scratch);
   5882   __ and_(scratch2, HeapNumber::kExponentMask);
   5883   __ shr(scratch2, HeapNumber::kExponentShift);
   5884   __ j(zero, &zero_result);
   5885   __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
   5886   __ j(negative, &zero_result);
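          // scratch2 now holds the unbiased exponent plus one; negative means the
          // value is below 0.5 and therefore clamps to zero.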
   5887 
   5888   const uint32_t non_int8_exponent = 7;
   5889   __ cmp(scratch2, Immediate(non_int8_exponent + 1));
   5890   // If the exponent is too big, check for special values.
   5891   __ j(greater, &maybe_nan_or_infinity, Label::kNear);
   5892 
   5893   __ bind(&valid_exponent);
   5894   // Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent
   5895   // < 7. The shift bias is the number of bits to shift the mantissa such that
   5896   // with an exponent of 7 the top-most one lands in bit 30, allowing
   5897   // detection of the rounding overflow of 255.5 to 256 (bit 31 goes from 0 to
   5898   // 1).
   5899   int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1;
   5900   __ lea(result_reg, MemOperand(scratch2, shift_bias));
   5901   // Here result_reg (ecx) is the shift, scratch is the exponent word.  Get the
   5902   // top bits of the mantissa.
   5903   __ and_(scratch, HeapNumber::kMantissaMask);
   5904   // Put back the implicit 1 of the mantissa
   5905   __ or_(scratch, 1 << HeapNumber::kExponentShift);
   5906   // Shift up to round
   5907   __ shl_cl(scratch);
   5908   // Use "banker's rounding", as the spec requires: if the fractional part of
   5909   // the number is exactly 0.5, take the bit in the "ones" place and add it to
   5910   // the "halves" place, which has the effect of rounding to even.
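          // (Round-half-to-even: e.g. 2.5 rounds to 2, while 3.5 rounds to 4.)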
   5911   __ mov(scratch2, scratch);
   5912   const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8;
   5913   const uint32_t one_bit_shift = one_half_bit_shift + 1;
   5914   __ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
   5915   __ cmp(scratch2, Immediate(1 << one_half_bit_shift));
   5916   Label no_round;
   5917   __ j(less, &no_round);
   5918   Label round_up;
   5919   __ mov(scratch2, Immediate(1 << one_half_bit_shift));
   5920   __ j(greater, &round_up);
   5921   __ test(scratch3, scratch3);
   5922   __ j(not_zero, &round_up);
   5923   __ mov(scratch2, scratch);
   5924   __ and_(scratch2, Immediate(1 << one_bit_shift));
   5925   __ shr(scratch2, 1);
   5926   __ bind(&round_up);
   5927   __ add(scratch, scratch2);
   5928   __ j(overflow, &largest_value);
   5929   __ bind(&no_round);
   5930   __ shr(scratch, 23);
   5931   __ mov(result_reg, scratch);
   5932   __ jmp(&done, Label::kNear);
   5933 
   5934   __ bind(&maybe_nan_or_infinity);
   5935   // Check for NaN/Infinity; all other values map to 255.
   5936   __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1));
   5937   __ j(not_equal, &largest_value, Label::kNear);
   5938 
   5939   // Check for NaN, which differs from Infinity in that at least one mantissa
   5940   // bit is set.
   5941   __ and_(scratch, HeapNumber::kMantissaMask);
   5942   __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
   5943   __ j(not_zero, &zero_result);  // M!=0 --> NaN
   5944   // Infinity -> Fall through to map to 255.
   5945 
   5946   __ bind(&largest_value);
   5947   __ mov(result_reg, Immediate(255));
   5948   __ jmp(&done, Label::kNear);
   5949 
   5950   __ bind(&zero_result);
   5951   __ xor_(result_reg, result_reg);
   5952   __ jmp(&done);
   5953 
   5954   // smi
   5955   __ bind(&is_smi);
   5956   if (!input_reg.is(result_reg)) {
   5957     __ mov(result_reg, input_reg);
   5958   }
   5959   __ SmiUntag(result_reg);
   5960   __ ClampUint8(result_reg);
   5961   __ bind(&done);
   5962 }
   5963 
   5964 
   5965 void LCodeGen::DoAllocate(LAllocate* instr) {
   5966   class DeferredAllocate: public LDeferredCode {
   5967    public:
   5968     DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
   5969         : LDeferredCode(codegen), instr_(instr) { }
   5970     virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
   5971     virtual LInstruction* instr() { return instr_; }
   5972    private:
   5973     LAllocate* instr_;
   5974   };
   5975 
   5976   DeferredAllocate* deferred =
   5977       new(zone()) DeferredAllocate(this, instr);
   5978 
   5979   Register result = ToRegister(instr->result());
   5980   Register temp = ToRegister(instr->temp());
   5981 
   5982   // Allocate memory for the object.
   5983   AllocationFlags flags = TAG_OBJECT;
   5984   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
   5985     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
   5986   }
   5987   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
   5988     ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
   5989     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
   5990     flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
   5991   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
   5992     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
   5993     flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
   5994   }
   5995 
   5996   if (instr->size()->IsConstantOperand()) {
   5997     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   5998     __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
   5999   } else {
   6000     Register size = ToRegister(instr->size());
   6001     __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
   6002   }
   6003 
   6004   __ bind(deferred->exit());
   6005 
   6006   if (instr->hydrogen()->MustPrefillWithFiller()) {
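            // Pre-fill the allocation with one-word filler maps so the heap stays
            // iterable if a GC happens before the real contents are written.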
   6007     if (instr->size()->IsConstantOperand()) {
   6008       int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   6009       __ mov(temp, (size / kPointerSize) - 1);
   6010     } else {
   6011       temp = ToRegister(instr->size());
   6012       __ shr(temp, kPointerSizeLog2);
   6013       __ dec(temp);
   6014     }
   6015     Label loop;
   6016     __ bind(&loop);
   6017     __ mov(FieldOperand(result, temp, times_pointer_size, 0),
   6018         isolate()->factory()->one_pointer_filler_map());
   6019     __ dec(temp);
   6020     __ j(not_zero, &loop);
   6021   }
   6022 }
   6023 
   6024 
   6025 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
   6026   Register result = ToRegister(instr->result());
   6027 
   6028   // TODO(3095996): Get rid of this. For now, we need to make the
   6029   // result register contain a valid pointer because it is already
   6030   // contained in the register pointer map.
   6031   __ mov(result, Immediate(Smi::FromInt(0)));
   6032 
   6033   PushSafepointRegistersScope scope(this);
   6034   if (instr->size()->IsRegister()) {
   6035     Register size = ToRegister(instr->size());
   6036     ASSERT(!size.is(result));
   6037     __ SmiTag(ToRegister(instr->size()));
   6038     __ push(size);
   6039   } else {
   6040     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   6041     __ push(Immediate(Smi::FromInt(size)));
   6042   }
   6043 
   6044   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
   6045     ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
   6046     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
   6047     CallRuntimeFromDeferred(
   6048         Runtime::kAllocateInOldPointerSpace, 1, instr, instr->context());
   6049   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
   6050     ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
   6051     CallRuntimeFromDeferred(
   6052         Runtime::kAllocateInOldDataSpace, 1, instr, instr->context());
   6053   } else {
   6054     CallRuntimeFromDeferred(
   6055         Runtime::kAllocateInNewSpace, 1, instr, instr->context());
   6056   }
   6057   __ StoreToSafepointRegisterSlot(result, eax);
   6058 }
   6059 
   6060 
   6061 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
   6062   ASSERT(ToRegister(instr->value()).is(eax));
   6063   __ push(eax);
   6064   CallRuntime(Runtime::kToFastProperties, 1, instr);
   6065 }
   6066 
   6067 
   6068 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
   6069   ASSERT(ToRegister(instr->context()).is(esi));
   6070   Label materialized;
   6071   // Registers will be used as follows:
   6072   // ecx = literals array.
   6073   // ebx = regexp literal.
   6074   // eax = regexp literal clone.
   6075   // esi = context.
   6076   int literal_offset =
   6077       FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
   6078   __ LoadHeapObject(ecx, instr->hydrogen()->literals());
   6079   __ mov(ebx, FieldOperand(ecx, literal_offset));
   6080   __ cmp(ebx, factory()->undefined_value());
   6081   __ j(not_equal, &materialized, Label::kNear);
   6082 
   6083   // Create regexp literal using runtime function
   6084   // Result will be in eax.
   6085   __ push(ecx);
   6086   __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
   6087   __ push(Immediate(instr->hydrogen()->pattern()));
   6088   __ push(Immediate(instr->hydrogen()->flags()));
   6089   CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
   6090   __ mov(ebx, eax);
   6091 
   6092   __ bind(&materialized);
   6093   int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
   6094   Label allocated, runtime_allocate;
   6095   __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
   6096   __ jmp(&allocated);
   6097 
   6098   __ bind(&runtime_allocate);
   6099   __ push(ebx);
   6100   __ push(Immediate(Smi::FromInt(size)));
   6101   CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
   6102   __ pop(ebx);
   6103 
   6104   __ bind(&allocated);
   6105   // Copy the content into the newly allocated memory.
   6106   // (Unroll copy loop once for better throughput).
   6107   for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
   6108     __ mov(edx, FieldOperand(ebx, i));
   6109     __ mov(ecx, FieldOperand(ebx, i + kPointerSize));
   6110     __ mov(FieldOperand(eax, i), edx);
   6111     __ mov(FieldOperand(eax, i + kPointerSize), ecx);
   6112   }
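          // Copy the trailing word, if any, when the size is an odd number of
          // pointers.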
   6113   if ((size % (2 * kPointerSize)) != 0) {
   6114     __ mov(edx, FieldOperand(ebx, size - kPointerSize));
   6115     __ mov(FieldOperand(eax, size - kPointerSize), edx);
   6116   }
   6117 }
   6118 
   6119 
   6120 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
   6121   ASSERT(ToRegister(instr->context()).is(esi));
   6122   // Use the fast case closure allocation code that allocates in new
   6123   // space for nested functions that don't need literals cloning.
   6124   bool pretenure = instr->hydrogen()->pretenure();
   6125   if (!pretenure && instr->hydrogen()->has_no_literals()) {
   6126     FastNewClosureStub stub(instr->hydrogen()->language_mode(),
   6127                             instr->hydrogen()->is_generator());
   6128     __ push(Immediate(instr->hydrogen()->shared_info()));
   6129     CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
   6130   } else {
   6131     __ push(esi);
   6132     __ push(Immediate(instr->hydrogen()->shared_info()));
   6133     __ push(Immediate(pretenure ? factory()->true_value()
   6134                                 : factory()->false_value()));
   6135     CallRuntime(Runtime::kNewClosure, 3, instr);
   6136   }
   6137 }
   6138 
   6139 
   6140 void LCodeGen::DoTypeof(LTypeof* instr) {
   6141   LOperand* input = instr->value();
   6142   EmitPushTaggedOperand(input);
   6143   CallRuntime(Runtime::kTypeof, 1, instr);
   6144 }
   6145 
   6146 
   6147 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
   6148   Register input = ToRegister(instr->value());
   6149 
   6150   Condition final_branch_condition =
   6151       EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
   6152           input, instr->type_literal());
   6153   if (final_branch_condition != no_condition) {
   6154     EmitBranch(instr, final_branch_condition);
   6155   }
   6156 }
   6157 
   6158 
   6159 Condition LCodeGen::EmitTypeofIs(Label* true_label,
   6160                                  Label* false_label,
   6161                                  Register input,
   6162                                  Handle<String> type_name) {
   6163   Condition final_branch_condition = no_condition;
   6164   if (type_name->Equals(heap()->number_string())) {
   6165     __ JumpIfSmi(input, true_label);
   6166     __ cmp(FieldOperand(input, HeapObject::kMapOffset),
   6167            factory()->heap_number_map());
   6168     final_branch_condition = equal;
   6169 
   6170   } else if (type_name->Equals(heap()->string_string())) {
   6171     __ JumpIfSmi(input, false_label);
   6172     __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
   6173     __ j(above_equal, false_label);
   6174     __ test_b(FieldOperand(input, Map::kBitFieldOffset),
   6175               1 << Map::kIsUndetectable);
   6176     final_branch_condition = zero;
   6177 
   6178   } else if (type_name->Equals(heap()->symbol_string())) {
   6179     __ JumpIfSmi(input, false_label);
   6180     __ CmpObjectType(input, SYMBOL_TYPE, input);
   6181     final_branch_condition = equal;
   6182 
   6183   } else if (type_name->Equals(heap()->boolean_string())) {
   6184     __ cmp(input, factory()->true_value());
   6185     __ j(equal, true_label);
   6186     __ cmp(input, factory()->false_value());
   6187     final_branch_condition = equal;
   6188 
   6189   } else if (FLAG_harmony_typeof && type_name->Equals(heap()->null_string())) {
   6190     __ cmp(input, factory()->null_value());
   6191     final_branch_condition = equal;
   6192 
   6193   } else if (type_name->Equals(heap()->undefined_string())) {
   6194     __ cmp(input, factory()->undefined_value());
   6195     __ j(equal, true_label);
   6196     __ JumpIfSmi(input, false_label);
   6197     // Check for undetectable objects => true.
   6198     __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
   6199     __ test_b(FieldOperand(input, Map::kBitFieldOffset),
   6200               1 << Map::kIsUndetectable);
   6201     final_branch_condition = not_zero;
   6202 
   6203   } else if (type_name->Equals(heap()->function_string())) {
   6204     STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
   6205     __ JumpIfSmi(input, false_label);
   6206     __ CmpObjectType(input, JS_FUNCTION_TYPE, input);
   6207     __ j(equal, true_label);
   6208     __ CmpInstanceType(input, JS_FUNCTION_PROXY_TYPE);
   6209     final_branch_condition = equal;
   6210 
   6211   } else if (type_name->Equals(heap()->object_string())) {
   6212     __ JumpIfSmi(input, false_label);
   6213     if (!FLAG_harmony_typeof) {
   6214       __ cmp(input, factory()->null_value());
   6215       __ j(equal, true_label);
   6216     }
   6217     __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
   6218     __ j(below, false_label);
   6219     __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
   6220     __ j(above, false_label);
   6221     // Check for undetectable objects => false.
   6222     __ test_b(FieldOperand(input, Map::kBitFieldOffset),
   6223               1 << Map::kIsUndetectable);
   6224     final_branch_condition = zero;
   6225 
   6226   } else {
   6227     __ jmp(false_label);
   6228   }
   6229   return final_branch_condition;
   6230 }
   6231 
   6232 
   6233 void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
   6234   Register temp = ToRegister(instr->temp());
   6235 
   6236   EmitIsConstructCall(temp);
   6237   EmitBranch(instr, equal);
   6238 }
   6239 
   6240 
   6241 void LCodeGen::EmitIsConstructCall(Register temp) {
   6242   // Get the frame pointer for the calling frame.
   6243   __ mov(temp, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
   6244 
   6245   // Skip the arguments adaptor frame if it exists.
   6246   Label check_frame_marker;
   6247   __ cmp(Operand(temp, StandardFrameConstants::kContextOffset),
   6248          Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
   6249   __ j(not_equal, &check_frame_marker, Label::kNear);
   6250   __ mov(temp, Operand(temp, StandardFrameConstants::kCallerFPOffset));
   6251 
   6252   // Check the marker in the calling frame.
   6253   __ bind(&check_frame_marker);
   6254   __ cmp(Operand(temp, StandardFrameConstants::kMarkerOffset),
   6255          Immediate(Smi::FromInt(StackFrame::CONSTRUCT)));
   6256 }
   6257 
   6258 
   6259 void LCodeGen::EnsureSpaceForLazyDeopt() {
   6260   if (!info()->IsStub()) {
   6261     // Ensure that we have enough space after the previous lazy-bailout
   6262     // instruction for patching the code here.
   6263     int current_pc = masm()->pc_offset();
   6264     int patch_size = Deoptimizer::patch_size();
   6265     if (current_pc < last_lazy_deopt_pc_ + patch_size) {
   6266       int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
   6267       __ Nop(padding_size);
   6268     }
   6269   }
   6270   last_lazy_deopt_pc_ = masm()->pc_offset();
   6271 }
   6272 
   6273 
   6274 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
   6275   EnsureSpaceForLazyDeopt();
   6276   ASSERT(instr->HasEnvironment());
   6277   LEnvironment* env = instr->environment();
   6278   RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
   6279   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
   6280 }
   6281 
   6282 
   6283 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
   6284   Deoptimizer::BailoutType type = instr->hydrogen()->type();
   6285   // TODO(danno): Stubs expect all deopts to be lazy for historical reasons
   6286   // (they need the return address), even though the implementation of LAZY
   6287   // and EAGER is now identical. When LAZY is eventually completely folded
   6288   // into EAGER, remove the special case below.
   6289   if (info()->IsStub() && type == Deoptimizer::EAGER) {
   6290     type = Deoptimizer::LAZY;
   6291   }
   6292   Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
   6293   DeoptimizeIf(no_condition, instr->environment(), type);
   6294 }
   6295 
   6296 
   6297 void LCodeGen::DoDummyUse(LDummyUse* instr) {
   6298   // Nothing to see here, move on!
   6299 }
   6300 
   6301 
   6302 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
   6303   PushSafepointRegistersScope scope(this);
   6304   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
   6305   __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
   6306   RecordSafepointWithLazyDeopt(
   6307       instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
   6308   ASSERT(instr->HasEnvironment());
   6309   LEnvironment* env = instr->environment();
   6310   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
   6311 }
   6312 
   6313 
   6314 void LCodeGen::DoStackCheck(LStackCheck* instr) {
   6315   class DeferredStackCheck: public LDeferredCode {
   6316    public:
   6317     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
   6318         : LDeferredCode(codegen), instr_(instr) { }
   6319     virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
   6320     virtual LInstruction* instr() { return instr_; }
   6321    private:
   6322     LStackCheck* instr_;
   6323   };
   6324 
   6325   ASSERT(instr->HasEnvironment());
   6326   LEnvironment* env = instr->environment();
   6327   // There is no LLazyBailout instruction for stack-checks. We have to
   6328   // prepare for lazy deoptimization explicitly here.
   6329   if (instr->hydrogen()->is_function_entry()) {
   6330     // Perform stack overflow check.
   6331     Label done;
   6332     ExternalReference stack_limit =
   6333         ExternalReference::address_of_stack_limit(isolate());
   6334     __ cmp(esp, Operand::StaticVariable(stack_limit));
   6335     __ j(above_equal, &done, Label::kNear);
   6336 
   6337     ASSERT(instr->context()->IsRegister());
   6338     ASSERT(ToRegister(instr->context()).is(esi));
   6339     StackCheckStub stub;
   6340     CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
   6341     EnsureSpaceForLazyDeopt();
   6342     __ bind(&done);
   6343     RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
   6344     safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
   6345   } else {
   6346     ASSERT(instr->hydrogen()->is_backwards_branch());
   6347     // Perform stack overflow check if this goto needs it before jumping.
   6348     DeferredStackCheck* deferred_stack_check =
   6349         new(zone()) DeferredStackCheck(this, instr);
   6350     ExternalReference stack_limit =
   6351         ExternalReference::address_of_stack_limit(isolate());
   6352     __ cmp(esp, Operand::StaticVariable(stack_limit));
   6353     __ j(below, deferred_stack_check->entry());
   6354     EnsureSpaceForLazyDeopt();
   6355     __ bind(instr->done_label());
   6356     deferred_stack_check->SetExit(instr->done_label());
   6357     RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
   6358     // Don't record a deoptimization index for the safepoint here.
   6359     // This will be done explicitly when emitting call and the safepoint in
   6360     // the deferred code.
   6361   }
   6362 }
   6363 
   6364 
   6365 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
   6366   // This is a pseudo-instruction that ensures that the environment here is
   6367   // properly registered for deoptimization and records the assembler's PC
   6368   // offset.
   6369   LEnvironment* environment = instr->environment();
   6370 
   6371   // If the environment were already registered, we would have no way of
   6372   // backpatching it with the spill slot operands.
   6373   ASSERT(!environment->HasBeenRegistered());
   6374   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
   6375 
   6376   // Normally we record the first unknown OSR value as the entrypoint to the OSR
   6377   // code, but if there were none, record the entrypoint here.
   6378   if (osr_pc_offset_ == -1) osr_pc_offset_ = masm()->pc_offset();
   6379 }
   6380 
   6381 
   6382 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
   6383   __ cmp(eax, isolate()->factory()->undefined_value());
   6384   DeoptimizeIf(equal, instr->environment());
   6385 
   6386   __ cmp(eax, isolate()->factory()->null_value());
   6387   DeoptimizeIf(equal, instr->environment());
   6388 
   6389   __ test(eax, Immediate(kSmiTagMask));
   6390   DeoptimizeIf(zero, instr->environment());
   6391 
   6392   STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
   6393   __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
   6394   DeoptimizeIf(below_equal, instr->environment());
   6395 
   6396   Label use_cache, call_runtime;
   6397   __ CheckEnumCache(&call_runtime);
   6398 
   6399   __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
   6400   __ jmp(&use_cache, Label::kNear);
   6401 
   6402   // Get the set of properties to enumerate.
   6403   __ bind(&call_runtime);
   6404   __ push(eax);
   6405   CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
   6406 
   6407   __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
   6408          isolate()->factory()->meta_map());
   6409   DeoptimizeIf(not_equal, instr->environment());
   6410   __ bind(&use_cache);
   6411 }
   6412 
   6413 
   6414 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
   6415   Register map = ToRegister(instr->map());
   6416   Register result = ToRegister(instr->result());
   6417   Label load_cache, done;
   6418   __ EnumLength(result, map);
   6419   __ cmp(result, Immediate(Smi::FromInt(0)));
   6420   __ j(not_equal, &load_cache);
   6421   __ mov(result, isolate()->factory()->empty_fixed_array());
   6422   __ jmp(&done);
   6423 
   6424   __ bind(&load_cache);
   6425   __ LoadInstanceDescriptors(map, result);
   6426   __ mov(result,
   6427          FieldOperand(result, DescriptorArray::kEnumCacheOffset));
   6428   __ mov(result,
   6429          FieldOperand(result, FixedArray::SizeFor(instr->idx())));
   6430   __ bind(&done);
   6431   __ test(result, result);
   6432   DeoptimizeIf(equal, instr->environment());
   6433 }
   6434 
   6435 
   6436 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
   6437   Register object = ToRegister(instr->value());
   6438   __ cmp(ToRegister(instr->map()),
   6439          FieldOperand(object, HeapObject::kMapOffset));
   6440   DeoptimizeIf(not_equal, instr->environment());
   6441 }
   6442 
   6443 
   6444 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
   6445   Register object = ToRegister(instr->object());
   6446   Register index = ToRegister(instr->index());
   6447 
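          // The index is a smi: non-negative values select in-object fields, and
          // negative values select the out-of-object properties backing store.
          // times_half_pointer_size compensates for the smi tag (index == value * 2).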
   6448   Label out_of_object, done;
   6449   __ cmp(index, Immediate(0));
   6450   __ j(less, &out_of_object);
   6451   __ mov(object, FieldOperand(object,
   6452                               index,
   6453                               times_half_pointer_size,
   6454                               JSObject::kHeaderSize));
   6455   __ jmp(&done, Label::kNear);
   6456 
   6457   __ bind(&out_of_object);
   6458   __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
   6459   __ neg(index);
   6460   // Index is now equal to the out-of-object property index plus 1.
   6461   __ mov(object, FieldOperand(object,
   6462                               index,
   6463                               times_half_pointer_size,
   6464                               FixedArray::kHeaderSize - kPointerSize));
   6465   __ bind(&done);
   6466 }
   6467 
   6468 
   6469 #undef __
   6470 
   6471 } }  // namespace v8::internal
   6472 
   6473 #endif  // V8_TARGET_ARCH_IA32
   6474