// Source: v8/src/crankshaft/ia32/lithium-codegen-ia32.cc (code-viewer header removed)
      1 // Copyright 2012 the V8 project authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #if V8_TARGET_ARCH_IA32
      6 
      7 #include "src/crankshaft/ia32/lithium-codegen-ia32.h"
      8 
      9 #include "src/base/bits.h"
     10 #include "src/code-factory.h"
     11 #include "src/code-stubs.h"
     12 #include "src/codegen.h"
     13 #include "src/crankshaft/hydrogen-osr.h"
     14 #include "src/deoptimizer.h"
     15 #include "src/ia32/frames-ia32.h"
     16 #include "src/ic/ic.h"
     17 #include "src/ic/stub-cache.h"
     18 #include "src/profiler/cpu-profiler.h"
     19 
     20 namespace v8 {
     21 namespace internal {
     22 
     23 // When invoking builtins, we need to record the safepoint in the middle of
     24 // the invoke instruction sequence generated by the macro assembler.
     25 class SafepointGenerator final : public CallWrapper {
     26  public:
     27   SafepointGenerator(LCodeGen* codegen,
     28                      LPointerMap* pointers,
     29                      Safepoint::DeoptMode mode)
     30       : codegen_(codegen),
     31         pointers_(pointers),
     32         deopt_mode_(mode) {}
     33   virtual ~SafepointGenerator() {}
     34 
     35   void BeforeCall(int call_size) const override {}
     36 
     37   void AfterCall() const override {
     38     codegen_->RecordSafepoint(pointers_, deopt_mode_);
     39   }
     40 
     41  private:
     42   LCodeGen* codegen_;
     43   LPointerMap* pointers_;
     44   Safepoint::DeoptMode deopt_mode_;
     45 };
     46 
     47 
     48 #define __ masm()->
     49 
     50 bool LCodeGen::GenerateCode() {
     51   LPhase phase("Z_Code generation", chunk());
     52   DCHECK(is_unused());
     53   status_ = GENERATING;
     54 
     55   // Open a frame scope to indicate that there is a frame on the stack.  The
     56   // MANUAL indicates that the scope shouldn't actually generate code to set up
     57   // the frame (that is done in GeneratePrologue).
     58   FrameScope frame_scope(masm_, StackFrame::MANUAL);
     59 
     60   support_aligned_spilled_doubles_ = info()->IsOptimizing();
     61 
     62   dynamic_frame_alignment_ = info()->IsOptimizing() &&
     63       ((chunk()->num_double_slots() > 2 &&
     64         !chunk()->graph()->is_recursive()) ||
     65        !info()->osr_ast_id().IsNone());
     66 
     67   return GeneratePrologue() &&
     68       GenerateBody() &&
     69       GenerateDeferredCode() &&
     70       GenerateJumpTable() &&
     71       GenerateSafepointTable();
     72 }
     73 
     74 
     75 void LCodeGen::FinishCode(Handle<Code> code) {
     76   DCHECK(is_done());
     77   code->set_stack_slots(GetStackSlotCount());
     78   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
     79   PopulateDeoptimizationData(code);
     80   if (info()->ShouldEnsureSpaceForLazyDeopt()) {
     81     Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(code);
     82   }
     83 }
     84 
     85 
     86 #ifdef _MSC_VER
     87 void LCodeGen::MakeSureStackPagesMapped(int offset) {
     88   const int kPageSize = 4 * KB;
     89   for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
     90     __ mov(Operand(esp, offset), eax);
     91   }
     92 }
     93 #endif
     94 
     95 
     96 void LCodeGen::SaveCallerDoubles() {
     97   DCHECK(info()->saves_caller_doubles());
     98   DCHECK(NeedsEagerFrame());
     99   Comment(";;; Save clobbered callee double registers");
    100   int count = 0;
    101   BitVector* doubles = chunk()->allocated_double_registers();
    102   BitVector::Iterator save_iterator(doubles);
    103   while (!save_iterator.Done()) {
    104     __ movsd(MemOperand(esp, count * kDoubleSize),
    105              XMMRegister::from_code(save_iterator.Current()));
    106     save_iterator.Advance();
    107     count++;
    108   }
    109 }
    110 
    111 
    112 void LCodeGen::RestoreCallerDoubles() {
    113   DCHECK(info()->saves_caller_doubles());
    114   DCHECK(NeedsEagerFrame());
    115   Comment(";;; Restore clobbered callee double registers");
    116   BitVector* doubles = chunk()->allocated_double_registers();
    117   BitVector::Iterator save_iterator(doubles);
    118   int count = 0;
    119   while (!save_iterator.Done()) {
    120     __ movsd(XMMRegister::from_code(save_iterator.Current()),
    121              MemOperand(esp, count * kDoubleSize));
    122     save_iterator.Advance();
    123     count++;
    124   }
    125 }
    126 
    127 
// Emits the function/stub prologue: optional dynamic stack re-alignment for
// spilled doubles, the frame setup itself, reservation of spill slots
// (zap-filled in debug builds), and saving of caller double registers.
// Returns false if code generation was aborted.
bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    // --stop-at support: break into the debugger on entry to the named
    // function.
    if (strlen(FLAG_stop_at) > 0 &&
        info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ int3();
    }
#endif

    if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
      // Move state of dynamic frame alignment into edx.
      __ Move(edx, Immediate(kNoAlignmentPadding));

      Label do_not_pad, align_loop;
      STATIC_ASSERT(kDoubleSize == 2 * kPointerSize);
      // Align esp + 4 to a multiple of 2 * kPointerSize.
      __ test(esp, Immediate(kPointerSize));
      __ j(not_zero, &do_not_pad, Label::kNear);
      // Push one padding word, then shift the incoming frame down over it.
      __ push(Immediate(0));
      __ mov(ebx, esp);
      __ mov(edx, Immediate(kAlignmentPaddingPushed));
      // Copy arguments, receiver, and return address.
      __ mov(ecx, Immediate(scope()->num_parameters() + 2));

      __ bind(&align_loop);
      __ mov(eax, Operand(ebx, 1 * kPointerSize));
      __ mov(Operand(ebx, 0), eax);
      __ add(Operand(ebx), Immediate(kPointerSize));
      __ dec(ecx);
      __ j(not_zero, &align_loop, Label::kNear);
      // Zap the word freed by shifting the frame, so stale data is
      // recognizable.
      __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
      __ bind(&do_not_pad);
    }
  }

  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    DCHECK(!frame_is_built_);
    frame_is_built_ = true;
    if (info()->IsStub()) {
      __ StubPrologue();
    } else {
      __ Prologue(info()->GeneratePreagedPrologue());
    }
  }

  if (info()->IsOptimizing() &&
      dynamic_frame_alignment_ &&
      FLAG_debug_code) {
    // Sanity-check the alignment established above (debug builds only).
    __ test(esp, Immediate(kPointerSize));
    __ Assert(zero, kFrameIsExpectedToBeAligned);
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  DCHECK(slots != 0 || !info()->IsOptimizing());
  if (slots > 0) {
    if (slots == 1) {
      // The single slot doubles as the dynamic-alignment state slot.
      if (dynamic_frame_alignment_) {
        __ push(edx);
      } else {
        __ push(Immediate(kNoAlignmentPadding));
      }
    } else {
      if (FLAG_debug_code) {
        __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
        MakeSureStackPagesMapped(slots * kPointerSize);
#endif
        // Debug builds: fill all spill slots with a recognizable zap value.
        __ push(eax);
        __ mov(Operand(eax), Immediate(slots));
        Label loop;
        __ bind(&loop);
        __ mov(MemOperand(esp, eax, times_4, 0),
               Immediate(kSlotsZapValue));
        __ dec(eax);
        __ j(not_zero, &loop);
        __ pop(eax);
      } else {
        __ sub(Operand(esp), Immediate(slots * kPointerSize));
#ifdef _MSC_VER
        MakeSureStackPagesMapped(slots * kPointerSize);
#endif
      }

      if (support_aligned_spilled_doubles_) {
        Comment(";;; Store dynamic frame alignment tag for spilled doubles");
        // Store dynamic frame alignment state in the first local.
        int offset = JavaScriptFrameConstants::kDynamicAlignmentStateOffset;
        if (dynamic_frame_alignment_) {
          __ mov(Operand(ebp, offset), edx);
        } else {
          __ mov(Operand(ebp, offset), Immediate(kNoAlignmentPadding));
        }
      }
    }

    // Spill caller double registers on top of the reserved slots if needed.
    if (info()->saves_caller_doubles()) SaveCallerDoubles();
  }
  return !is_aborted();
}
    233 
    234 
// Emits the LPrologue instruction: allocates a local (function) context when
// the function has heap-allocated slots, installs it in esi and the frame,
// and copies context-allocated parameters into it (with write barriers).
void LCodeGen::DoPrologue(LPrologue* instr) {
  Comment(";;; Prologue begin");

  // Possibly allocate a local context.
  if (info_->num_heap_slots() > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is still in edi.
    int slots = info_->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
    if (info()->scope()->is_script_scope()) {
      // Script scopes always go through the runtime (and may lazily deopt).
      __ push(edi);
      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
      __ CallRuntime(Runtime::kNewScriptContext);
      deopt_mode = Safepoint::kLazyDeopt;
    } else if (slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ push(edi);
      __ CallRuntime(Runtime::kNewFunctionContext);
    }
    RecordSafepoint(deopt_mode);

    // Context is returned in eax.  It replaces the context passed to us.
    // It's saved in the stack and kept live in esi.
    __ mov(esi, eax);
    __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), eax);

    // Copy parameters into context if necessary.
    // Index -1 denotes the receiver ("this") when it is context-allocated.
    int num_parameters = scope()->num_parameters();
    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ mov(eax, Operand(ebp, parameter_offset));
        // Store it in the context.
        int context_offset = Context::SlotOffset(var->index());
        __ mov(Operand(esi, context_offset), eax);
        // Update the write barrier. This clobbers eax and ebx.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(esi,
                                    context_offset,
                                    eax,
                                    ebx,
                                    kDontSaveFPRegs);
        } else if (FLAG_debug_code) {
          // Barrier was skipped because the context is in new space; assert
          // that assumption in debug builds.
          Label done;
          __ JumpIfInNewSpace(esi, eax, &done, Label::kNear);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  Comment(";;; Prologue end");
}
    299 
    300 
// Emits the on-stack-replacement entry prologue: optionally re-aligns the
// inherited unoptimized frame, records the alignment state in the first
// local, and grows the frame so the unoptimized frame is subsumed into the
// optimized one.
void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if there
  // are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Move state of dynamic frame alignment into edx.
  __ Move(edx, Immediate(kNoAlignmentPadding));

  if (support_aligned_spilled_doubles_ && dynamic_frame_alignment_) {
    Label do_not_pad, align_loop;
    // Align ebp + 4 to a multiple of 2 * kPointerSize.
    __ test(ebp, Immediate(kPointerSize));
    __ j(zero, &do_not_pad, Label::kNear);
    __ push(Immediate(0));
    __ mov(ebx, esp);
    __ mov(edx, Immediate(kAlignmentPaddingPushed));

    // Move all parts of the frame over one word. The frame consists of:
    // unoptimized frame slots, alignment state, context, frame pointer, return
    // address, receiver, and the arguments.
    __ mov(ecx, Immediate(scope()->num_parameters() +
           5 + graph()->osr()->UnoptimizedFrameSlots()));

    __ bind(&align_loop);
    __ mov(eax, Operand(ebx, 1 * kPointerSize));
    __ mov(Operand(ebx, 0), eax);
    __ add(Operand(ebx), Immediate(kPointerSize));
    __ dec(ecx);
    __ j(not_zero, &align_loop, Label::kNear);
    // Zap the freed word and shift ebp down with the rest of the frame.
    __ mov(Operand(ebx, 0), Immediate(kAlignmentZapValue));
    __ sub(Operand(ebp), Immediate(kPointerSize));
    __ bind(&do_not_pad);
  }

  // Save the first local, which is overwritten by the alignment state.
  Operand alignment_loc = MemOperand(ebp, -3 * kPointerSize);
  __ push(alignment_loc);

  // Set the dynamic frame alignment state.
  __ mov(alignment_loc, edx);

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 1);
  // One slot was already consumed by the push of the saved first local.
  __ sub(esp, Immediate((slots - 1) * kPointerSize));
}
    350 
    351 
    352 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
    353   if (instr->IsCall()) {
    354     EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    355   }
    356   if (!instr->IsLazyBailout() && !instr->IsGap()) {
    357     safepoints_.BumpLastLazySafepointIndex();
    358   }
    359 }
    360 
    361 
// Post-instruction hook; intentionally a no-op on ia32.
void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) { }
    363 
    364 
// Emits the deoptimization jump table: one entry per recorded deopt site.
// Entries that need a frame (stub deopts) jump to a shared trampoline that
// builds a STUB frame in place before tail-calling the deopt entry.
bool LCodeGen::GenerateJumpTable() {
  if (!jump_table_.length()) return !is_aborted();

  Label needs_frame;
  Comment(";;; -------------------- Jump table --------------------");

  for (int i = 0; i < jump_table_.length(); i++) {
    Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
    __ bind(&table_entry->label);
    Address entry = table_entry->address;
    DeoptComment(table_entry->deopt_info);
    if (table_entry->needs_frame) {
      DCHECK(!info()->saves_caller_doubles());
      // Pass the deopt entry address on the stack; the call pushes the
      // return address consumed by the trampoline below.
      __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
      __ call(&needs_frame);
    } else {
      if (info()->saves_caller_doubles()) RestoreCallerDoubles();
      __ call(entry, RelocInfo::RUNTIME_ENTRY);
    }
    info()->LogDeoptCallPosition(masm()->pc_offset(),
                                 table_entry->deopt_info.inlining_id);
  }
  // Shared trampoline: only emitted if some entry above called it.
  if (needs_frame.is_linked()) {
    __ bind(&needs_frame);
    /* stack layout
       4: entry address
       3: return address  <-- esp
       2: garbage
       1: garbage
       0: garbage
    */
    __ sub(esp, Immediate(kPointerSize));    // Reserve space for stub marker.
    __ push(MemOperand(esp, kPointerSize));  // Copy return address.
    __ push(MemOperand(esp, 3 * kPointerSize));  // Copy entry address.

    /* stack layout
       4: entry address
       3: return address
       2: garbage
       1: return address
       0: entry address  <-- esp
    */
    __ mov(MemOperand(esp, 4 * kPointerSize), ebp);  // Save ebp.
    // Copy context.
    __ mov(ebp, MemOperand(ebp, StandardFrameConstants::kContextOffset));
    __ mov(MemOperand(esp, 3 * kPointerSize), ebp);
    // Fill ebp with the right stack frame address.
    __ lea(ebp, MemOperand(esp, 4 * kPointerSize));
    // This variant of deopt can only be used with stubs. Since we don't
    // have a function pointer to install in the stack frame that we're
    // building, install a special marker there instead.
    DCHECK(info()->IsStub());
    __ mov(MemOperand(esp, 2 * kPointerSize),
           Immediate(Smi::FromInt(StackFrame::STUB)));

    /* stack layout
       4: old ebp
       3: context pointer
       2: stub marker
       1: return address
       0: entry address  <-- esp
    */
    // "Return" pops the entry address off the stack and jumps to it.
    __ ret(0);  // Call the continuation without clobbering registers.
  }
  return !is_aborted();
}
    431 
    432 
// Emits all deferred (slow-path) code stubs collected during body
// generation. For stubs without an eager frame, a STUB frame is built
// around each deferred sequence and torn down afterwards.
bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      // Attribute the deferred code to the source position of the
      // instruction that requested it.
      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        // Build the frame in such a way that esi isn't trashed.
        __ push(ebp);  // Caller's frame pointer.
        __ push(Operand(ebp, StandardFrameConstants::kContextOffset));
        __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
        __ lea(ebp, Operand(esp, 2 * kPointerSize));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        __ bind(code->done());
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        frame_is_built_ = false;
        __ mov(esp, ebp);
        __ pop(ebp);
      }
      // Return to the main instruction stream.
      __ jmp(code->exit());
    }
  }

  // Deferred code is the last part of the instruction sequence. Mark
  // the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}
    480 
    481 
    482 bool LCodeGen::GenerateSafepointTable() {
    483   DCHECK(is_done());
    484   if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    485     // For lazy deoptimization we need space to patch a call after every call.
    486     // Ensure there is always space for such patching, even if the code ends
    487     // in a call.
    488     int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
    489     while (masm()->pc_offset() < target_offset) {
    490       masm()->nop();
    491     }
    492   }
    493   safepoints_.Emit(masm(), GetStackSlotCount());
    494   return !is_aborted();
    495 }
    496 
    497 
// Maps an allocation code to the corresponding general-purpose register.
Register LCodeGen::ToRegister(int code) const {
  return Register::from_code(code);
}
    501 
    502 
// Maps an allocation code to the corresponding XMM double register.
XMMRegister LCodeGen::ToDoubleRegister(int code) const {
  return XMMRegister::from_code(code);
}
    506 
    507 
// Converts a register LOperand to its general-purpose register.
Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}
    512 
    513 
// Converts a double-register LOperand to its XMM register.
XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}
    518 
    519 
// Returns the constant's value as a raw (untagged) 32-bit integer.
int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}
    523 
    524 
// Returns the bit pattern of a constant in the requested representation:
// the raw address for external references, the plain value for Integer32,
// or the Smi-encoded value for smi/tagged representations.
int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                   const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  if (r.IsExternal()) {
    return reinterpret_cast<int32_t>(
        constant->ExternalReferenceValue().address());
  }
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(r.IsSmiOrTagged());
  // Smi-tag the value; on ia32 a Smi fits in a 32-bit word.
  return reinterpret_cast<int32_t>(Smi::FromInt(value));
}
    537 
    538 
// Returns the heap-object handle for a smi/tagged constant operand.
Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}
    544 
    545 
// Returns the double value of a constant operand.
double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}
    551 
    552 
// Returns the external-reference value of a constant operand.
ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasExternalReferenceValue());
  return constant->ExternalReferenceValue();
}
    558 
    559 
// True if the constant's literal representation is smi or integer32.
bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}
    563 
    564 
// True if the constant's literal representation is a smi.
bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}
    568 
    569 
// Computes the esp-relative offset of an incoming argument when no eager
// frame exists. `index` is negative (parameter index); only the return
// address (kPCOnStackSize) sits between esp and the arguments.
static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize + kPCOnStackSize;
}
    574 
    575 
// Converts an LOperand to an assembler Operand: a register operand for
// register allocations, otherwise an ebp-relative (eager frame) or
// esp-relative (frameless) stack-slot address.
Operand LCodeGen::ToOperand(LOperand* op) const {
  if (op->IsRegister()) return Operand(ToRegister(op));
  if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return Operand(ebp, StackSlotOffset(op->index()));
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return Operand(esp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}
    588 
    589 
// Returns the operand for the high 32-bit word of a double stack slot
// (one pointer above the slot returned by ToOperand).
Operand LCodeGen::HighOperand(LOperand* op) {
  DCHECK(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return Operand(
        esp, ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}
    601 
    602 
    603 void LCodeGen::WriteTranslation(LEnvironment* environment,
    604                                 Translation* translation) {
    605   if (environment == NULL) return;
    606 
    607   // The translation includes one command per value in the environment.
    608   int translation_size = environment->translation_size();
    609 
    610   WriteTranslation(environment->outer(), translation);
    611   WriteTranslationFrame(environment, translation);
    612 
    613   int object_index = 0;
    614   int dematerialized_index = 0;
    615   for (int i = 0; i < translation_size; ++i) {
    616     LOperand* value = environment->values()->at(i);
    617     AddToTranslation(
    618         environment, translation, value, environment->HasTaggedValueAt(i),
    619         environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
    620   }
    621 }
    622 
    623 
// Appends one translation command for `op`. Materialization markers expand
// recursively into captured/arguments objects whose field values live past
// the end of the regular environment values; all other operands map
// directly to a stack-slot, register, or literal store command.
void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    // A duplicated object refers back to an earlier materialization.
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    // Field values are stored after the regular translation values, at
    // env_offset; recurse to translate each one.
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    int index = op->index();
    // Non-negative indices are parameters; bias past the fixed frame part.
    if (index >= 0) {
      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
    }
    if (is_tagged) {
      translation->StoreStackSlot(index);
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(index);
    } else {
      translation->StoreInt32StackSlot(index);
    }
  } else if (op->IsDoubleStackSlot()) {
    int index = op->index();
    if (index >= 0) {
      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
    }
    translation->StoreDoubleStackSlot(index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    XMMRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}
    698 
    699 
    700 void LCodeGen::CallCodeGeneric(Handle<Code> code,
    701                                RelocInfo::Mode mode,
    702                                LInstruction* instr,
    703                                SafepointMode safepoint_mode) {
    704   DCHECK(instr != NULL);
    705   __ call(code, mode);
    706   RecordSafepointWithLazyDeopt(instr, safepoint_mode);
    707 
    708   // Signal that we don't inline smi code before these stubs in the
    709   // optimizing code generator.
    710   if (code->kind() == Code::BINARY_OP_IC ||
    711       code->kind() == Code::COMPARE_IC) {
    712     __ nop();
    713   }
    714 }
    715 
    716 
// Convenience wrapper: call a code object with a simple safepoint record.
void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}
    722 
    723 
// Calls a runtime function with `argc` arguments and records a simple
// safepoint (with lazy-deopt support) for the instruction.
void LCodeGen::CallRuntime(const Runtime::Function* fun,
                           int argc,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);
  DCHECK(instr->HasPointerMap());

  __ CallRuntime(fun, argc, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);

  DCHECK(info()->is_calling());
}
    737 
    738 
    739 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
    740   if (context->IsRegister()) {
    741     if (!ToRegister(context).is(esi)) {
    742       __ mov(esi, ToRegister(context));
    743     }
    744   } else if (context->IsStackSlot()) {
    745     __ mov(esi, ToOperand(context));
    746   } else if (context->IsConstantOperand()) {
    747     HConstant* constant =
    748         chunk_->LookupConstant(LConstantOperand::cast(context));
    749     __ LoadObject(esi, Handle<Object>::cast(constant->handle(isolate())));
    750   } else {
    751     UNREACHABLE();
    752   }
    753 }
    754 
// Calls a runtime function from deferred code: loads the context into esi,
// calls with doubles saved, and records a safepoint with registers.
void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);

  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);

  DCHECK(info()->is_calling());
}
    767 
    768 
// Registers an environment for deoptimization (once per environment):
// counts the frame chain, writes the full translation, and records the
// deopt index/pc so the deoptimizer can rebuild unoptimized frames.
void LCodeGen::RegisterEnvironmentForDeoptimization(
    LEnvironment* environment, Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    // Count total frames and JS frames along the outer-environment chain.
    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    // Lazy deopts record the pc so the call site can be patched; eager
    // deopts pass -1.
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}
    804 
    805 
// Emits a deoptimization exit for |instr|: if condition |cc| holds (or
// unconditionally when cc == no_condition), control transfers to the
// deoptimizer entry for |bailout_type|. Where possible, equivalent exits
// share a single jump-table entry to keep code size down.
void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  // Stress mode (--deopt-every-n-times): decrement a global counter and force
  // a deopt when it reaches zero. EFLAGS and eax are saved/restored around
  // the counter update so the condition |cc| tested below is unaffected.
  if (DeoptEveryNTimes()) {
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    __ pushfd();
    __ push(eax);
    __ mov(eax, Operand::StaticVariable(count));
    __ sub(eax, Immediate(1));
    __ j(not_zero, &no_deopt, Label::kNear);
    if (FLAG_trap_on_deopt) __ int3();
    // Counter hit zero: reset it to FLAG_deopt_every_n_times and deopt.
    __ mov(eax, Immediate(FLAG_deopt_every_n_times));
    __ mov(Operand::StaticVariable(count), eax);
    __ pop(eax);
    __ popfd();
    DCHECK(frame_is_built_);
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    // Counter still non-zero: store the decremented value back.
    __ mov(Operand::StaticVariable(count), eax);
    __ pop(eax);
    __ popfd();
  }

  // With --trap-on-deopt, break into the debugger instead of silently
  // reaching the deopt path.
  if (info()->ShouldTrapOnDeopt()) {
    Label done;
    if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
    __ int3();
    __ bind(&done);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);

  DCHECK(info()->IsStub() || frame_is_built_);
  // Unconditional deopt with a frame: call the entry directly.
  if (cc == no_condition && frame_is_built_) {
    DeoptComment(deopt_info);
    __ call(entry, RelocInfo::RUNTIME_ENTRY);
    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
  } else {
    // Otherwise go through the jump table (which also builds the frame when
    // needed).
    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                            !frame_is_built_);
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry.IsEquivalentTo(jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    if (cc == no_condition) {
      __ jmp(&jump_table_.last().label);
    } else {
      __ j(cc, &jump_table_.last().label);
    }
  }
}
    872 
    873 
    874 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
    875                             Deoptimizer::DeoptReason deopt_reason) {
    876   Deoptimizer::BailoutType bailout_type = info()->IsStub()
    877       ? Deoptimizer::LAZY
    878       : Deoptimizer::EAGER;
    879   DeoptimizeIf(cc, instr, deopt_reason, bailout_type);
    880 }
    881 
    882 
    883 void LCodeGen::RecordSafepointWithLazyDeopt(
    884     LInstruction* instr, SafepointMode safepoint_mode) {
    885   if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    886     RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
    887   } else {
    888     DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    889     RecordSafepointWithRegisters(
    890         instr->pointer_map(), 0, Safepoint::kLazyDeopt);
    891   }
    892 }
    893 
    894 
// Defines a safepoint of the given |kind| at the current pc and registers
// every pointer-carrying operand of |pointers| with it: stack slots always,
// registers only when the safepoint kind tracks registers.
void LCodeGen::RecordSafepoint(
    LPointerMap* pointers,
    Safepoint::Kind kind,
    int arguments,
    Safepoint::DeoptMode deopt_mode) {
  DCHECK(kind == expected_safepoint_kind_);
  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint =
      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}
    913 
    914 
// Records a simple safepoint (no registers, no arguments) for |pointers|.
void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, mode);
}
    919 
    920 
// Records a simple safepoint with an empty pointer map (no live pointers).
void LCodeGen::RecordSafepoint(Safepoint::DeoptMode mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, mode);
}
    925 
    926 
// Records a safepoint that also tracks pointers held in registers, used
// around calls made while registers are saved.
void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, mode);
}
    932 
    933 
    934 void LCodeGen::RecordAndWritePosition(int position) {
    935   if (position == RelocInfo::kNoPosition) return;
    936   masm()->positions_recorder()->RecordPosition(position);
    937   masm()->positions_recorder()->WriteRecordedPositions();
    938 }
    939 
    940 
    941 static const char* LabelType(LLabel* label) {
    942   if (label->is_loop_header()) return " (loop header)";
    943   if (label->is_osr_entry()) return " (OSR entry)";
    944   return "";
    945 }
    946 
    947 
// Emits a basic-block boundary: an assembler comment identifying the block,
// the bound label, and the block's gap moves.
void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_,
          label->hydrogen_value()->id(),
          label->block_id(),
          LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}
    958 
    959 
// Lowers a parallel move to a sequence of machine moves via the gap resolver.
void LCodeGen::DoParallelMove(LParallelMove* move) {
  resolver_.Resolve(move);
}
    963 
    964 
    965 void LCodeGen::DoGap(LGap* gap) {
    966   for (int i = LGap::FIRST_INNER_POSITION;
    967        i <= LGap::LAST_INNER_POSITION;
    968        i++) {
    969     LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    970     LParallelMove* move = gap->GetParallelMove(inner_pos);
    971     if (move != NULL) DoParallelMove(move);
    972   }
    973 }
    974 
    975 
// An instruction gap is handled exactly like a block-boundary gap.
void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
  DoGap(instr);
}
    979 
    980 
// Parameters are already in their spill slots on function entry, so no code
// needs to be emitted.
void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}
    984 
    985 
// Calls a code stub selected by the hydrogen instruction's major key.
// Calling convention: context in esi, result in eax.
void LCodeGen::DoCallStub(LCallStub* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->result()).is(eax));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      // Only the stub kinds above are generated through LCallStub.
      UNREACHABLE();
  }
}
   1004 
   1005 
// OSR values are materialized by the OSR prologue rather than per value.
void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}
   1009 
   1010 
// Computes dividend % divisor where |divisor| is a power of two (possibly
// negative), in place in the dividend register. Negative dividends take a
// separate path (negate, mask, negate back) so the result keeps the sign of
// the dividend, as required by JS %.
void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  // mask == |divisor| - 1, the bit mask selecting the remainder.
  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ test(dividend, dividend);
    __ j(not_sign, &dividend_is_not_negative, Label::kNear);
    // Note that this is correct even for kMinInt operands.
    __ neg(dividend);
    __ and_(dividend, mask);
    __ neg(dividend);
    // A zero result from a negative dividend is -0 in JS; deopt if that
    // distinction matters to any use.
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
    }
    __ jmp(&done, Label::kNear);
  }

  // Non-negative dividend: the remainder is just the low bits.
  __ bind(&dividend_is_not_negative);
  __ and_(dividend, mask);
  __ bind(&done);
}
   1042 
   1043 
// Computes dividend % divisor for an arbitrary non-zero constant divisor:
// truncating division by magic-number multiplication, then
// remainder = dividend - quotient * |divisor|. Result lands in eax.
void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(eax));

  // x % 0 must deopt: the result would be NaN, not an int32.
  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // TruncatingDiv leaves the quotient in edx.
  __ TruncatingDiv(dividend, Abs(divisor));
  __ imul(edx, edx, Abs(divisor));
  __ mov(eax, dividend);
  __ sub(eax, edx);

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    // Flags are still set from the sub above.
    __ j(not_zero, &remainder_not_zero, Label::kNear);
    // Zero remainder with a negative dividend is -0 in JS.
    __ cmp(dividend, Immediate(0));
    DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
    __ bind(&remainder_not_zero);
  }
}
   1069 
   1070 
// Computes left % right for a non-constant right operand using idiv.
// Register contract: left in eax, result (remainder) in edx, right in
// neither. Guards all the cases where idiv would fault or where the int32
// result cannot represent the JS semantics.
void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();

  Register left_reg = ToRegister(instr->left());
  DCHECK(left_reg.is(eax));
  Register right_reg = ToRegister(instr->right());
  DCHECK(!right_reg.is(eax));
  DCHECK(!right_reg.is(edx));
  Register result_reg = ToRegister(instr->result());
  DCHECK(result_reg.is(edx));

  Label done;
  // Check for x % 0, idiv would signal a divide error. We have to
  // deopt in this case because we can't return a NaN.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(right_reg, Operand(right_reg));
    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for kMinInt % -1, idiv would signal a divide error. We
  // have to deopt if we care about -0, because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ cmp(left_reg, kMinInt);
    __ j(not_equal, &no_overflow_possible, Label::kNear);
    __ cmp(right_reg, -1);
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // kMinInt % -1 == -0 in JS; deopt when -0 matters.
      DeoptimizeIf(equal, instr, Deoptimizer::kMinusZero);
    } else {
      // Otherwise the answer is 0; produce it without executing idiv.
      __ j(not_equal, &no_overflow_possible, Label::kNear);
      __ Move(result_reg, Immediate(0));
      __ jmp(&done, Label::kNear);
    }
    __ bind(&no_overflow_possible);
  }

  // Sign extend dividend in eax into edx:eax.
  __ cdq();

  // If we care about -0, test if the dividend is <0 and the result is 0.
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label positive_left;
    __ test(left_reg, Operand(left_reg));
    __ j(not_sign, &positive_left, Label::kNear);
    __ idiv(right_reg);
    __ test(result_reg, Operand(result_reg));
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
    __ jmp(&done, Label::kNear);
    __ bind(&positive_left);
  }
  __ idiv(right_reg);
  __ bind(&done);
}
   1124 
   1125 
// Computes dividend / divisor where |divisor| is +/- a power of two, using
// a shift sequence that rounds toward zero (matching idiv semantics).
void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ test(dividend, dividend);
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ cmp(dividend, kMinInt);
    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
  }
  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
      divisor != 1 && divisor != -1) {
    // mask selects the low bits that must be zero for an exact division.
    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
    __ test(dividend, Immediate(mask));
    DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
  }
  __ Move(result, dividend);
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (shift > 0) {
    // Round toward zero: add (2^shift - 1) to negative dividends before the
    // arithmetic shift. sar+shr builds that bias from the sign bit.
    // The arithmetic shift is always OK, the 'if' is an optimization only.
    if (shift > 1) __ sar(result, 31);
    __ shr(result, 32 - shift);
    __ add(result, dividend);
    __ sar(result, shift);
  }
  // A negative divisor just flips the sign of the quotient.
  if (divisor < 0) __ neg(result);
}
   1162 
   1163 
// Computes dividend / divisor for an arbitrary constant divisor using
// magic-number multiplication (TruncatingDiv). The quotient lands in edx.
void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(edx));

  // x / 0 must deopt: the result is not an int32.
  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ test(dividend, dividend);
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
  }

  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ neg(edx);

  // Unless all uses truncate, deopt when the division was inexact:
  // quotient * divisor must reproduce the dividend.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    __ mov(eax, edx);
    __ imul(eax, eax, divisor);
    __ sub(eax, dividend);
    DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
  }
}
   1191 
   1192 
// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
// Computes dividend / divisor for a non-constant divisor using idiv.
// Register contract: dividend and result in eax, remainder temp in edx,
// divisor in neither. Guards divide-by-zero, -0, and kMinInt/-1 overflow.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  Register remainder = ToRegister(instr->temp());
  DCHECK(dividend.is(eax));
  DCHECK(remainder.is(edx));
  DCHECK(ToRegister(instr->result()).is(eax));
  DCHECK(!divisor.is(eax));
  DCHECK(!divisor.is(edx));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(divisor, divisor);
    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ test(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
    __ test(divisor, divisor);
    DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ cmp(dividend, kMinInt);
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
    __ cmp(divisor, -1);
    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
    __ bind(&dividend_not_min_int);
  }

  // Sign extend to edx (= remainder).
  __ cdq();
  __ idiv(divisor);

  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    // Deoptimize if remainder is not 0.
    __ test(remainder, remainder);
    DeoptimizeIf(not_zero, instr, Deoptimizer::kLostPrecision);
  }
}
   1241 
   1242 
// Computes floor(dividend / divisor) in place for |divisor| = +/- a power
// of two. sar alone implements flooring division for positive divisors;
// negative divisors are handled via negation plus edge-case checks.
void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  if (divisor == 1) return;
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 1) {
    __ sar(dividend, shift);
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  __ neg(dividend);
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // neg of 0 leaves the zero flag set: 0 / -x would be -0 in JS.
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
      // neg of kMinInt sets the overflow flag.
      DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
    }
    return;
  }

  // If the negation could not overflow, simply shifting is OK.
  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    __ sar(dividend, shift);
    return;
  }

  // kMinInt negated overflows back to kMinInt; materialize the correct
  // quotient for that one input, otherwise shift as usual.
  Label not_kmin_int, done;
  __ j(no_overflow, &not_kmin_int, Label::kNear);
  __ mov(dividend, Immediate(kMinInt / divisor));
  __ jmp(&done, Label::kNear);
  __ bind(&not_kmin_int);
  __ sar(dividend, shift);
  __ bind(&done);
}
   1285 
   1286 
// Computes floor(dividend / divisor) for an arbitrary constant divisor.
// Uses truncating magic-number division, adjusted when truncation and
// flooring can differ (i.e. when the quotient may be negative and inexact).
// The result lands in edx.
void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(ToRegister(instr->result()).is(edx));

  // x / 0 must deopt: the result is not an int32.
  if (divisor == 0) {
    DeoptimizeIf(no_condition, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ test(dividend, dividend);
    DeoptimizeIf(zero, instr, Deoptimizer::kMinusZero);
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(dividend, Abs(divisor));
    if (divisor < 0) __ neg(edx);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp3());
  DCHECK(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx));
  Label needs_adjustment, done;
  __ cmp(dividend, Immediate(0));
  // Adjustment is needed exactly when the quotient would be negative.
  __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
  __ TruncatingDiv(dividend, Abs(divisor));
  if (divisor < 0) __ neg(edx);
  __ jmp(&done, Label::kNear);
  __ bind(&needs_adjustment);
  // Compute floor as trunc((dividend +/- 1) / divisor) - 1.
  __ lea(temp, Operand(dividend, divisor > 0 ? 1 : -1));
  __ TruncatingDiv(temp, Abs(divisor));
  if (divisor < 0) __ neg(edx);
  __ dec(edx);
  __ bind(&done);
}
   1330 
   1331 
// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
// Computes floor(dividend / divisor) for a non-constant divisor using idiv,
// then decrements the quotient when the division was inexact and the operand
// signs differ. Register contract: dividend/result in eax, remainder in edx.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  Register remainder = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());
  DCHECK(dividend.is(eax));
  DCHECK(remainder.is(edx));
  DCHECK(result.is(eax));
  DCHECK(!divisor.is(eax));
  DCHECK(!divisor.is(edx));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ test(divisor, divisor);
    DeoptimizeIf(zero, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ test(dividend, dividend);
    __ j(not_zero, &dividend_not_zero, Label::kNear);
    __ test(divisor, divisor);
    DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ cmp(dividend, kMinInt);
    __ j(not_zero, &dividend_not_min_int, Label::kNear);
    __ cmp(divisor, -1);
    DeoptimizeIf(zero, instr, Deoptimizer::kOverflow);
    __ bind(&dividend_not_min_int);
  }

  // Sign extend to edx (= remainder).
  __ cdq();
  __ idiv(divisor);

  // Truncation equals flooring when the remainder is zero. Otherwise,
  // subtract 1 iff remainder and divisor have opposite signs:
  // (remainder ^ divisor) >> 31 is -1 exactly in that case.
  Label done;
  __ test(remainder, remainder);
  __ j(zero, &done, Label::kNear);
  __ xor_(remainder, divisor);
  __ sar(remainder, 31);
  __ add(result, remainder);
  __ bind(&done);
}
   1383 
   1384 
// Computes left * right in place in the left register. Constant right
// operands get strength-reduced (neg/xor/add/lea/shl) where safe; overflow
// and -0 are checked when the hydrogen instruction requires it.
void LCodeGen::DoMulI(LMulI* instr) {
  Register left = ToRegister(instr->left());
  LOperand* right = instr->right();

  // Save the original left value: the -0 check below needs it after the
  // multiplication has clobbered |left|.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ mov(ToRegister(instr->temp()), left);
  }

  if (right->IsConstantOperand()) {
    // Try strength reductions on the multiplication.
    // All replacement instructions are at most as long as the imul
    // and have better latency.
    int constant = ToInteger32(LConstantOperand::cast(right));
    if (constant == -1) {
      __ neg(left);
    } else if (constant == 0) {
      __ xor_(left, Operand(left));
    } else if (constant == 2) {
      __ add(left, Operand(left));
    } else if (!instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      // If we know that the multiplication can't overflow, it's safe to
      // use instructions that don't set the overflow flag for the
      // multiplication.
      switch (constant) {
        case 1:
          // Do nothing.
          break;
        case 3:
          __ lea(left, Operand(left, left, times_2, 0));
          break;
        case 4:
          __ shl(left, 2);
          break;
        case 5:
          __ lea(left, Operand(left, left, times_4, 0));
          break;
        case 8:
          __ shl(left, 3);
          break;
        case 9:
          __ lea(left, Operand(left, left, times_8, 0));
          break;
        case 16:
          __ shl(left, 4);
          break;
        default:
          __ imul(left, left, constant);
          break;
      }
    } else {
      __ imul(left, left, constant);
    }
  } else {
    // Smi inputs carry a tag bit; untag one operand so the product of two
    // untagged-by-one values is a properly tagged smi again.
    if (instr->hydrogen()->representation().IsSmi()) {
      __ SmiUntag(left);
    }
    __ imul(left, ToOperand(right));
  }

  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
  }

  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // Bail out if the result is supposed to be negative zero.
    Label done;
    __ test(left, Operand(left));
    __ j(not_zero, &done, Label::kNear);
    if (right->IsConstantOperand()) {
      if (ToInteger32(LConstantOperand::cast(right)) < 0) {
        // 0 * negative == -0.
        DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
      } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
        // x * 0 == -0 iff x was negative; temp holds the original left.
        __ cmp(ToRegister(instr->temp()), Immediate(0));
        DeoptimizeIf(less, instr, Deoptimizer::kMinusZero);
      }
    } else {
      // Test the non-zero operand for negative sign.
      __ or_(ToRegister(instr->temp()), ToOperand(right));
      DeoptimizeIf(sign, instr, Deoptimizer::kMinusZero);
    }
    __ bind(&done);
  }
}
   1468 
   1469 
// Emits a bitwise AND/OR/XOR of left and right, in place in the left
// register. Constant right operands use immediate forms; XOR with all-ones
// is strength-reduced to a single not.
void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));
  DCHECK(left->IsRegister());

  if (right->IsConstantOperand()) {
    int32_t right_operand =
        ToRepresentation(LConstantOperand::cast(right),
                         instr->hydrogen()->representation());
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), right_operand);
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), right_operand);
        break;
      case Token::BIT_XOR:
        // x ^ ~0 == ~x; 'not' avoids encoding the immediate.
        if (right_operand == int32_t(~0)) {
          __ not_(ToRegister(left));
        } else {
          __ xor_(ToRegister(left), right_operand);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    switch (instr->op()) {
      case Token::BIT_AND:
        __ and_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_OR:
        __ or_(ToRegister(left), ToOperand(right));
        break;
      case Token::BIT_XOR:
        __ xor_(ToRegister(left), ToOperand(right));
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}
   1515 
   1516 
// Emits ROR/SAR/SHR/SHL of the left operand, in place. Variable shift
// amounts must be in ecx (x86 cl-shift convention); constant amounts are
// masked to 5 bits. SHR results with the sign bit set deopt when they are
// consumed as signed int32 (can_deopt), since the unsigned result would not
// fit.
void LCodeGen::DoShiftI(LShiftI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));
  DCHECK(left->IsRegister());
  if (right->IsRegister()) {
    DCHECK(ToRegister(right).is(ecx));

    switch (instr->op()) {
      case Token::ROR:
        __ ror_cl(ToRegister(left));
        break;
      case Token::SAR:
        __ sar_cl(ToRegister(left));
        break;
      case Token::SHR:
        __ shr_cl(ToRegister(left));
        if (instr->can_deopt()) {
          __ test(ToRegister(left), ToRegister(left));
          DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
        }
        break;
      case Token::SHL:
        __ shl_cl(ToRegister(left));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    int value = ToInteger32(LConstantOperand::cast(right));
    // JS shifts use only the low 5 bits of the shift count.
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count == 0 && instr->can_deopt()) {
          // Rotation by 0 leaves the value unchanged, but a negative value
          // is still invalid for the unsigned consumer.
          __ test(ToRegister(left), ToRegister(left));
          DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
        } else {
          __ ror(ToRegister(left), shift_count);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ sar(ToRegister(left), shift_count);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ shr(ToRegister(left), shift_count);
        } else if (instr->can_deopt()) {
          __ test(ToRegister(left), ToRegister(left));
          DeoptimizeIf(sign, instr, Deoptimizer::kNegativeValue);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
            // SmiTag shifts left by 1 and sets the overflow flag; fold that
            // into the shift and deopt if the smi range is exceeded.
            if (shift_count != 1) {
              __ shl(ToRegister(left), shift_count - 1);
            }
            __ SmiTag(ToRegister(left));
            DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
          } else {
            __ shl(ToRegister(left), shift_count);
          }
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}
   1591 
   1592 
// Emits left - right, in place in the left operand, deopting on overflow
// when the hydrogen instruction can overflow.
void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));

  if (right->IsConstantOperand()) {
    __ sub(ToOperand(left),
           ToImmediate(right, instr->hydrogen()->representation()));
  } else {
    __ sub(ToRegister(left), ToOperand(right));
  }
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
  }
}
   1608 
   1609 
// Materializes an untagged int32 constant into the result register.
void LCodeGen::DoConstantI(LConstantI* instr) {
  __ Move(ToRegister(instr->result()), Immediate(instr->value()));
}
   1613 
   1614 
// Materializes a Smi constant into the result register.
void LCodeGen::DoConstantS(LConstantS* instr) {
  __ Move(ToRegister(instr->result()), Immediate(instr->value()));
}
   1618 
   1619 
// Materializes a double constant into an XMM register by assembling its
// 64-bit bit pattern from two 32-bit halves (ia32 has no 64-bit GP moves).
void LCodeGen::DoConstantD(LConstantD* instr) {
  uint64_t const bits = instr->bits();
  uint32_t const lower = static_cast<uint32_t>(bits);
  uint32_t const upper = static_cast<uint32_t>(bits >> 32);
  DCHECK(instr->result()->IsDoubleRegister());

  XMMRegister result = ToDoubleRegister(instr->result());
  if (bits == 0u) {
    // +0.0 is the all-zero pattern; xorps is the cheapest way to produce it.
    __ xorps(result, result);
  } else {
    Register temp = ToRegister(instr->temp());
    if (CpuFeatures::IsSupported(SSE4_1)) {
      CpuFeatureScope scope2(masm(), SSE4_1);
      if (lower != 0) {
        // Move the low word in via movd, then insert the high word with
        // pinsrd (SSE4.1) through the GP scratch register.
        __ Move(temp, Immediate(lower));
        __ movd(result, Operand(temp));
        __ Move(temp, Immediate(upper));
        __ pinsrd(result, Operand(temp), 1);
      } else {
        // Low word is zero: clear the register, then insert only the
        // high word.
        __ xorps(result, result);
        __ Move(temp, Immediate(upper));
        __ pinsrd(result, Operand(temp), 1);
      }
    } else {
      // No SSE4.1 (no pinsrd): build (upper << 32) | lower with a 64-bit
      // shift and an OR through a scratch XMM register.
      __ Move(temp, Immediate(upper));
      __ movd(result, Operand(temp));
      __ psllq(result, 32);
      if (lower != 0u) {
        XMMRegister xmm_scratch = double_scratch0();
        __ Move(temp, Immediate(lower));
        __ movd(xmm_scratch, Operand(temp));
        __ orps(result, xmm_scratch);
      }
    }
  }
}
   1656 
   1657 
// Materializes the address of an external reference into the result
// register (lea computes the static variable's address without loading it).
void LCodeGen::DoConstantE(LConstantE* instr) {
  __ lea(ToRegister(instr->result()), Operand::StaticVariable(instr->value()));
}
   1661 
   1662 
// Materializes a tagged constant (heap object handle) into the result
// register.  The AllowDeferredHandleDereference scope permits LoadObject to
// inspect the handle here during code generation.
void LCodeGen::DoConstantT(LConstantT* instr) {
  Register reg = ToRegister(instr->result());
  Handle<Object> object = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ LoadObject(reg, object);
}
   1669 
   1670 
// Loads the enum length of a map into the result register via the
// macro-assembler's EnumLength helper.
void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);
}
   1676 
   1677 
// Builds a memory operand addressing the character at |index| inside a
// sequential string, scaled by the character width implied by |encoding|.
Operand LCodeGen::BuildSeqStringOperand(Register string,
                                        LOperand* index,
                                        String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    // Constant index: fold the (scaled) index into the displacement.
    int offset = ToRepresentation(LConstantOperand::cast(index),
                                  Representation::Integer32());
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    // One-byte characters need no scaling because a char is one byte.
    STATIC_ASSERT(kCharSize == 1);
    return FieldOperand(string, SeqString::kHeaderSize + offset);
  }
  // Variable index: use a scaled-index addressing mode (x1 for one-byte,
  // x2 for two-byte characters).
  return FieldOperand(
      string, ToRegister(index),
      encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
      SeqString::kHeaderSize);
}
   1695 
   1696 
// Loads one character (zero-extended) from a sequential string into the
// result register.  Under --debug-code, first verifies that the string's
// instance type matches the statically expected encoding.
void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register result = ToRegister(instr->result());
  Register string = ToRegister(instr->string());

  if (FLAG_debug_code) {
    // The string register doubles as scratch for the type check, so save
    // and restore it around the check.
    __ push(string);
    __ mov(string, FieldOperand(string, HeapObject::kMapOffset));
    __ movzx_b(string, FieldOperand(string, Map::kInstanceTypeOffset));

    // Keep only the representation and encoding bits, then compare against
    // the expected sequential-string tag for this encoding.
    __ and_(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ cmp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
                             ? one_byte_seq_type : two_byte_seq_type));
    __ Check(equal, kUnexpectedStringType);
    __ pop(string);
  }

  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ movzx_b(result, operand);
  } else {
    __ movzx_w(result, operand);
  }
}
   1723 
   1724 
// Stores one character into a sequential string.  The value may be a
// compile-time constant (stored as an immediate) or a register.  Under
// --debug-code, the string/index/value are first validated against the
// expected encoding.
void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());

  if (FLAG_debug_code) {
    Register value = ToRegister(instr->value());
    Register index = ToRegister(instr->index());
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    int encoding_mask =
        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
        ? one_byte_seq_type : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
  }

  Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (instr->value()->IsConstantOperand()) {
    int value = ToRepresentation(LConstantOperand::cast(instr->value()),
                                 Representation::Integer32());
    DCHECK_LE(0, value);
    if (encoding == String::ONE_BYTE_ENCODING) {
      // One-byte store: the constant must fit in a single byte.
      DCHECK_LE(value, String::kMaxOneByteCharCode);
      __ mov_b(operand, static_cast<int8_t>(value));
    } else {
      // Two-byte store: the constant must fit in a UTF-16 code unit.
      DCHECK_LE(value, String::kMaxUtf16CodeUnit);
      __ mov_w(operand, static_cast<int16_t>(value));
    }
  } else {
    Register value = ToRegister(instr->value());
    if (encoding == String::ONE_BYTE_ENCODING) {
      __ mov_b(operand, value);
    } else {
      __ mov_w(operand, value);
    }
  }
}
   1761 
   1762 
// Emits an integer addition.  When the hydrogen instruction permits it and
// the result register differs from the left input, a three-operand lea is
// used (does not set flags, leaves inputs intact).  Otherwise a flag-setting
// add is emitted into the left operand, followed by an overflow deopt when
// the instruction may overflow.
void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();

  if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
    if (right->IsConstantOperand()) {
      // lea result, [left + constant]
      int32_t offset = ToRepresentation(LConstantOperand::cast(right),
                                        instr->hydrogen()->representation());
      __ lea(ToRegister(instr->result()), MemOperand(ToRegister(left), offset));
    } else {
      // lea result, [left + right]
      Operand address(ToRegister(left), ToRegister(right), times_1, 0);
      __ lea(ToRegister(instr->result()), address);
    }
  } else {
    if (right->IsConstantOperand()) {
      __ add(ToOperand(left),
             ToImmediate(right, instr->hydrogen()->representation()));
    } else {
      __ add(ToRegister(left), ToOperand(right));
    }
    if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
      // The add above set the overflow flag; bail out on signed overflow.
      DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
    }
  }
}
   1788 
   1789 
// Emits Math.min/Math.max.  Integers use a compare + conditional move-over;
// doubles additionally handle the NaN and +0/-0 cases, which an ordinary
// compare cannot distinguish.  The result is produced in the left operand.
void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  DCHECK(left->Equals(instr->result()));
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    Label return_left;
    // Keep left when it already wins the comparison.
    Condition condition = (operation == HMathMinMax::kMathMin)
        ? less_equal
        : greater_equal;
    if (right->IsConstantOperand()) {
      Operand left_op = ToOperand(left);
      Immediate immediate = ToImmediate(LConstantOperand::cast(instr->right()),
                                        instr->hydrogen()->representation());
      __ cmp(left_op, immediate);
      __ j(condition, &return_left, Label::kNear);
      // Right wins: overwrite left with the constant.
      __ mov(left_op, immediate);
    } else {
      Register left_reg = ToRegister(left);
      Operand right_op = ToOperand(right);
      __ cmp(left_reg, right_op);
      __ j(condition, &return_left, Label::kNear);
      // Right wins: overwrite left with right.
      __ mov(left_reg, right_op);
    }
    __ bind(&return_left);
  } else {
    DCHECK(instr->hydrogen()->representation().IsDouble());
    Label check_nan_left, check_zero, return_left, return_right;
    Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
    XMMRegister left_reg = ToDoubleRegister(left);
    XMMRegister right_reg = ToDoubleRegister(right);
    __ ucomisd(left_reg, right_reg);
    __ j(parity_even, &check_nan_left, Label::kNear);  // At least one NaN.
    __ j(equal, &check_zero, Label::kNear);  // left == right.
    __ j(condition, &return_left, Label::kNear);
    __ jmp(&return_right, Label::kNear);

    // left == right numerically, but they may still be +0 vs -0, which
    // min/max must distinguish.
    __ bind(&check_zero);
    XMMRegister xmm_scratch = double_scratch0();
    __ xorps(xmm_scratch, xmm_scratch);
    __ ucomisd(left_reg, xmm_scratch);
    __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.
    // At this point, both left and right are either 0 or -0.
    if (operation == HMathMinMax::kMathMin) {
      // min(+0, -0) == -0: OR the sign bits so -0 wins if present.
      __ orpd(left_reg, right_reg);
    } else {
      // Since we operate on +0 and/or -0, addsd and andpd have the same
      // effect: max(+0, -0) == +0, and the sum of two signed zeros is +0
      // unless both are -0.
      __ addsd(left_reg, right_reg);
    }
    __ jmp(&return_left, Label::kNear);

    __ bind(&check_nan_left);
    __ ucomisd(left_reg, left_reg);  // NaN check.
    __ j(parity_even, &return_left, Label::kNear);  // left == NaN.
    // Otherwise right is the NaN (or wins); propagate it.
    __ bind(&return_right);
    __ movaps(left_reg, right_reg);

    __ bind(&return_left);
  }
}
   1850 
   1851 
// Emits a binary double-precision arithmetic operation.  With AVX available,
// three-operand VEX forms are used so the result register may differ from
// the left input; the SSE2 fallback requires result == left (checked).
// Token::MOD calls out to the C runtime, which returns on the x87 stack.
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  XMMRegister left = ToDoubleRegister(instr->left());
  XMMRegister right = ToDoubleRegister(instr->right());
  XMMRegister result = ToDoubleRegister(instr->result());
  switch (instr->op()) {
    case Token::ADD:
      if (CpuFeatures::IsSupported(AVX)) {
        CpuFeatureScope scope(masm(), AVX);
        __ vaddsd(result, left, right);
      } else {
        DCHECK(result.is(left));
        __ addsd(left, right);
      }
      break;
    case Token::SUB:
      if (CpuFeatures::IsSupported(AVX)) {
        CpuFeatureScope scope(masm(), AVX);
        __ vsubsd(result, left, right);
      } else {
        DCHECK(result.is(left));
        __ subsd(left, right);
      }
      break;
    case Token::MUL:
      if (CpuFeatures::IsSupported(AVX)) {
        CpuFeatureScope scope(masm(), AVX);
        __ vmulsd(result, left, right);
      } else {
        DCHECK(result.is(left));
        __ mulsd(left, right);
      }
      break;
    case Token::DIV:
      if (CpuFeatures::IsSupported(AVX)) {
        CpuFeatureScope scope(masm(), AVX);
        __ vdivsd(result, left, right);
      } else {
        DCHECK(result.is(left));
        __ divsd(left, right);
      }
      // Don't delete this mov. It may improve performance on some CPUs,
      // when there is a (v)mulsd depending on the result
      __ movaps(result, result);
      break;
    case Token::MOD: {
      // Pass two doubles as arguments on the stack.
      __ PrepareCallCFunction(4, eax);
      __ movsd(Operand(esp, 0 * kDoubleSize), left);
      __ movsd(Operand(esp, 1 * kDoubleSize), right);
      __ CallCFunction(
          ExternalReference::mod_two_doubles_operation(isolate()),
          4);

      // Return value is in st(0) on ia32.
      // Store it into the result register by spilling through the stack
      // (there is no direct x87 -> XMM move).
      __ sub(Operand(esp), Immediate(kDoubleSize));
      __ fstp_d(Operand(esp, 0));
      __ movsd(result, Operand(esp, 0));
      __ add(Operand(esp), Immediate(kDoubleSize));
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}
   1918 
   1919 
// Emits a generic (tagged) binary operation by calling the BinaryOpIC stub.
// The stub's calling convention fixes left in edx, right in eax, context in
// esi, and the result in eax (all checked below).
void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->left()).is(edx));
  DCHECK(ToRegister(instr->right()).is(eax));
  DCHECK(ToRegister(instr->result()).is(eax));

  Handle<Code> code =
      CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
  CallCode(code, RelocInfo::CODE_TARGET, instr);
}
   1930 
   1931 
// Emits the control transfer for a two-way branch: jump on |cc| to the true
// block and otherwise to the false block, eliding whichever jump targets the
// block that will be emitted immediately after this one (fall-through).
// With no_condition (or identical targets) this degenerates to a goto.
template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();

  if (right_block == left_block || cc == no_condition) {
    // Unconditional: both edges lead to the same block.
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    // True block falls through; branch only when the condition fails.
    __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    // False block falls through; branch only when the condition holds.
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
  } else {
    // Neither target falls through: conditional jump plus unconditional jump.
    __ j(cc, chunk_->GetAssemblyLabel(left_block));
    __ jmp(chunk_->GetAssemblyLabel(right_block));
  }
}
   1950 
   1951 
// Emits only the jump to the instruction's true block: unconditional for
// no_condition, otherwise conditional on |cc|.  Control falls through when
// the condition does not hold.
template <class InstrType>
void LCodeGen::EmitTrueBranch(InstrType instr, Condition cc) {
  int true_block = instr->TrueDestination(chunk_);
  if (cc == no_condition) {
    __ jmp(chunk_->GetAssemblyLabel(true_block));
  } else {
    __ j(cc, chunk_->GetAssemblyLabel(true_block));
  }
}
   1961 
   1962 
// Emits only the jump to the instruction's false block: unconditional for
// no_condition, otherwise conditional on |cc|.  Control falls through when
// the condition does not hold.
template<class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
  int false_block = instr->FalseDestination(chunk_);
  if (cc == no_condition) {
    __ jmp(chunk_->GetAssemblyLabel(false_block));
  } else {
    __ j(cc, chunk_->GetAssemblyLabel(false_block));
  }
}
   1972 
   1973 
// Emits a ToBoolean-style branch.  Fast paths handle values whose
// representation or HType is statically known (int32/smi, double, boolean,
// JSArray, heap number, string); the generic tagged path checks each type
// the ToBooleanStub has observed so far and deoptimizes on anything new
// (unless the expected set is already generic).
void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  if (r.IsSmiOrInteger32()) {
    // Integers and Smis: non-zero is true.
    Register reg = ToRegister(instr->value());
    __ test(reg, Operand(reg));
    EmitBranch(instr, not_zero);
  } else if (r.IsDouble()) {
    DCHECK(!info()->IsStub());
    // Doubles: compare against +0.0.  NaN compares unordered (ZF set), so
    // not_equal fails and NaN takes the false branch, as required.
    XMMRegister reg = ToDoubleRegister(instr->value());
    XMMRegister xmm_scratch = double_scratch0();
    __ xorps(xmm_scratch, xmm_scratch);
    __ ucomisd(reg, xmm_scratch);
    EmitBranch(instr, not_equal);
  } else {
    DCHECK(r.IsTagged());
    Register reg = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      DCHECK(!info()->IsStub());
      __ cmp(reg, factory()->true_value());
      EmitBranch(instr, equal);
    } else if (type.IsSmi()) {
      DCHECK(!info()->IsStub());
      // Smi zero is the only falsy Smi (tagged zero == machine zero).
      __ test(reg, Operand(reg));
      EmitBranch(instr, not_equal);
    } else if (type.IsJSArray()) {
      DCHECK(!info()->IsStub());
      // JSArrays are always truthy.
      EmitBranch(instr, no_condition);
    } else if (type.IsHeapNumber()) {
      DCHECK(!info()->IsStub());
      // Heap numbers: false iff the value is +0, -0 or NaN (see the double
      // case above for the NaN flag behavior).
      XMMRegister xmm_scratch = double_scratch0();
      __ xorps(xmm_scratch, xmm_scratch);
      __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
      EmitBranch(instr, not_equal);
    } else if (type.IsString()) {
      DCHECK(!info()->IsStub());
      // Strings: false iff empty (length is a Smi; Smi zero == 0).
      __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
      EmitBranch(instr, not_equal);
    } else {
      // Generic path: test only the types the ToBooleanStub has recorded.
      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();

      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ cmp(reg, factory()->undefined_value());
        __ j(equal, instr->FalseLabel(chunk_));
      }
      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // true -> true.
        __ cmp(reg, factory()->true_value());
        __ j(equal, instr->TrueLabel(chunk_));
        // false -> false.
        __ cmp(reg, factory()->false_value());
        __ j(equal, instr->FalseLabel(chunk_));
      }
      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        // 'null' -> false.
        __ cmp(reg, factory()->null_value());
        __ j(equal, instr->FalseLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        __ test(reg, Operand(reg));
        __ j(equal, instr->FalseLabel(chunk_));
        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a Smi -> deopt.
        __ test(reg, Immediate(kSmiTagMask));
        DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
      }

      Register map = no_reg;  // Keep the compiler happy.
      if (expected.NeedsMap()) {
        // Load the map once; the remaining checks inspect it.
        map = ToRegister(instr->temp());
        DCHECK(!map.is(reg));
        __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));

        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ test_b(FieldOperand(map, Map::kBitFieldOffset),
                    1 << Map::kIsUndetectable);
          __ j(not_zero, instr->FalseLabel(chunk_));
        }
      }

      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
        // spec object -> true.
        __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
        __ j(above_equal, instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
        __ j(above_equal, &not_string, Label::kNear);
        __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
        __ j(not_zero, instr->TrueLabel(chunk_));
        __ jmp(instr->FalseLabel(chunk_));
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::SYMBOL)) {
        // Symbol value -> true.
        __ CmpInstanceType(map, SYMBOL_TYPE);
        __ j(equal, instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
        // SIMD value -> true.
        __ CmpInstanceType(map, SIMD128_VALUE_TYPE);
        __ j(equal, instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        // heap number -> false iff +0, -0, or NaN.
        Label not_heap_number;
        __ cmp(FieldOperand(reg, HeapObject::kMapOffset),
               factory()->heap_number_map());
        __ j(not_equal, &not_heap_number, Label::kNear);
        XMMRegister xmm_scratch = double_scratch0();
        __ xorps(xmm_scratch, xmm_scratch);
        __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
        __ j(zero, instr->FalseLabel(chunk_));
        __ jmp(instr->TrueLabel(chunk_));
        __ bind(&not_heap_number);
      }

      if (!expected.IsGeneric()) {
        // We've seen something for the first time -> deopt.
        // This can only happen if we are not generic already.
        DeoptimizeIf(no_condition, instr, Deoptimizer::kUnexpectedObject);
      }
    }
  }
}
   2111 
   2112 
// Emits an unconditional jump to |block|, unless that block is emitted
// immediately after this one, in which case control simply falls through.
void LCodeGen::EmitGoto(int block) {
  if (!IsNextEmittedBlock(block)) {
    __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
  }
}
   2118 
   2119 
// Emits the jump (or fall-through) for an unconditional LGoto.
void LCodeGen::DoGoto(LGoto* instr) {
  EmitGoto(instr->block_id());
}
   2123 
   2124 
   2125 Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
   2126   Condition cond = no_condition;
   2127   switch (op) {
   2128     case Token::EQ:
   2129     case Token::EQ_STRICT:
   2130       cond = equal;
   2131       break;
   2132     case Token::NE:
   2133     case Token::NE_STRICT:
   2134       cond = not_equal;
   2135       break;
   2136     case Token::LT:
   2137       cond = is_unsigned ? below : less;
   2138       break;
   2139     case Token::GT:
   2140       cond = is_unsigned ? above : greater;
   2141       break;
   2142     case Token::LTE:
   2143       cond = is_unsigned ? below_equal : less_equal;
   2144       break;
   2145     case Token::GTE:
   2146       cond = is_unsigned ? above_equal : greater_equal;
   2147       break;
   2148     case Token::IN:
   2149     case Token::INSTANCEOF:
   2150     default:
   2151       UNREACHABLE();
   2152   }
   2153   return cond;
   2154 }
   2155 
   2156 
// Emits a numeric comparison branch.  Two constant operands are folded at
// compile time into an unconditional goto.  Double comparisons route NaN to
// the false block explicitly (ucomisd sets the parity flag on unordered).
// Unsigned condition codes are used for doubles and for uint32 operands.
void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  bool is_unsigned =
      instr->is_double() ||
      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
  Condition cc = TokenToCondition(instr->op(), is_unsigned);

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      __ ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
      // Don't base result on EFLAGS when a NaN is involved. Instead
      // jump to the false block.
      __ j(parity_even, instr->FalseLabel(chunk_));
    } else {
      if (right->IsConstantOperand()) {
        // cmp only allows an immediate as its second operand.
        __ cmp(ToOperand(left),
               ToImmediate(right, instr->hydrogen()->representation()));
      } else if (left->IsConstantOperand()) {
        __ cmp(ToOperand(right),
               ToImmediate(left, instr->hydrogen()->representation()));
        // We commuted the operands, so commute the condition.
        cc = CommuteCondition(cc);
      } else {
        __ cmp(ToRegister(left), ToOperand(right));
      }
    }
    EmitBranch(instr, cc);
  }
}
   2195 
   2196 
// Branches to the true block iff left and right refer to the same object
// (pointer identity).  A constant right-hand side is compared via its
// handle; otherwise a plain register/operand compare is emitted.
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());

  if (instr->right()->IsConstantOperand()) {
    Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
    __ CmpObject(left, right);
  } else {
    Operand right = ToOperand(instr->right());
    __ cmp(left, right);
  }
  EmitBranch(instr, equal);
}
   2209 
   2210 
// Branches to the true block iff the input is the hole.  Tagged inputs are
// compared directly against the hole value.  Double inputs encode the hole
// as a NaN with upper word kHoleNanUpper32, so the value is first NaN-tested
// and then its upper 32 bits are inspected via a stack spill.
void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
  if (instr->hydrogen()->representation().IsTagged()) {
    Register input_reg = ToRegister(instr->object());
    __ cmp(input_reg, factory()->the_hole_value());
    EmitBranch(instr, equal);
    return;
  }

  XMMRegister input_reg = ToDoubleRegister(instr->object());
  // A non-NaN double can never be the hole (parity_odd == ordered compare).
  __ ucomisd(input_reg, input_reg);
  EmitFalseBranch(instr, parity_odd);

  // Spill the double so its upper 32 bits can be examined with a GP compare.
  __ sub(esp, Immediate(kDoubleSize));
  __ movsd(MemOperand(esp, 0), input_reg);

  __ add(esp, Immediate(kDoubleSize));
  int offset = sizeof(kHoleNanUpper32);
  // NOTE(review): esp was already restored above, so this reads the word
  // just below the stack pointer (the upper half of the spilled double);
  // it relies on nothing clobbering that slot between the movsd and the cmp.
  __ cmp(MemOperand(esp, -offset), Immediate(kHoleNanUpper32));
  EmitBranch(instr, equal);
}
   2231 
   2232 
// Branches to the true block iff the input is exactly -0.0.  Doubles are
// first compared against +0.0 (rules out everything but +/-0) and then the
// sign bit is read with movmskpd.  Tagged inputs must be heap numbers whose
// exponent word is 0x80000000 and mantissa word is 0.
void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
  Representation rep = instr->hydrogen()->value()->representation();
  DCHECK(!rep.IsInteger32());
  Register scratch = ToRegister(instr->temp());

  if (rep.IsDouble()) {
    XMMRegister value = ToDoubleRegister(instr->value());
    XMMRegister xmm_scratch = double_scratch0();
    __ xorps(xmm_scratch, xmm_scratch);
    __ ucomisd(xmm_scratch, value);
    // Anything that does not compare equal to +0.0 cannot be -0.0.
    EmitFalseBranch(instr, not_equal);
    // movmskpd extracts the sign bits; bit 0 set means the low double is
    // negative, i.e. the value is -0.0.
    __ movmskpd(scratch, value);
    __ test(scratch, Immediate(1));
    EmitBranch(instr, not_zero);
  } else {
    Register value = ToRegister(instr->value());
    Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
    __ CheckMap(value, map, instr->FalseLabel(chunk()), DO_SMI_CHECK);
    // Subtracting 1 from the upper word overflows (sets OF) only for
    // 0x80000000 (INT_MIN), which is exactly the upper word of -0.0.
    __ cmp(FieldOperand(value, HeapNumber::kExponentOffset),
           Immediate(0x1));
    EmitFalseBranch(instr, no_overflow);
    // The mantissa word of -0.0 must also be all zero.
    __ cmp(FieldOperand(value, HeapNumber::kMantissaOffset),
           Immediate(0x00000000));
    EmitBranch(instr, equal);
  }
}
   2259 
   2260 
// Emits an is-string check on |input|.  Jumps to |is_not_string| for Smis
// (unless the caller guarantees a heap object) and returns the condition
// that holds when the object's instance type is a string type.  |temp1| is
// clobbered by the type check.
Condition LCodeGen::EmitIsString(Register input,
                                 Register temp1,
                                 Label* is_not_string,
                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
  if (check_needed == INLINE_SMI_CHECK) {
    __ JumpIfSmi(input, is_not_string);
  }

  Condition cond = masm_->IsObjectStringType(input, temp1, temp1);

  return cond;
}
   2273 
   2274 
// Branches to the true block iff the value is a string.  The inline Smi
// check is omitted when the hydrogen type guarantees a heap object.
void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  SmiCheck check_needed =
      instr->hydrogen()->value()->type().IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;

  Condition true_cond = EmitIsString(
      reg, temp, instr->FalseLabel(chunk_), check_needed);

  EmitBranch(instr, true_cond);
}
   2288 
   2289 
// Branches to the true block iff the value is a Smi (its tag bits, selected
// by kSmiTagMask, are all zero).
void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Operand input = ToOperand(instr->value());

  __ test(input, Immediate(kSmiTagMask));
  EmitBranch(instr, zero);
}
   2296 
   2297 
// Branches to the true block iff the value is a heap object whose map has
// the undetectable bit set.  Smis go straight to the false block unless the
// hydrogen type already guarantees a heap object.
void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    STATIC_ASSERT(kSmiTag == 0);
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));
  __ test_b(FieldOperand(temp, Map::kBitFieldOffset),
            1 << Map::kIsUndetectable);
  EmitBranch(instr, not_zero);
}
   2311 
   2312 
   2313 static Condition ComputeCompareCondition(Token::Value op) {
   2314   switch (op) {
   2315     case Token::EQ_STRICT:
   2316     case Token::EQ:
   2317       return equal;
   2318     case Token::LT:
   2319       return less;
   2320     case Token::GT:
   2321       return greater;
   2322     case Token::LTE:
   2323       return less_equal;
   2324     case Token::GTE:
   2325       return greater_equal;
   2326     default:
   2327       UNREACHABLE();
   2328       return no_condition;
   2329   }
   2330 }
   2331 
   2332 
// Emits a string comparison branch by calling the StringCompare stub (left
// in edx, right in eax per its convention), then testing the stub's integer
// result in eax against zero and branching on the condition implied by the
// comparison token.
void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->left()).is(edx));
  DCHECK(ToRegister(instr->right()).is(eax));

  Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
  CallCode(code, RelocInfo::CODE_TARGET, instr);
  // Set flags from the stub's result so ComputeCompareCondition applies.
  __ test(eax, eax);

  EmitBranch(instr, ComputeCompareCondition(instr->op()));
}
   2344 
   2345 
   2346 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
   2347   InstanceType from = instr->from();
   2348   InstanceType to = instr->to();
   2349   if (from == FIRST_TYPE) return to;
   2350   DCHECK(from == to || to == LAST_TYPE);
   2351   return from;
   2352 }
   2353 
   2354 
   2355 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
   2356   InstanceType from = instr->from();
   2357   InstanceType to = instr->to();
   2358   if (from == to) return equal;
   2359   if (to == LAST_TYPE) return above_equal;
   2360   if (from == FIRST_TYPE) return below_equal;
   2361   UNREACHABLE();
   2362   return equal;
   2363 }
   2364 
   2365 
// Branches to the true block iff the value's instance type falls in the
// instruction's [from, to] range (see TestType/BranchCondition).  Smis go
// to the false block unless the hydrogen type guarantees a heap object.
// |temp| receives the value's map as a side effect of CmpObjectType.
void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }

  __ CmpObjectType(input, TestType(instr->hydrogen()), temp);
  EmitBranch(instr, BranchCondition(instr->hydrogen()));
}
   2377 
   2378 
// Extracts the cached array index from a string's hash field into the
// result register (IndexFromHash decodes the index from the hash bits).
void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  // Only valid on strings; enforced under --debug-code.
  __ AssertString(input);

  __ mov(result, FieldOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}
   2388 
   2389 
// Branches on whether a string's hash field holds a cached array index.
// Takes the true branch when the masked bits are all zero (zero flag set),
// i.e. 'equal' after the test.
void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->value());

  __ test(FieldOperand(input, String::kHashFieldOffset),
          Immediate(String::kContainsCachedArrayIndexMask));
  EmitBranch(instr, equal);
}
   2398 
   2399 
   2400 // Branches to a label or falls through with the answer in the z flag.  Trashes
   2401 // the temp registers, but not the input.
// Emits the class-name test used by DoClassOfTestAndBranch.
//
// Jumps to is_true/is_false for the cases decided early (smis, functions,
// non-function constructors); otherwise falls through with the final
// name comparison's result in the z flag (equal == match).  Trashes temp
// and temp2 but leaves input intact.
void LCodeGen::EmitClassOfTest(Label* is_true,
                               Label* is_false,
                               Handle<String>class_name,
                               Register input,
                               Register temp,
                               Register temp2) {
  DCHECK(!input.is(temp));
  DCHECK(!input.is(temp2));
  DCHECK(!temp.is(temp2));
  // Smis have no class; they never match.
  __ JumpIfSmi(input, is_false);

  // JSFunctions match exactly the class name "Function" and nothing else,
  // so the function check can be resolved statically against the literal.
  __ CmpObjectType(input, JS_FUNCTION_TYPE, temp);
  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
    __ j(equal, is_true);
  } else {
    __ j(equal, is_false);
  }

  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
  // Check if the constructor in the map is a function.
  __ GetMapConstructor(temp, temp, temp2);
  // Objects with a non-function constructor have class 'Object'.
  __ CmpInstanceType(temp2, JS_FUNCTION_TYPE);
  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
    __ j(not_equal, is_true);
  } else {
    __ j(not_equal, is_false);
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ mov(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ mov(temp, FieldOperand(temp,
                            SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is internalized since it's a literal.
  // The name in the constructor is internalized because of the way the context
  // is booted.  This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax.  Since both sides are internalized it is sufficient to use an
  // identity comparison.
  __ cmp(temp, class_name);
  // End with the answer in the z flag.
}
   2445 
   2446 
// Branches on whether the input object's class name equals the literal
// class name recorded on the hydrogen instruction.  EmitClassOfTest either
// jumps directly to the true/false labels or falls through with the answer
// in the z flag, which the final EmitBranch consumes.
void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());
  Register temp2 = ToRegister(instr->temp2());

  Handle<String> class_name = instr->hydrogen()->class_name();

  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
      class_name, input, temp, temp2);

  EmitBranch(instr, equal);
}
   2459 
   2460 
// Branches on whether the object's map is identical to the given map
// (a direct pointer comparison against the map handle).
void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  __ cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
  EmitBranch(instr, equal);
}
   2466 
   2467 
// Implements 'instanceof' by calling the InstanceOfStub.  Operands are
// pinned to the stub's descriptor registers; the result comes back in eax.
void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
  DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
  DCHECK(ToRegister(instr->result()).is(eax));
  InstanceOfStub stub(isolate());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
   2476 
   2477 
// Walks {object}'s prototype chain looking for {prototype}.  Branches true
// on a hit, false when the chain ends at null, and deoptimizes for objects
// that need access checks or are proxies (both require generic handling).
// Note: scratch doubles as object_map and object_prototype; the map load at
// the bottom of the loop re-establishes the invariant for the next round.
void LCodeGen::DoHasInPrototypeChainAndBranch(
    LHasInPrototypeChainAndBranch* instr) {
  Register const object = ToRegister(instr->object());
  Register const object_map = ToRegister(instr->scratch());
  Register const object_prototype = object_map;
  Register const prototype = ToRegister(instr->prototype());

  // The {object} must be a spec object.  It's sufficient to know that {object}
  // is not a smi, since all other non-spec objects have {null} prototypes and
  // will be ruled out below.
  if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
    __ test(object, Immediate(kSmiTagMask));
    EmitFalseBranch(instr, zero);
  }

  // Loop through the {object}s prototype chain looking for the {prototype}.
  __ mov(object_map, FieldOperand(object, HeapObject::kMapOffset));
  Label loop;
  __ bind(&loop);

  // Deoptimize if the object needs to be access checked.
  __ test_b(FieldOperand(object_map, Map::kBitFieldOffset),
            1 << Map::kIsAccessCheckNeeded);
  DeoptimizeIf(not_zero, instr, Deoptimizer::kAccessCheck);
  // Deoptimize for proxies.
  __ CmpInstanceType(object_map, JS_PROXY_TYPE);
  DeoptimizeIf(equal, instr, Deoptimizer::kProxy);

  __ mov(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
  __ cmp(object_prototype, prototype);
  EmitTrueBranch(instr, equal);
  // Reaching null means the search target is not in the chain.
  __ cmp(object_prototype, factory()->null_value());
  EmitFalseBranch(instr, equal);
  // Advance: load the prototype's map and iterate.
  __ mov(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
  __ jmp(&loop);
}
   2514 
   2515 
// Generic (tagged) comparison producing a boolean value rather than a
// branch.  Calls the CompareIC, interprets its integer result in eax via
// the condition for the comparison token, and materializes true/false into
// the result register.
void LCodeGen::DoCmpT(LCmpT* instr) {
  Token::Value op = instr->op();

  Handle<Code> ic =
      CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);

  Condition condition = ComputeCompareCondition(op);
  Label true_value, done;
  // The IC returns an integer in eax; testing it sets the flags the
  // token-specific condition inspects.
  __ test(eax, Operand(eax));
  __ j(condition, &true_value, Label::kNear);
  __ mov(ToRegister(instr->result()), factory()->false_value());
  __ jmp(&done, Label::kNear);
  __ bind(&true_value);
  __ mov(ToRegister(instr->result()), factory()->true_value());
  __ bind(&done);
}
   2533 
   2534 
// Emits the epilogue's actual return sequence.  Pops the return address,
// the parameters, and — when dynamic_frame_alignment is true — one extra
// alignment-padding word (hence extra_value_count of 2 instead of 1).
// With a constant parameter count the drop is folded into Ret; otherwise
// the count arrives as a smi in a register and the stack is adjusted
// manually before an indirect jump to the saved return address.
void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
  int extra_value_count = dynamic_frame_alignment ? 2 : 1;

  if (instr->has_constant_parameter_count()) {
    int parameter_count = ToInteger32(instr->constant_parameter_count());
    if (dynamic_frame_alignment && FLAG_debug_code) {
      // In debug code, verify the padding slot really holds the zap value
      // placed there when the frame was aligned.
      __ cmp(Operand(esp,
                     (parameter_count + extra_value_count) * kPointerSize),
             Immediate(kAlignmentZapValue));
      __ Assert(equal, kExpectedAlignmentMarker);
    }
    __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
  } else {
    DCHECK(info()->IsStub());  // Functions would need to drop one more value.
    Register reg = ToRegister(instr->parameter_count());
    // The argument count parameter is a smi
    __ SmiUntag(reg);
    // Pick a scratch register guaranteed to differ from the count register.
    Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
    if (dynamic_frame_alignment && FLAG_debug_code) {
      DCHECK(extra_value_count == 2);
      __ cmp(Operand(esp, reg, times_pointer_size,
                     extra_value_count * kPointerSize),
             Immediate(kAlignmentZapValue));
      __ Assert(equal, kExpectedAlignmentMarker);
    }

    // emit code to restore stack based on instr->parameter_count()
    __ pop(return_addr_reg);  // save return address
    if (dynamic_frame_alignment) {
      __ inc(reg);  // 1 more for alignment
    }

    // Drop reg * kPointerSize bytes of arguments, then return via the
    // saved address.
    __ shl(reg, kPointerSizeLog2);
    __ add(esp, reg);
    __ jmp(return_addr_reg);
  }
}
   2572 
   2573 
// Emits the function epilogue: optional exit tracing, restoring caller
// double registers, tearing down the frame, and returning.  When dynamic
// frame alignment is in use, the alignment state saved in the frame decides
// between two return sequences (with/without dropping the padding word);
// the padded-path EmitReturn ends in a return, so control never falls from
// it into the unpadded one.
void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Preserve the return value on the stack and rely on the runtime call
    // to return the value in the same register.  We're leaving the code
    // managed by the register allocator and tearing down the frame, it's
    // safe to write to the context register.
    __ push(eax);
    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit);
  }
  if (info()->saves_caller_doubles()) RestoreCallerDoubles();
  if (dynamic_frame_alignment_) {
    // Fetch the state of the dynamic frame alignment.
    __ mov(edx, Operand(ebp,
      JavaScriptFrameConstants::kDynamicAlignmentStateOffset));
  }
  if (NeedsEagerFrame()) {
    // Standard frame teardown: restore esp from ebp, pop saved ebp.
    __ mov(esp, ebp);
    __ pop(ebp);
  }
  if (dynamic_frame_alignment_) {
    Label no_padding;
    __ cmp(edx, Immediate(kNoAlignmentPadding));
    __ j(equal, &no_padding, Label::kNear);

    EmitReturn(instr, true);
    __ bind(&no_padding);
  }

  EmitReturn(instr, false);
}
   2605 
   2606 
// Loads the type-feedback vector and slot index into the registers the
// vector-based load IC expects.  The vector register is fixed by the
// descriptor; the slot register (eax here) receives the slot index as a
// smi.
template <class T>
void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
  Register vector_register = ToRegister(instr->temp_vector());
  Register slot_register = LoadWithVectorDescriptor::SlotRegister();
  DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
  DCHECK(slot_register.is(eax));

  // The feedback vector is a heap object embedded directly in the code.
  AllowDeferredHandleDereference vector_structure_check;
  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
  __ mov(vector_register, vector);
  // No need to allocate this register.
  FeedbackVectorSlot slot = instr->hydrogen()->slot();
  int index = vector->GetIndex(slot);
  __ mov(slot_register, Immediate(Smi::FromInt(index)));
}
   2622 
   2623 
// Store-IC counterpart of EmitVectorLoadICRegisters: both the vector and
// slot registers come from the instruction's temps rather than a fixed
// descriptor register.
template <class T>
void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
  Register vector_register = ToRegister(instr->temp_vector());
  Register slot_register = ToRegister(instr->temp_slot());

  // The feedback vector is a heap object embedded directly in the code.
  AllowDeferredHandleDereference vector_structure_check;
  Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
  __ mov(vector_register, vector);
  FeedbackVectorSlot slot = instr->hydrogen()->slot();
  int index = vector->GetIndex(slot);
  __ mov(slot_register, Immediate(Smi::FromInt(index)));
}
   2636 
   2637 
// Loads a global variable via the generic load IC.  Sets up the name,
// feedback vector, and slot registers, then calls a LoadIC configured for
// the instruction's typeof mode; the result arrives in eax.
void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->global_object())
             .is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->result()).is(eax));

  __ mov(LoadDescriptor::NameRegister(), instr->name());
  EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
  Handle<Code> ic =
      CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
                                         SLOPPY, PREMONOMORPHIC).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
   2651 
   2652 
// Loads a slot from a context object.  If the slot may contain the hole
// (an uninitialized let/const binding), either deoptimize on the hole or
// substitute undefined, depending on the hydrogen instruction's mode.
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ mov(result, ContextOperand(context, instr->slot_index()));

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(result, factory()->the_hole_value());
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr, Deoptimizer::kHole);
    } else {
      // Hole is allowed: convert it to undefined instead of deopting.
      Label is_not_hole;
      __ j(not_equal, &is_not_hole, Label::kNear);
      __ mov(result, factory()->undefined_value());
      __ bind(&is_not_hole);
    }
  }
}
   2670 
   2671 
// Stores a value into a context slot.  Optionally checks the current slot
// contents for the hole first (deopt, or skip the store entirely), and
// emits a write barrier when the stored value may be a heap object.
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());

  Label skip_assignment;

  Operand target = ContextOperand(context, instr->slot_index());
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ cmp(target, factory()->the_hole_value());
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(equal, instr, Deoptimizer::kHole);
    } else {
      // Non-hole contents mean this binding is shadowed elsewhere; leave
      // the slot untouched.
      __ j(not_equal, &skip_assignment, Label::kNear);
    }
  }

  __ mov(target, value);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    // The smi check inside the barrier can be omitted when the type system
    // proves the value is a heap object.
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    Register temp = ToRegister(instr->temp());
    int offset = Context::SlotOffset(instr->slot_index());
    __ RecordWriteContextSlot(context,
                              offset,
                              value,
                              temp,
                              kSaveFPRegs,
                              EMIT_REMEMBERED_SET,
                              check_needed);
  }

  __ bind(&skip_assignment);
}
   2706 
   2707 
// Loads a named field with a statically-known access descriptor.  Handles
// three cases: external (off-heap) memory, unboxed double fields, and
// tagged fields — the latter either in-object or in the out-of-line
// properties backing store.
void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    Register result = ToRegister(instr->result());
    // A constant object operand denotes an absolute external address;
    // otherwise the address is register + offset.
    MemOperand operand = instr->object()->IsConstantOperand()
        ? MemOperand::StaticVariable(ToExternalReference(
                LConstantOperand::cast(instr->object())))
        : MemOperand(ToRegister(instr->object()), offset);
    __ Load(result, operand, access.representation());
    return;
  }

  Register object = ToRegister(instr->object());
  if (instr->hydrogen()->representation().IsDouble()) {
    XMMRegister result = ToDoubleRegister(instr->result());
    __ movsd(result, FieldOperand(object, offset));
    return;
  }

  Register result = ToRegister(instr->result());
  if (!access.IsInobject()) {
    // Indirect through the properties array; reuse the result register as
    // the new base.
    __ mov(result, FieldOperand(object, JSObject::kPropertiesOffset));
    object = result;
  }
  __ Load(result, FieldOperand(object, offset), access.representation());
}
   2736 
   2737 
// Pushes a tagged operand onto the stack, handling constants (smi or heap
// object handle), registers, and stack slots.  Doubles are not valid here.
void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
  DCHECK(!operand->IsDoubleRegister());
  if (operand->IsConstantOperand()) {
    Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
    AllowDeferredHandleDereference smi_check;
    if (object->IsSmi()) {
      __ Push(Handle<Smi>::cast(object));
    } else {
      // Heap objects may need relocation-aware pushing.
      __ PushHeapObject(Handle<HeapObject>::cast(object));
    }
  } else if (operand->IsRegister()) {
    __ push(ToRegister(operand));
  } else {
    // Stack slot or other memory operand.
    __ push(ToOperand(operand));
  }
}
   2754 
   2755 
// Loads a named property via the generic load IC.  Receiver and result
// registers are fixed by the load descriptor / IC convention (result in
// eax); the IC is parameterized by language mode and initialization state.
void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->result()).is(eax));

  __ mov(LoadDescriptor::NameRegister(), instr->name());
  EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
  Handle<Code> ic =
      CodeFactory::LoadICInOptimizedCode(
          isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
          instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
   2769 
   2770 
// Loads a JSFunction's prototype.  The prototype-or-initial-map slot may
// hold either the prototype directly or an initial map whose prototype
// field must be read; the hole means no prototype and triggers a deopt.
void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
  Register function = ToRegister(instr->function());
  Register temp = ToRegister(instr->temp());
  Register result = ToRegister(instr->result());

  // Get the prototype or initial map from the function.
  __ mov(result,
         FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));

  // Check that the function has a prototype or an initial map.
  __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
  DeoptimizeIf(equal, instr, Deoptimizer::kHole);

  // If the function does not have an initial map, we're done.
  Label done;
  __ CmpObjectType(result, MAP_TYPE, temp);
  __ j(not_equal, &done, Label::kNear);

  // Get the prototype from the initial map.
  __ mov(result, FieldOperand(result, Map::kPrototypeOffset));

  // All done.
  __ bind(&done);
}
   2795 
   2796 
// Loads a value from the isolate's root list into the result register.
void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
  Register result = ToRegister(instr->result());
  __ LoadRoot(result, instr->index());
}
   2801 
   2802 
// Loads an argument from the arguments area addressed by 'arguments',
// counting back from the frame pointer: arguments are laid out in reverse,
// with two extra words (saved fp and return address) between the frame
// pointer and the last argument.
void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());
  if (instr->length()->IsConstantOperand() &&
      instr->index()->IsConstantOperand()) {
    // Both operands constant: fold the whole address computation.
    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
    int index = (const_length - const_index) + 1;
    __ mov(result, Operand(arguments, index * kPointerSize));
  } else {
    Register length = ToRegister(instr->length());
    Operand index = ToOperand(instr->index());
    // There are two words between the frame pointer and the last argument.
    // Subtracting from length accounts for one of them add one more.
    __ sub(length, index);
    __ mov(result, Operand(arguments, length, times_4, kPointerSize));
  }
}
   2821 
   2822 
// Loads an element from a typed (external/fixed typed) array.  Float32 is
// widened to double; integer kinds use width/sign-appropriate moves; a
// uint32 value with the sign bit set deopts unless the result is known to
// be treated as uint32 (it wouldn't fit a smi/int32 representation).
void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
  ElementsKind elements_kind = instr->elements_kind();
  LOperand* key = instr->key();
  if (!key->IsConstantOperand() &&
      ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
                                  elements_kind)) {
    // Untag the smi key in place so it can be used directly as an index.
    __ SmiUntag(ToRegister(key));
  }
  Operand operand(BuildFastArrayOperand(
      instr->elements(),
      key,
      instr->hydrogen()->key()->representation(),
      elements_kind,
      instr->base_offset()));
  if (elements_kind == FLOAT32_ELEMENTS) {
    XMMRegister result(ToDoubleRegister(instr->result()));
    // Load the 32-bit float and widen to double precision.
    __ movss(result, operand);
    __ cvtss2sd(result, result);
  } else if (elements_kind == FLOAT64_ELEMENTS) {
    __ movsd(ToDoubleRegister(instr->result()), operand);
  } else {
    Register result(ToRegister(instr->result()));
    switch (elements_kind) {
      case INT8_ELEMENTS:
        __ movsx_b(result, operand);  // sign-extend byte
        break;
      case UINT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ movzx_b(result, operand);  // zero-extend byte
        break;
      case INT16_ELEMENTS:
        __ movsx_w(result, operand);  // sign-extend word
        break;
      case UINT16_ELEMENTS:
        __ movzx_w(result, operand);  // zero-extend word
        break;
      case INT32_ELEMENTS:
        __ mov(result, operand);
        break;
      case UINT32_ELEMENTS:
        __ mov(result, operand);
        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
          // A set sign bit means the value exceeds the int32 range the
          // rest of the pipeline assumes — bail out.
          __ test(result, Operand(result));
          DeoptimizeIf(negative, instr, Deoptimizer::kNegativeValue);
        }
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
        // Non-typed-array kinds are handled by the other DoLoadKeyed paths.
        UNREACHABLE();
        break;
    }
  }
}
   2885 
   2886 
// Loads an element from a FixedDoubleArray into an XMM register.  When a
// hole check is required, only the upper 32 bits of the element are
// compared against the hole-NaN signature before the full 64-bit load.
void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
  if (instr->hydrogen()->RequiresHoleCheck()) {
    // Address just the upper word by offsetting past the lower word.
    Operand hole_check_operand = BuildFastArrayOperand(
        instr->elements(), instr->key(),
        instr->hydrogen()->key()->representation(),
        FAST_DOUBLE_ELEMENTS,
        instr->base_offset() + sizeof(kHoleNanLower32));
    __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
    DeoptimizeIf(equal, instr, Deoptimizer::kHole);
  }

  Operand double_load_operand = BuildFastArrayOperand(
      instr->elements(),
      instr->key(),
      instr->hydrogen()->key()->representation(),
      FAST_DOUBLE_ELEMENTS,
      instr->base_offset());
  XMMRegister result = ToDoubleRegister(instr->result());
  __ movsd(result, double_load_operand);
}
   2907 
   2908 
// Loads a tagged element from a FixedArray, with optional hole handling:
// either deoptimize on a hole (or non-smi for smi-kinds), or — in
// CONVERT_HOLE_TO_UNDEFINED mode — replace the hole with undefined,
// guarded in stubs by the array-protector cell.
void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
  Register result = ToRegister(instr->result());

  // Load the result.
  __ mov(result,
         BuildFastArrayOperand(instr->elements(), instr->key(),
                               instr->hydrogen()->key()->representation(),
                               FAST_ELEMENTS, instr->base_offset()));

  // Check for the hole value.
  if (instr->hydrogen()->RequiresHoleCheck()) {
    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
      // In smi-elements arrays, any non-smi (including the hole) is invalid.
      __ test(result, Immediate(kSmiTagMask));
      DeoptimizeIf(not_equal, instr, Deoptimizer::kNotASmi);
    } else {
      __ cmp(result, factory()->the_hole_value());
      DeoptimizeIf(equal, instr, Deoptimizer::kHole);
    }
  } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
    DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
    Label done;
    __ cmp(result, factory()->the_hole_value());
    __ j(not_equal, &done);
    if (info()->IsStub()) {
      // A stub can safely convert the hole to undefined only if the array
      // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
      // it needs to bail out.
      __ mov(result, isolate()->factory()->array_protector());
      __ cmp(FieldOperand(result, PropertyCell::kValueOffset),
             Immediate(Smi::FromInt(Isolate::kArrayProtectorValid)));
      DeoptimizeIf(not_equal, instr, Deoptimizer::kHole);
    }
    __ mov(result, isolate()->factory()->undefined_value());
    __ bind(&done);
  }
}
   2945 
   2946 
// Dispatches a keyed load to the appropriate specialized emitter.  The
// typed-array check comes first: a typed array of doubles must take the
// external-array path, not the fixed-double-array path.
void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
  if (instr->is_fixed_typed_array()) {
    DoLoadKeyedExternalArray(instr);
  } else if (instr->hydrogen()->representation().IsDouble()) {
    DoLoadKeyedFixedDoubleArray(instr);
  } else {
    DoLoadKeyedFixedArray(instr);
  }
}
   2956 
   2957 
// Builds the addressing-mode operand for an element access.
//
// For a constant key the offset is folded into the displacement (aborting
// if the shifted index could overflow the 32-bit displacement); for a
// register key the element size becomes the scale factor, reduced by one
// bit when the key is still a tagged smi.
Operand LCodeGen::BuildFastArrayOperand(
    LOperand* elements_pointer,
    LOperand* key,
    Representation key_representation,
    ElementsKind elements_kind,
    uint32_t base_offset) {
  Register elements_pointer_reg = ToRegister(elements_pointer);
  int element_shift_size = ElementsKindToShiftSize(elements_kind);
  int shift_size = element_shift_size;
  if (key->IsConstantOperand()) {
    int constant_value = ToInteger32(LConstantOperand::cast(key));
    // Reject indices whose top nibble is set: shifting them left could
    // overflow the displacement.
    if (constant_value & 0xF0000000) {
      Abort(kArrayIndexConstantValueTooBig);
    }
    return Operand(elements_pointer_reg,
                   ((constant_value) << shift_size)
                       + base_offset);
  } else {
    // Take the tag bit into account while computing the shift size.
    if (key_representation.IsSmi() && (shift_size >= 1)) {
      shift_size -= kSmiTagSize;
    }
    ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
    return Operand(elements_pointer_reg,
                   ToRegister(key),
                   scale_factor,
                   base_offset);
  }
}
   2987 
   2988 
// Performs a keyed load through the generic keyed-load IC.  Receiver and
// key registers are fixed by the load descriptor; the feedback vector/slot
// registers are set up only when the instruction carries them.
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));

  if (instr->hydrogen()->HasVectorAndSlot()) {
    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
  }

  Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
                        isolate(), instr->hydrogen()->language_mode(),
                        instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
   3003 
   3004 
// Computes the base pointer of the arguments area into the result register.
// For inlined frames the arguments sit just above the current stack
// pointer; otherwise the result is ebp, or — when an arguments adaptor
// frame is present — the adaptor frame's pointer.
void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
  Register result = ToRegister(instr->result());

  if (instr->hydrogen()->from_inlined()) {
    // Inlined call: arguments were pushed just below esp.
    __ lea(result, Operand(esp, -2 * kPointerSize));
  } else {
    // Check for arguments adapter frame.
    Label done, adapted;
    __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
    __ mov(result, Operand(result, StandardFrameConstants::kContextOffset));
    // Adaptor frames are identified by a smi frame-type marker in the
    // context slot.
    __ cmp(Operand(result),
           Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
    __ j(equal, &adapted, Label::kNear);

    // No arguments adaptor frame.
    __ mov(result, Operand(ebp));
    __ jmp(&done, Label::kNear);

    // Arguments adaptor frame present.
    __ bind(&adapted);
    __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));

    // Result is the frame pointer for the frame if not adapted and for the real
    // frame below the adaptor frame if adapted.
    __ bind(&done);
  }
}
   3032 
   3033 
// Computes the number of actual arguments.  If the arguments-elements base
// equals ebp there is no adaptor frame and the formal parameter count from
// the scope is used; otherwise the (smi) length is read from the adaptor
// frame and untagged.
void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
  Operand elem = ToOperand(instr->elements());
  Register result = ToRegister(instr->result());

  Label done;

  // If no arguments adaptor frame the number of arguments is fixed.
  __ cmp(ebp, elem);
  // Load the static count first; it is simply overwritten on the adaptor
  // path below.
  __ mov(result, Immediate(scope()->num_parameters()));
  __ j(equal, &done, Label::kNear);

  // Arguments adaptor frame present. Get argument length from there.
  __ mov(result, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
  __ mov(result, Operand(result,
                         ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(result);

  // Argument length is in result register.
  __ bind(&done);
}
   3054 
   3055 
// Wraps the receiver for a Function.prototype.apply-style call: null and
// undefined become the global proxy for ordinary sloppy-mode functions,
// other non-object receivers cause a deopt, and strict-mode or native
// functions receive their value unchanged.
void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());

  // If the receiver is null or undefined, we have to pass the global
  // object as a receiver to normal functions. Values have to be
  // passed unchanged to builtins and strict-mode functions.
  Label receiver_ok, global_object;
  // Deopt-every-n testing may insert extra code, pushing targets out of
  // near-jump range.
  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
  Register scratch = ToRegister(instr->temp());

  if (!instr->hydrogen()->known_function()) {
    // Do not transform the receiver to object for strict mode
    // functions.
    __ mov(scratch,
           FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
    __ test_b(FieldOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset),
              1 << SharedFunctionInfo::kStrictModeBitWithinByte);
    __ j(not_equal, &receiver_ok, dist);

    // Do not transform the receiver to object for builtins.
    __ test_b(FieldOperand(scratch, SharedFunctionInfo::kNativeByteOffset),
              1 << SharedFunctionInfo::kNativeBitWithinByte);
    __ j(not_equal, &receiver_ok, dist);
  }

  // Normal function. Replace undefined or null with global receiver.
  __ cmp(receiver, factory()->null_value());
  __ j(equal, &global_object, Label::kNear);
  __ cmp(receiver, factory()->undefined_value());
  __ j(equal, &global_object, Label::kNear);

  // The receiver should be a JS object.
  __ test(receiver, Immediate(kSmiTagMask));
  DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
  __ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, scratch);
  DeoptimizeIf(below, instr, Deoptimizer::kNotAJavaScriptObject);

  __ jmp(&receiver_ok, Label::kNear);
  __ bind(&global_object);
  // Fetch the global proxy out of the function's context chain.
  __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
  __ mov(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX));
  __ mov(receiver, ContextOperand(receiver, Context::GLOBAL_PROXY_INDEX));
  __ bind(&receiver_ok);
}
   3101 
   3102 
// Implements Function.prototype.apply: copies up to kArgumentsLimit
// arguments from the (possibly adapted) caller frame onto the stack in
// reverse index order, then invokes the function with a lazily-recorded
// safepoint.  Registers are pinned by the invoke convention (receiver/
// count in eax, function in edi, result in eax).
void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register length = ToRegister(instr->length());
  Register elements = ToRegister(instr->elements());
  DCHECK(receiver.is(eax));  // Used for parameter count.
  DCHECK(function.is(edi));  // Required by InvokeFunction.
  DCHECK(ToRegister(instr->result()).is(eax));

  // Copy the arguments to this function possibly from the
  // adaptor frame below it.
  const uint32_t kArgumentsLimit = 1 * KB;
  __ cmp(length, kArgumentsLimit);
  DeoptimizeIf(above, instr, Deoptimizer::kTooManyArguments);

  // Push the receiver and keep the argument count in eax (= receiver reg)
  // for InvokeFunction.
  __ push(receiver);
  __ mov(receiver, length);

  // Loop through the arguments pushing them onto the execution
  // stack.
  Label invoke, loop;
  // length is a small non-negative integer, due to the test above.
  __ test(length, Operand(length));
  __ j(zero, &invoke, Label::kNear);
  __ bind(&loop);
  // Arguments are laid out above 'elements' in reverse order; push from
  // the highest index down to 1.
  __ push(Operand(elements, length, times_pointer_size, 1 * kPointerSize));
  __ dec(length);
  __ j(not_zero, &loop);

  // Invoke the function.
  __ bind(&invoke);
  DCHECK(instr->HasPointerMap());
  LPointerMap* pointers = instr->pointer_map();
  // Record the safepoint after the call returns (lazy deopt support).
  SafepointGenerator safepoint_generator(
      this, pointers, Safepoint::kLazyDeopt);
  ParameterCount actual(eax);
  __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
                    safepoint_generator);
}
   3142 
   3143 
// Emits a hardware breakpoint instruction (int3) to trap into the debugger.
void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
  __ int3();
}
   3147 
   3148 
   3149 void LCodeGen::DoPushArgument(LPushArgument* instr) {
   3150   LOperand* argument = instr->value();
   3151   EmitPushTaggedOperand(argument);
   3152 }
   3153 
   3154 
// Drops instr->count() values from the top of the stack.
void LCodeGen::DoDrop(LDrop* instr) {
  __ Drop(instr->count());
}
   3158 
   3159 
   3160 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   3161   Register result = ToRegister(instr->result());
   3162   __ mov(result, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
   3163 }
   3164 
   3165 
   3166 void LCodeGen::DoContext(LContext* instr) {
   3167   Register result = ToRegister(instr->result());
   3168   if (info()->IsOptimizing()) {
   3169     __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
   3170   } else {
   3171     // If there is no frame, the context must be in esi.
   3172     DCHECK(result.is(esi));
   3173   }
   3174 }
   3175 
   3176 
// Declares a script's global variables/functions by passing the declaration
// pairs and flags to the Runtime::kDeclareGlobals runtime function.
void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  // Runtime arguments: the declaration pairs array and the flags as a Smi.
  __ push(Immediate(instr->hydrogen()->pairs()));
  __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
  CallRuntime(Runtime::kDeclareGlobals, instr);
}
   3183 
   3184 
// Calls a function known at compile time. When the formal and actual
// argument counts match (or the function opts out of adaptation), the call
// goes directly through the code entry; otherwise it goes through the
// arguments adaptor via InvokeFunction.
void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
                                 int formal_parameter_count, int arity,
                                 LInstruction* instr) {
  bool dont_adapt_arguments =
      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
  bool can_invoke_directly =
      dont_adapt_arguments || formal_parameter_count == arity;

  // The callee is expected in edi by the calling conventions used below.
  Register function_reg = edi;

  if (can_invoke_directly) {
    // Change context.
    __ mov(esi, FieldOperand(function_reg, JSFunction::kContextOffset));

    // Always initialize new target and number of actual arguments.
    __ mov(edx, factory()->undefined_value());
    __ mov(eax, arity);

    // Invoke function directly.
    if (function.is_identical_to(info()->closure())) {
      // Self-call: use a direct call to the code being generated.
      __ CallSelf();
    } else {
      __ call(FieldOperand(function_reg, JSFunction::kCodeEntryOffset));
    }
    // A direct call needs an explicit safepoint for lazy deoptimization.
    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
  } else {
    // We need to adapt arguments.
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(
        this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    ParameterCount expected(formal_parameter_count);
    __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
  }
}
   3220 
   3221 
// Calls a code object described by a call interface descriptor. The target
// is either a constant Code handle or a register holding a Code object.
// Tail calls tear down the frame and jump; regular calls record a lazy-deopt
// safepoint around the call.
void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
  DCHECK(ToRegister(instr->result()).is(eax));

  if (instr->hydrogen()->IsTailCall()) {
    // Tear down our frame (if any) before jumping away.
    if (NeedsEagerFrame()) __ leave();

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      __ jmp(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      // Skip the Code object header to reach the first instruction.
      __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
      __ jmp(target);
    }
  } else {
    LPointerMap* pointers = instr->pointer_map();
    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);

    if (instr->target()->IsConstantOperand()) {
      LConstantOperand* target = LConstantOperand::cast(instr->target());
      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
      // BeforeCall/AfterCall bracket the call so the safepoint lands on
      // the return address of the call instruction.
      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
      __ call(code, RelocInfo::CODE_TARGET);
    } else {
      DCHECK(instr->target()->IsRegister());
      Register target = ToRegister(instr->target());
      generator.BeforeCall(__ CallSize(Operand(target)));
      // Skip the Code object header to reach the first instruction.
      __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
      __ call(target);
    }
    generator.AfterCall();
  }
}
   3257 
   3258 
// Calls a JSFunction held in edi with a statically known arity, setting up
// the context, new.target and argument count, and recording a safepoint.
void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
  DCHECK(ToRegister(instr->function()).is(edi));
  DCHECK(ToRegister(instr->result()).is(eax));

  // Change context.
  __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));

  // Always initialize new target and number of actual arguments.
  __ mov(edx, factory()->undefined_value());
  __ mov(eax, instr->arity());

  // Detect a call to the function currently being compiled so that we can
  // use a direct self-call instead of going through the code entry.
  bool is_self_call = false;
  if (instr->hydrogen()->function()->IsConstant()) {
    HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
    Handle<JSFunction> jsfun =
      Handle<JSFunction>::cast(fun_const->handle(isolate()));
    is_self_call = jsfun.is_identical_to(info()->closure());
  }

  if (is_self_call) {
    __ CallSelf();
  } else {
    __ call(FieldOperand(edi, JSFunction::kCodeEntryOffset));
  }

  // Record the safepoint needed for lazy deoptimization after the call.
  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}
   3286 
   3287 
// Deferred (slow) path of Math.abs for a tagged input: the value must be a
// heap number. If it is negative, a fresh heap number with the sign bit
// cleared is allocated and stored back into the input/result register slot.
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
  Register input_reg = ToRegister(instr->value());
  // Non-smi, non-heap-number inputs deoptimize.
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);

  Label slow, allocated, done;
  // Pick two scratch registers distinct from input_reg and each other.
  Register tmp = input_reg.is(eax) ? ecx : eax;
  Register tmp2 = tmp.is(ecx) ? edx : input_reg.is(ecx) ? edx : ecx;

  // Preserve the value of all registers.
  PushSafepointRegistersScope scope(this);

  __ mov(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  // Check the sign of the argument. If the argument is positive, just
  // return it. We do not need to patch the stack since |input| and
  // |result| are the same register and |input| will be restored
  // unchanged by popping safepoint registers.
  __ test(tmp, Immediate(HeapNumber::kSignMask));
  __ j(zero, &done, Label::kNear);

  // Fast allocation of the result heap number; falls through to the
  // runtime on allocation failure.
  __ AllocateHeapNumber(tmp, tmp2, no_reg, &slow);
  __ jmp(&allocated, Label::kNear);

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
                          instr, instr->context());
  // Set the pointer to the new heap number in tmp.
  if (!tmp.is(eax)) __ mov(tmp, eax);
  // Restore input_reg after call to runtime.
  __ LoadFromSafepointRegisterSlot(input_reg, input_reg);

  // Copy the number into the new heap number with the sign bit cleared.
  __ bind(&allocated);
  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kExponentOffset));
  __ and_(tmp2, ~HeapNumber::kSignMask);
  __ mov(FieldOperand(tmp, HeapNumber::kExponentOffset), tmp2);
  __ mov(tmp2, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
  __ mov(FieldOperand(tmp, HeapNumber::kMantissaOffset), tmp2);
  // Make the new heap number the result when safepoint registers pop.
  __ StoreToSafepointRegisterSlot(input_reg, tmp);

  __ bind(&done);
}
   3331 
   3332 
// Emits in-place abs() for a smi/int32 value. Deoptimizes on kMinInt,
// whose absolute value is not representable as an int32.
void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
  Register input_reg = ToRegister(instr->value());
  __ test(input_reg, Operand(input_reg));
  Label is_positive;
  __ j(not_sign, &is_positive, Label::kNear);
  __ neg(input_reg);  // Sets flags.
  // neg of kMinInt leaves the sign flag set: overflow, so deoptimize.
  DeoptimizeIf(negative, instr, Deoptimizer::kOverflow);
  __ bind(&is_positive);
}
   3342 
   3343 
// Emits Math.abs, dispatching on the input representation: bit tricks for
// doubles, in-place negate for integers, and a deferred heap-number path
// for tagged values that are not smis.
void LCodeGen::DoMathAbs(LMathAbs* instr) {
  // Class for deferred case.
  class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
   public:
    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
                                    LMathAbs* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LMathAbs* instr_;
  };

  DCHECK(instr->value()->Equals(instr->result()));
  Representation r = instr->hydrogen()->value()->representation();

  if (r.IsDouble()) {
    // Clear the sign bit: andps with (0.0 - x) keeps only bits common to
    // x and its negation, i.e. everything but the sign.
    XMMRegister scratch = double_scratch0();
    XMMRegister input_reg = ToDoubleRegister(instr->value());
    __ xorps(scratch, scratch);
    __ subsd(scratch, input_reg);
    __ andps(input_reg, scratch);
  } else if (r.IsSmiOrInteger32()) {
    EmitIntegerMathAbs(instr);
  } else {  // Tagged case.
    DeferredMathAbsTaggedHeapNumber* deferred =
        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
    Register input_reg = ToRegister(instr->value());
    // Smi check.
    __ JumpIfNotSmi(input_reg, deferred->entry());
    // Smis use the integer path; heap numbers take the deferred path.
    EmitIntegerMathAbs(instr);
    __ bind(deferred->exit());
  }
}
   3381 
   3382 
// Emits Math.floor of a double into an int32 register. Uses ROUNDSD when
// SSE4.1 is available; otherwise truncates and compensates for negative
// inputs. Deoptimizes on NaN, int32 overflow, and (optionally) -0.
void LCodeGen::DoMathFloor(LMathFloor* instr) {
  XMMRegister xmm_scratch = double_scratch0();
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->value());

  if (CpuFeatures::IsSupported(SSE4_1)) {
    CpuFeatureScope scope(masm(), SSE4_1);
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Deoptimize on negative zero.
      Label non_zero;
      __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
      __ ucomisd(input_reg, xmm_scratch);
      __ j(not_equal, &non_zero, Label::kNear);
      // Input compares equal to zero: check the sign bit to tell -0 from +0.
      __ movmskpd(output_reg, input_reg);
      __ test(output_reg, Immediate(1));
      DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
      __ bind(&non_zero);
    }
    __ roundsd(xmm_scratch, input_reg, kRoundDown);
    __ cvttsd2si(output_reg, Operand(xmm_scratch));
    // Overflow is signalled with minint.
    // (cmp with 1 sets the overflow flag exactly when output is kMinInt.)
    __ cmp(output_reg, 0x1);
    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
  } else {
    Label negative_sign, done;
    // Deoptimize on unordered.
    __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
    __ ucomisd(input_reg, xmm_scratch);
    DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
    __ j(below, &negative_sign, Label::kNear);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // Check for negative zero.
      Label positive_sign;
      __ j(above, &positive_sign, Label::kNear);
      __ movmskpd(output_reg, input_reg);
      __ test(output_reg, Immediate(1));
      DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
      __ Move(output_reg, Immediate(0));
      __ jmp(&done, Label::kNear);
      __ bind(&positive_sign);
    }

    // Use truncating instruction (OK because input is positive).
    __ cvttsd2si(output_reg, Operand(input_reg));
    // Overflow is signalled with minint.
    __ cmp(output_reg, 0x1);
    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
    __ jmp(&done, Label::kNear);

    // Non-zero negative reaches here.
    __ bind(&negative_sign);
    // Truncate, then compare and compensate.
    // Truncation rounds toward zero, so for non-integral negative inputs
    // the truncated result is one too large; subtract 1 in that case.
    __ cvttsd2si(output_reg, Operand(input_reg));
    __ Cvtsi2sd(xmm_scratch, output_reg);
    __ ucomisd(input_reg, xmm_scratch);
    __ j(equal, &done, Label::kNear);
    __ sub(output_reg, Immediate(1));
    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);

    __ bind(&done);
  }
}
   3446 
   3447 
// Emits Math.round of a double into an int32 register: rounds half toward
// +infinity by splitting the input range at +0.5 and -0.5. Deoptimizes on
// int32 overflow and (optionally) on a -0 result.
void LCodeGen::DoMathRound(LMathRound* instr) {
  Register output_reg = ToRegister(instr->result());
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  XMMRegister xmm_scratch = double_scratch0();
  XMMRegister input_temp = ToDoubleRegister(instr->temp());
  ExternalReference one_half = ExternalReference::address_of_one_half();
  ExternalReference minus_one_half =
      ExternalReference::address_of_minus_one_half();

  Label done, round_to_zero, below_one_half, do_not_compensate;
  // Jumps over DeoptimizeIf sites must be far when deopts are forced
  // periodically for testing.
  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;

  __ movsd(xmm_scratch, Operand::StaticVariable(one_half));
  __ ucomisd(xmm_scratch, input_reg);
  __ j(above, &below_one_half, Label::kNear);

  // CVTTSD2SI rounds towards zero, since 0.5 <= x, we use floor(0.5 + x).
  __ addsd(xmm_scratch, input_reg);
  __ cvttsd2si(output_reg, Operand(xmm_scratch));
  // Overflow is signalled with minint.
  __ cmp(output_reg, 0x1);
  DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
  __ jmp(&done, dist);

  __ bind(&below_one_half);
  __ movsd(xmm_scratch, Operand::StaticVariable(minus_one_half));
  __ ucomisd(xmm_scratch, input_reg);
  __ j(below_equal, &round_to_zero, Label::kNear);

  // CVTTSD2SI rounds towards zero, we use ceil(x - (-0.5)) and then
  // compare and compensate.
  __ movaps(input_temp, input_reg);  // Do not alter input_reg.
  __ subsd(input_temp, xmm_scratch);
  __ cvttsd2si(output_reg, Operand(input_temp));
  // Catch minint due to overflow, and to prevent overflow when compensating.
  __ cmp(output_reg, 0x1);
  DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);

  // If truncation lost a fractional part, the truncated value is one too
  // large for this negative input; compensate by subtracting 1.
  __ Cvtsi2sd(xmm_scratch, output_reg);
  __ ucomisd(xmm_scratch, input_temp);
  __ j(equal, &done, dist);
  __ sub(output_reg, Immediate(1));
  // No overflow because we already ruled out minint.
  __ jmp(&done, dist);

  __ bind(&round_to_zero);
  // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
  // we can ignore the difference between a result of -0 and +0.
  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    // If the sign is positive, we return +0.
    __ movmskpd(output_reg, input_reg);
    __ test(output_reg, Immediate(1));
    DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
  }
  __ Move(output_reg, Immediate(0));
  __ bind(&done);
}
   3505 
   3506 
   3507 void LCodeGen::DoMathFround(LMathFround* instr) {
   3508   XMMRegister input_reg = ToDoubleRegister(instr->value());
   3509   XMMRegister output_reg = ToDoubleRegister(instr->result());
   3510   __ cvtsd2ss(output_reg, input_reg);
   3511   __ cvtss2sd(output_reg, output_reg);
   3512 }
   3513 
   3514 
   3515 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
   3516   Operand input = ToOperand(instr->value());
   3517   XMMRegister output = ToDoubleRegister(instr->result());
   3518   __ sqrtsd(output, input);
   3519 }
   3520 
   3521 
// Emits Math.pow(x, 0.5) in place, honoring the spec difference from
// Math.sqrt: pow(-Infinity, 0.5) must be +Infinity, not NaN.
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
  XMMRegister xmm_scratch = double_scratch0();
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  Register scratch = ToRegister(instr->temp());
  DCHECK(ToDoubleRegister(instr->result()).is(input_reg));

  // Note that according to ECMA-262 15.8.2.13:
  // Math.pow(-Infinity, 0.5) == Infinity
  // Math.sqrt(-Infinity) == NaN
  Label done, sqrt;
  // Check base for -Infinity.  According to IEEE-754, single-precision
  // -Infinity has the highest 9 bits set and the lowest 23 bits cleared.
  __ mov(scratch, 0xFF800000);
  __ movd(xmm_scratch, scratch);
  __ cvtss2sd(xmm_scratch, xmm_scratch);
  __ ucomisd(input_reg, xmm_scratch);
  // Comparing -Infinity with NaN results in "unordered", which sets the
  // zero flag as if both were equal.  However, it also sets the carry flag.
  __ j(not_equal, &sqrt, Label::kNear);
  __ j(carry, &sqrt, Label::kNear);
  // If input is -Infinity, return Infinity.
  // (0 - (-Infinity) == +Infinity.)
  __ xorps(input_reg, input_reg);
  __ subsd(input_reg, xmm_scratch);
  __ jmp(&done, Label::kNear);

  // Square root.
  __ bind(&sqrt);
  __ xorps(xmm_scratch, xmm_scratch);
  __ addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
  __ sqrtsd(input_reg, input_reg);
  __ bind(&done);
}
   3554 
   3555 
// Emits Math.pow by dispatching to MathPowStub variants depending on the
// exponent representation. Tagged exponents that are not smis must be heap
// numbers, otherwise we deoptimize.
void LCodeGen::DoPower(LPower* instr) {
  Representation exponent_type = instr->hydrogen()->right()->representation();
  // Having marked this as a call, we can use any registers.
  // Just make sure that the input/output registers are the expected ones.
  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
  DCHECK(!instr->right()->IsDoubleRegister() ||
         ToDoubleRegister(instr->right()).is(xmm1));
  DCHECK(!instr->right()->IsRegister() ||
         ToRegister(instr->right()).is(tagged_exponent));
  DCHECK(ToDoubleRegister(instr->left()).is(xmm2));
  DCHECK(ToDoubleRegister(instr->result()).is(xmm3));

  if (exponent_type.IsSmi()) {
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsTagged()) {
    // A tagged exponent may be a smi or a heap number; anything else deopts.
    Label no_deopt;
    __ JumpIfSmi(tagged_exponent, &no_deopt);
    DCHECK(!ecx.is(tagged_exponent));
    __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, ecx);
    DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
    __ bind(&no_deopt);
    MathPowStub stub(isolate(), MathPowStub::TAGGED);
    __ CallStub(&stub);
  } else if (exponent_type.IsInteger32()) {
    MathPowStub stub(isolate(), MathPowStub::INTEGER);
    __ CallStub(&stub);
  } else {
    DCHECK(exponent_type.IsDouble());
    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
    __ CallStub(&stub);
  }
}
   3589 
   3590 
// Emits Math.log in place: NaN for negative inputs, -Infinity for zero,
// and for positive inputs computes ln(x) = log2(x) * ln(2) on the x87 FPU
// (fyl2x), shuttling the value through the stack between SSE and x87.
void LCodeGen::DoMathLog(LMathLog* instr) {
  DCHECK(instr->value()->Equals(instr->result()));
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  XMMRegister xmm_scratch = double_scratch0();
  Label positive, done, zero;
  __ xorps(xmm_scratch, xmm_scratch);
  __ ucomisd(input_reg, xmm_scratch);
  __ j(above, &positive, Label::kNear);
  __ j(not_carry, &zero, Label::kNear);
  // Negative (or NaN) input: produce NaN (pcmpeqd on itself sets all bits).
  __ pcmpeqd(input_reg, input_reg);
  __ jmp(&done, Label::kNear);
  // Zero input: log(0) is -Infinity.
  __ bind(&zero);
  ExternalReference ninf =
      ExternalReference::address_of_negative_infinity();
  __ movsd(input_reg, Operand::StaticVariable(ninf));
  __ jmp(&done, Label::kNear);
  __ bind(&positive);
  // fyl2x computes st(1) * log2(st(0)); with st(1) = ln(2) this is ln(x).
  __ fldln2();
  __ sub(Operand(esp), Immediate(kDoubleSize));
  __ movsd(Operand(esp, 0), input_reg);
  __ fld_d(Operand(esp, 0));
  __ fyl2x();
  __ fstp_d(Operand(esp, 0));
  __ movsd(input_reg, Operand(esp, 0));
  __ add(Operand(esp), Immediate(kDoubleSize));
  __ bind(&done);
}
   3618 
   3619 
   3620 void LCodeGen::DoMathClz32(LMathClz32* instr) {
   3621   Register input = ToRegister(instr->value());
   3622   Register result = ToRegister(instr->result());
   3623 
   3624   __ Lzcnt(result, input);
   3625 }
   3626 
   3627 
   3628 void LCodeGen::DoMathExp(LMathExp* instr) {
   3629   XMMRegister input = ToDoubleRegister(instr->value());
   3630   XMMRegister result = ToDoubleRegister(instr->result());
   3631   XMMRegister temp0 = double_scratch0();
   3632   Register temp1 = ToRegister(instr->temp1());
   3633   Register temp2 = ToRegister(instr->temp2());
   3634 
   3635   MathExpGenerator::EmitMathExp(masm(), input, result, temp0, temp1, temp2);
   3636 }
   3637 
   3638 
// Invokes the function in edi. If the callee is statically known, goes
// through CallKnownFunction (which may skip argument adaptation);
// otherwise uses the generic InvokeFunction path.
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->function()).is(edi));
  DCHECK(instr->HasPointerMap());

  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
  if (known_function.is_null()) {
    LPointerMap* pointers = instr->pointer_map();
    // Record a lazy-deopt safepoint immediately after the call.
    SafepointGenerator generator(
        this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(instr->arity());
    __ InvokeFunction(edi, no_reg, count, CALL_FUNCTION, generator);
  } else {
    CallKnownFunction(known_function,
                      instr->hydrogen()->formal_parameter_count(),
                      instr->arity(), instr);
  }
}
   3657 
   3658 
// Emits a call to the function in edi. With feedback vector and slot
// available, uses the CallIC (which collects type feedback); otherwise
// falls back to the generic Call builtin.
void LCodeGen::DoCallFunction(LCallFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->function()).is(edi));
  DCHECK(ToRegister(instr->result()).is(eax));

  int arity = instr->arity();
  ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
  if (instr->hydrogen()->HasVectorAndSlot()) {
    // CallIC expects the feedback vector in ebx and the slot (smi) in edx.
    Register slot_register = ToRegister(instr->temp_slot());
    Register vector_register = ToRegister(instr->temp_vector());
    DCHECK(slot_register.is(edx));
    DCHECK(vector_register.is(ebx));

    AllowDeferredHandleDereference vector_structure_check;
    Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
    int index = vector->GetIndex(instr->hydrogen()->slot());

    __ mov(vector_register, vector);
    __ mov(slot_register, Immediate(Smi::FromInt(index)));

    Handle<Code> ic =
        CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
    CallCode(ic, RelocInfo::CODE_TARGET, instr);
  } else {
    // Generic call: eax carries the argument count.
    __ Set(eax, arity);
    CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
  }
}
   3687 
   3688 
// Emits a call to an Array constructor specialized by arity and elements
// kind. For the single-argument case, a non-zero length argument forces the
// holey variant of the elements kind.
void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->constructor()).is(edi));
  DCHECK(ToRegister(instr->result()).is(eax));

  __ Move(eax, Immediate(instr->arity()));
  if (instr->arity() == 1) {
    // We only need the allocation site for the case we have a length argument.
    // The case may bail out to the runtime, which will determine the correct
    // elements kind with the site.
    __ mov(ebx, instr->hydrogen()->site());
  } else {
    __ mov(ebx, isolate()->factory()->undefined_value());
  }

  ElementsKind kind = instr->hydrogen()->elements_kind();
  // If the site would track allocations, disable site passing so the stub
  // does not transition it; otherwise leave it untouched.
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;
      // We might need a change here
      // look at the first argument
      // A non-zero length means the array starts with holes, so use the
      // holey variant of the elements kind.
      __ mov(ecx, Operand(esp, 0));
      __ test(ecx, ecx);
      __ j(zero, &packed_case, Label::kNear);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(),
                                              holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      __ jmp(&done, Label::kNear);
      __ bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
    __ bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
}
   3740 
   3741 
// Emits a call to a runtime function with the instruction's arity,
// optionally saving double registers across the call.
void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
}
   3746 
   3747 
   3748 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
   3749   Register function = ToRegister(instr->function());
   3750   Register code_object = ToRegister(instr->code_object());
   3751   __ lea(code_object, FieldOperand(code_object, Code::kHeaderSize));
   3752   __ mov(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
   3753 }
   3754 
   3755 
   3756 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
   3757   Register result = ToRegister(instr->result());
   3758   Register base = ToRegister(instr->base_object());
   3759   if (instr->offset()->IsConstantOperand()) {
   3760     LConstantOperand* offset = LConstantOperand::cast(instr->offset());
   3761     __ lea(result, Operand(base, ToInteger32(offset)));
   3762   } else {
   3763     Register offset = ToRegister(instr->offset());
   3764     __ lea(result, Operand(base, offset, times_1, 0));
   3765   }
   3766 }
   3767 
   3768 
// Stores a value into a named field of an object. Handles external-memory
// stores, unboxed double fields, map transitions (with map write barrier),
// in-object vs. properties-backing-store slots, constant and register
// values, and the value write barrier.
void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
  Representation representation = instr->hydrogen()->field_representation();

  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();

  if (access.IsExternalMemory()) {
    // Raw store to external memory; no write barrier needed.
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    MemOperand operand = instr->object()->IsConstantOperand()
        ? MemOperand::StaticVariable(
            ToExternalReference(LConstantOperand::cast(instr->object())))
        : MemOperand(ToRegister(instr->object()), offset);
    if (instr->value()->IsConstantOperand()) {
      LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
      __ mov(operand, Immediate(ToInteger32(operand_value)));
    } else {
      Register value = ToRegister(instr->value());
      __ Store(value, operand, representation);
    }
    return;
  }

  Register object = ToRegister(instr->object());
  __ AssertNotSmi(object);

  // A smi-representation store of a constant must actually be a smi.
  DCHECK(!representation.IsSmi() ||
         !instr->value()->IsConstantOperand() ||
         IsSmi(LConstantOperand::cast(instr->value())));
  if (representation.IsDouble()) {
    // Unboxed double field: raw 64-bit store, no transition or barrier.
    DCHECK(access.IsInobject());
    DCHECK(!instr->hydrogen()->has_transition());
    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
    XMMRegister value = ToDoubleRegister(instr->value());
    __ movsd(FieldOperand(object, offset), value);
    return;
  }

  if (instr->hydrogen()->has_transition()) {
    // Install the new map before the field store.
    Handle<Map> transition = instr->hydrogen()->transition_map();
    AddDeprecationDependency(transition);
    __ mov(FieldOperand(object, HeapObject::kMapOffset), transition);
    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
      Register temp = ToRegister(instr->temp());
      Register temp_map = ToRegister(instr->temp_map());
      // Update the write barrier for the map field.
      __ RecordWriteForMap(object, transition, temp_map, temp, kSaveFPRegs);
    }
  }

  // Do the store.
  // Out-of-object fields live in the properties backing store.
  Register write_register = object;
  if (!access.IsInobject()) {
    write_register = ToRegister(instr->temp());
    __ mov(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
  }

  MemOperand operand = FieldOperand(write_register, offset);
  if (instr->value()->IsConstantOperand()) {
    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
    if (operand_value->IsRegister()) {
      Register value = ToRegister(operand_value);
      __ Store(value, operand, representation);
    } else if (representation.IsInteger32() || representation.IsExternal()) {
      Immediate immediate = ToImmediate(operand_value, representation);
      // Immediate stores never need a barrier (no heap pointer written).
      DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
      __ mov(operand, immediate);
    } else {
      Handle<Object> handle_value = ToHandle(operand_value);
      DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
      __ mov(operand, handle_value);
    }
  } else {
    Register value = ToRegister(instr->value());
    __ Store(value, operand, representation);
  }

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    Register value = ToRegister(instr->value());
    Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
    // Update the write barrier for the object for in-object properties.
    __ RecordWriteField(write_register,
                        offset,
                        value,
                        temp,
                        kSaveFPRegs,
                        EMIT_REMEMBERED_SET,
                        instr->hydrogen()->SmiCheckForWriteBarrier(),
                        instr->hydrogen()->PointersToHereCheckForValue());
  }
}
   3859 
   3860 
void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
  // Generic named store: falls back to the StoreIC, whose inputs must sit
  // in the registers fixed by StoreDescriptor.
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  if (instr->hydrogen()->HasVectorAndSlot()) {
    // Set up the type feedback vector and slot registers for the vector IC.
    EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
  }

  __ mov(StoreDescriptor::NameRegister(), instr->name());
  Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
                        isolate(), instr->language_mode(),
                        instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
   3876 
   3877 
void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
  // Deoptimizes unless index < length (index <= length when equality is
  // allowed). With --debug-code, statically-eliminated checks are still
  // verified and trap instead of deoptimizing.
  Condition cc = instr->hydrogen()->allow_equality() ? above : above_equal;
  if (instr->index()->IsConstantOperand()) {
    // Operands are emitted as cmp(length, index), i.e. swapped relative to
    // the condition above, so the condition must be commuted.
    __ cmp(ToOperand(instr->length()),
           ToImmediate(LConstantOperand::cast(instr->index()),
                       instr->hydrogen()->length()->representation()));
    cc = CommuteCondition(cc);
  } else if (instr->length()->IsConstantOperand()) {
    __ cmp(ToOperand(instr->index()),
           ToImmediate(LConstantOperand::cast(instr->length()),
                       instr->hydrogen()->index()->representation()));
  } else {
    __ cmp(ToRegister(instr->index()), ToOperand(instr->length()));
  }
  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
    // The check was deemed unnecessary; break into the debugger if it
    // would actually have failed.
    Label done;
    __ j(NegateCondition(cc), &done, Label::kNear);
    __ int3();
    __ bind(&done);
  } else {
    DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
  }
}
   3901 
   3902 
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
  // Stores a value into a typed-array backing store, using the store width
  // dictated by the elements kind.
  ElementsKind elements_kind = instr->elements_kind();
  LOperand* key = instr->key();
  if (!key->IsConstantOperand() &&
      ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
                                  elements_kind)) {
    // Untag the smi key in place (clobbers the key register).
    __ SmiUntag(ToRegister(key));
  }
  Operand operand(BuildFastArrayOperand(
      instr->elements(),
      key,
      instr->hydrogen()->key()->representation(),
      elements_kind,
      instr->base_offset()));
  if (elements_kind == FLOAT32_ELEMENTS) {
    // Narrow the double value to single precision before storing.
    XMMRegister xmm_scratch = double_scratch0();
    __ cvtsd2ss(xmm_scratch, ToDoubleRegister(instr->value()));
    __ movss(operand, xmm_scratch);
  } else if (elements_kind == FLOAT64_ELEMENTS) {
    __ movsd(operand, ToDoubleRegister(instr->value()));
  } else {
    // Integer store: pick byte/word/dword width by elements kind.
    Register value = ToRegister(instr->value());
    switch (elements_kind) {
      case UINT8_ELEMENTS:
      case INT8_ELEMENTS:
      case UINT8_CLAMPED_ELEMENTS:
        __ mov_b(operand, value);
        break;
      case UINT16_ELEMENTS:
      case INT16_ELEMENTS:
        __ mov_w(operand, value);
        break;
      case UINT32_ELEMENTS:
      case INT32_ELEMENTS:
        __ mov(operand, value);
        break;
      case FLOAT32_ELEMENTS:
      case FLOAT64_ELEMENTS:
      case FAST_SMI_ELEMENTS:
      case FAST_ELEMENTS:
      case FAST_DOUBLE_ELEMENTS:
      case FAST_HOLEY_SMI_ELEMENTS:
      case FAST_HOLEY_ELEMENTS:
      case FAST_HOLEY_DOUBLE_ELEMENTS:
      case DICTIONARY_ELEMENTS:
      case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
      case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
        // Float kinds were handled above; the rest are not external arrays.
        UNREACHABLE();
        break;
    }
  }
}
   3955 
   3956 
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
  // Stores a double into a FixedDoubleArray element, canonicalizing NaNs
  // first when the hydrogen instruction requires it.
  Operand double_store_operand = BuildFastArrayOperand(
      instr->elements(),
      instr->key(),
      instr->hydrogen()->key()->representation(),
      FAST_DOUBLE_ELEMENTS,
      instr->base_offset());

  XMMRegister value = ToDoubleRegister(instr->value());

  if (instr->NeedsCanonicalization()) {
    XMMRegister xmm_scratch = double_scratch0();
    // Turn potential sNaN value into qNaN.
    // (Subtracting zero quiets a signalling NaN and leaves every other
    // value unchanged; note this mutates the value register.)
    __ xorps(xmm_scratch, xmm_scratch);
    __ subsd(value, xmm_scratch);
  }

  __ movsd(double_store_operand, value);
}
   3976 
   3977 
void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
  // Stores a tagged value into a FixedArray element, emitting a write
  // barrier afterwards when required.
  Register elements = ToRegister(instr->elements());
  // key is only needed as a register by the write-barrier path below.
  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;

  Operand operand = BuildFastArrayOperand(
      instr->elements(),
      instr->key(),
      instr->hydrogen()->key()->representation(),
      FAST_ELEMENTS,
      instr->base_offset());
  if (instr->value()->IsRegister()) {
    __ mov(operand, ToRegister(instr->value()));
  } else {
    LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
    if (IsSmi(operand_value)) {
      // Smi constants can be stored directly as immediates.
      Immediate immediate = ToImmediate(operand_value, Representation::Smi());
      __ mov(operand, immediate);
    } else {
      DCHECK(!IsInteger32(operand_value));
      Handle<Object> handle_value = ToHandle(operand_value);
      __ mov(operand, handle_value);
    }
  }

  if (instr->hydrogen()->NeedsWriteBarrier()) {
    DCHECK(instr->value()->IsRegister());
    Register value = ToRegister(instr->value());
    DCHECK(!instr->key()->IsConstantOperand());
    // The barrier's smi check can be skipped when the value is statically
    // known to be a heap object.
    SmiCheck check_needed =
        instr->hydrogen()->value()->type().IsHeapObject()
          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
    // Compute address of modified element and store it into key register.
    __ lea(key, operand);
    __ RecordWrite(elements,
                   key,
                   value,
                   kSaveFPRegs,
                   EMIT_REMEMBERED_SET,
                   check_needed,
                   instr->hydrogen()->PointersToHereCheckForValue());
  }
}
   4020 
   4021 
   4022 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
   4023   // By cases...external, fast-double, fast
   4024   if (instr->is_fixed_typed_array()) {
   4025     DoStoreKeyedExternalArray(instr);
   4026   } else if (instr->hydrogen()->value()->representation().IsDouble()) {
   4027     DoStoreKeyedFixedDoubleArray(instr);
   4028   } else {
   4029     DoStoreKeyedFixedArray(instr);
   4030   }
   4031 }
   4032 
   4033 
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
  // Generic keyed store: falls back to the KeyedStoreIC, whose inputs must
  // sit in the registers fixed by StoreDescriptor.
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));

  if (instr->hydrogen()->HasVectorAndSlot()) {
    // Set up the type feedback vector and slot registers for the vector IC.
    EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
  }

  Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
                        isolate(), instr->language_mode(),
                        instr->hydrogen()->initialization_state()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
}
   4049 
   4050 
   4051 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
   4052   Register object = ToRegister(instr->object());
   4053   Register temp = ToRegister(instr->temp());
   4054   Label no_memento_found;
   4055   __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
   4056   DeoptimizeIf(equal, instr, Deoptimizer::kMementoFound);
   4057   __ bind(&no_memento_found);
   4058 }
   4059 
   4060 
void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
  // Grows the elements backing store via a deferred stub call when the key
  // being stored to is at or beyond the current capacity; otherwise just
  // loads the existing elements pointer into eax.
  class DeferredMaybeGrowElements final : public LDeferredCode {
   public:
    DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LMaybeGrowElements* instr_;
  };

  Register result = eax;
  DeferredMaybeGrowElements* deferred =
      new (zone()) DeferredMaybeGrowElements(this, instr);
  LOperand* key = instr->key();
  LOperand* current_capacity = instr->current_capacity();

  DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
  DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
  DCHECK(key->IsConstantOperand() || key->IsRegister());
  DCHECK(current_capacity->IsConstantOperand() ||
         current_capacity->IsRegister());

  // Compare key against capacity, specializing on which operands are
  // compile-time constants; branch to the deferred code if key >= capacity.
  if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    if (constant_key >= constant_capacity) {
      // Deferred case.
      __ jmp(deferred->entry());
    }
  } else if (key->IsConstantOperand()) {
    int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
    // Operands are swapped here: capacity <= key  <=>  key >= capacity.
    __ cmp(ToOperand(current_capacity), Immediate(constant_key));
    __ j(less_equal, deferred->entry());
  } else if (current_capacity->IsConstantOperand()) {
    int32_t constant_capacity =
        ToInteger32(LConstantOperand::cast(current_capacity));
    __ cmp(ToRegister(key), Immediate(constant_capacity));
    __ j(greater_equal, deferred->entry());
  } else {
    __ cmp(ToRegister(key), ToRegister(current_capacity));
    __ j(greater_equal, deferred->entry());
  }

  // Fast path: capacity is sufficient, load the current elements pointer.
  __ mov(result, ToOperand(instr->elements()));
  __ bind(deferred->exit());
}
   4110 
   4111 
void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
  // Deferred slow path for DoMaybeGrowElements: calls GrowArrayElementsStub
  // and deoptimizes if the result is a smi (elements went to dictionary
  // mode). The grown elements pointer is left in eax.
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register result = eax;
  __ Move(result, Immediate(0));

  // We have to call a stub.
  {
    PushSafepointRegistersScope scope(this);
    // The stub takes the object in eax ...
    if (instr->object()->IsRegister()) {
      __ Move(result, ToRegister(instr->object()));
    } else {
      __ mov(result, ToOperand(instr->object()));
    }

    // ... and the key as a smi in ebx.
    LOperand* key = instr->key();
    if (key->IsConstantOperand()) {
      __ mov(ebx, ToImmediate(key, Representation::Smi()));
    } else {
      __ Move(ebx, ToRegister(key));
      __ SmiTag(ebx);
    }

    GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
                               instr->hydrogen()->kind());
    __ CallStub(&stub);
    RecordSafepointWithLazyDeopt(
        instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    __ StoreToSafepointRegisterSlot(result, result);
  }

  // Deopt on smi, which means the elements array changed to dictionary mode.
  __ test(result, Immediate(kSmiTagMask));
  DeoptimizeIf(equal, instr, Deoptimizer::kSmi);
}
   4148 
   4149 
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  // Transitions the object's map from from_kind to to_kind. A simple map
  // transition just rewrites the map word; otherwise a stub call migrates
  // the elements as well. No-op if the object is not in from_map.
  Register object_reg = ToRegister(instr->object());

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;
  bool is_simple_map_transition =
      IsSimpleMapChangeTransition(from_kind, to_kind);
  Label::Distance branch_distance =
      is_simple_map_transition ? Label::kNear : Label::kFar;
  __ cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
  __ j(not_equal, &not_applicable, branch_distance);
  if (is_simple_map_transition) {
    Register new_map_reg = ToRegister(instr->new_map_temp());
    __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
           Immediate(to_map));
    // Write barrier.
    DCHECK_NOT_NULL(instr->temp());
    __ RecordWriteForMap(object_reg, to_map, new_map_reg,
                         ToRegister(instr->temp()),
                         kDontSaveFPRegs);
  } else {
    // Slow path: the stub expects the object in eax and the new map in ebx.
    DCHECK(ToRegister(instr->context()).is(esi));
    DCHECK(object_reg.is(eax));
    PushSafepointRegistersScope scope(this);
    __ mov(ebx, to_map);
    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
    __ CallStub(&stub);
    RecordSafepointWithLazyDeopt(instr,
        RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  }
  __ bind(&not_applicable);
}
   4187 
   4188 
void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
  // Loads the character code at string[index] into the result register.
  // String shapes the inline generator cannot handle fall through to the
  // deferred runtime call.
  class DeferredStringCharCodeAt final : public LDeferredCode {
   public:
    DeferredStringCharCodeAt(LCodeGen* codegen,
                             LStringCharCodeAt* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharCodeAt* instr_;
  };

  DeferredStringCharCodeAt* deferred =
      new(zone()) DeferredStringCharCodeAt(this, instr);

  StringCharLoadGenerator::Generate(masm(),
                                    factory(),
                                    ToRegister(instr->string()),
                                    ToRegister(instr->index()),
                                    ToRegister(instr->result()),
                                    deferred->entry());
  __ bind(deferred->exit());
}
   4213 
   4214 
void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
  // Deferred slow path: calls %StringCharCodeAtRT(string, smi index) and
  // untags the smi result into the result register.
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Move(result, Immediate(0));

  PushSafepointRegistersScope scope(this);
  __ push(string);
  // Push the index as a smi. This is safe because of the checks in
  // DoStringCharCodeAt above.
  STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
  if (instr->index()->IsConstantOperand()) {
    Immediate immediate = ToImmediate(LConstantOperand::cast(instr->index()),
                                      Representation::Smi());
    __ push(immediate);
  } else {
    Register index = ToRegister(instr->index());
    __ SmiTag(index);
    __ push(index);
  }
  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2,
                          instr, instr->context());
  // The runtime returns the char code as a smi in eax.
  __ AssertSmi(eax);
  __ SmiUntag(eax);
  __ StoreToSafepointRegisterSlot(result, eax);
}
   4244 
   4245 
void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
  // Converts a character code to a one-character string via the
  // single-character string cache. Codes above the one-byte range and
  // cache misses go to the deferred runtime call.
  class DeferredStringCharFromCode final : public LDeferredCode {
   public:
    DeferredStringCharFromCode(LCodeGen* codegen,
                               LStringCharFromCode* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredStringCharFromCode(instr_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LStringCharFromCode* instr_;
  };

  DeferredStringCharFromCode* deferred =
      new(zone()) DeferredStringCharFromCode(this, instr);

  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());
  DCHECK(!char_code.is(result));

  // Only one-byte char codes are cached.
  __ cmp(char_code, String::kMaxOneByteCharCode);
  __ j(above, deferred->entry());
  __ Move(result, Immediate(factory()->single_character_string_cache()));
  __ mov(result, FieldOperand(result,
                              char_code, times_pointer_size,
                              FixedArray::kHeaderSize));
  // An undefined cache entry means the string has not been created yet.
  __ cmp(result, factory()->undefined_value());
  __ j(equal, deferred->entry());
  __ bind(deferred->exit());
}
   4279 
   4280 
void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
  // Deferred slow path: calls Runtime::kStringCharFromCode with the
  // smi-tagged char code and stores the resulting string into result.
  Register char_code = ToRegister(instr->char_code());
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Move(result, Immediate(0));

  PushSafepointRegistersScope scope(this);
  __ SmiTag(char_code);
  __ push(char_code);
  CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
                          instr->context());
  __ StoreToSafepointRegisterSlot(result, eax);
}
   4297 
   4298 
void LCodeGen::DoStringAdd(LStringAdd* instr) {
  // String concatenation via StringAddStub; left/right operands are fixed
  // to edx/eax by the stub's calling convention.
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->left()).is(edx));
  DCHECK(ToRegister(instr->right()).is(eax));
  StringAddStub stub(isolate(),
                     instr->hydrogen()->flags(),
                     instr->hydrogen()->pretenure_flag());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
   4308 
   4309 
   4310 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
   4311   LOperand* input = instr->value();
   4312   LOperand* output = instr->result();
   4313   DCHECK(input->IsRegister() || input->IsStackSlot());
   4314   DCHECK(output->IsDoubleRegister());
   4315   __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
   4316 }
   4317 
   4318 
   4319 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
   4320   LOperand* input = instr->value();
   4321   LOperand* output = instr->result();
   4322   __ LoadUint32(ToDoubleRegister(output), ToRegister(input));
   4323 }
   4324 
   4325 
void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
  // Tags a signed int32 as a smi in place. If the tag shift overflows
  // (value outside the smi range) the deferred code boxes the value in a
  // heap number instead.
  class DeferredNumberTagI final : public LDeferredCode {
   public:
    DeferredNumberTagI(LCodeGen* codegen,
                       LNumberTagI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredNumberTagIU(
          instr_, instr_->value(), instr_->temp(), SIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagI* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagI* deferred =
      new(zone()) DeferredNumberTagI(this, instr);
  __ SmiTag(reg);
  __ j(overflow, deferred->entry());
  __ bind(deferred->exit());
}
   4352 
   4353 
void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
  // Tags a uint32 as a smi in place. Values above Smi::kMaxValue cannot be
  // represented as a smi, so the deferred code boxes them in a heap number.
  class DeferredNumberTagU final : public LDeferredCode {
   public:
    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override {
      codegen()->DoDeferredNumberTagIU(
          instr_, instr_->value(), instr_->temp(), UNSIGNED_INT32);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagU* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister() && input->Equals(instr->result()));
  Register reg = ToRegister(input);

  DeferredNumberTagU* deferred =
      new(zone()) DeferredNumberTagU(this, instr);
  // Unsigned comparison, so anything above the smi range goes deferred.
  __ cmp(reg, Immediate(Smi::kMaxValue));
  __ j(above, deferred->entry());
  __ SmiTag(reg);
  __ bind(deferred->exit());
}
   4380 
   4381 
// Deferred slow path shared by NumberTagI and NumberTagU: converts the
// integer in |value| to a double and boxes it in a freshly allocated heap
// number (inline allocation when possible, runtime call otherwise). The
// result replaces the integer in |value|'s register; |temp| is clobbered.
void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
                                     LOperand* value,
                                     LOperand* temp,
                                     IntegerSignedness signedness) {
  Label done, slow;
  Register reg = ToRegister(value);
  Register tmp = ToRegister(temp);
  XMMRegister xmm_scratch = double_scratch0();

  if (signedness == SIGNED_INT32) {
    // There was overflow, so bits 30 and 31 of the original integer
    // disagree. Try to allocate a heap number in new space and store
    // the value in there. If that fails, call the runtime system.
    __ SmiUntag(reg);
    // Restore the original sign bit, which the overflowing tag shift
    // inverted.
    __ xor_(reg, 0x80000000);
    __ Cvtsi2sd(xmm_scratch, Operand(reg));
  } else {
    __ LoadUint32(xmm_scratch, reg);
  }

  if (FLAG_inline_new) {
    __ AllocateHeapNumber(reg, tmp, no_reg, &slow);
    __ jmp(&done, Label::kNear);
  }

  // Slow case: Call the runtime system to do the number allocation.
  __ bind(&slow);
  {
    // TODO(3095996): Put a valid pointer value in the stack slot where the
    // result register is stored, as this register is in the pointer map, but
    // contains an integer value.
    __ Move(reg, Immediate(0));

    // Preserve the value of all registers.
    PushSafepointRegistersScope scope(this);

    // NumberTagI and NumberTagD use the context from the frame, rather than
    // the environment's HContext or HInlinedContext value.
    // They only call Runtime::kAllocateHeapNumber.
    // The corresponding HChange instructions are added in a phase that does
    // not have easy access to the local context.
    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
    __ StoreToSafepointRegisterSlot(reg, eax);
  }

  // Done. Put the value in xmm_scratch into the value of the allocated heap
  // number.
  __ bind(&done);
  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), xmm_scratch);
}
   4435 
   4436 
void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
  // Boxes a double value in a new heap number. Allocation is inlined when
  // --inline-new is on; the deferred code falls back to the runtime.
  class DeferredNumberTagD final : public LDeferredCode {
   public:
    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LNumberTagD* instr_;
  };

  Register reg = ToRegister(instr->result());

  DeferredNumberTagD* deferred =
      new(zone()) DeferredNumberTagD(this, instr);
  if (FLAG_inline_new) {
    Register tmp = ToRegister(instr->temp());
    __ AllocateHeapNumber(reg, tmp, no_reg, deferred->entry());
  } else {
    __ jmp(deferred->entry());
  }
  __ bind(deferred->exit());
  // Store the double payload into the freshly allocated heap number.
  XMMRegister input_reg = ToDoubleRegister(instr->value());
  __ movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
}
   4463 
   4464 
void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
  // Deferred slow path for NumberTagD: allocates the heap number via the
  // runtime; the caller stores the double payload afterwards.
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  Register reg = ToRegister(instr->result());
  __ Move(reg, Immediate(0));

  PushSafepointRegistersScope scope(this);
  // NumberTagI and NumberTagD use the context from the frame, rather than
  // the environment's HContext or HInlinedContext value.
  // They only call Runtime::kAllocateHeapNumber.
  // The corresponding HChange instructions are added in a phase that does
  // not have easy access to the local context.
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(reg, eax);
}
   4484 
   4485 
   4486 void LCodeGen::DoSmiTag(LSmiTag* instr) {
   4487   HChange* hchange = instr->hydrogen();
   4488   Register input = ToRegister(instr->value());
   4489   if (hchange->CheckFlag(HValue::kCanOverflow) &&
   4490       hchange->value()->CheckFlag(HValue::kUint32)) {
   4491     __ test(input, Immediate(0xc0000000));
   4492     DeoptimizeIf(not_zero, instr, Deoptimizer::kOverflow);
   4493   }
   4494   __ SmiTag(input);
   4495   if (hchange->CheckFlag(HValue::kCanOverflow) &&
   4496       !hchange->value()->CheckFlag(HValue::kUint32)) {
   4497     DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
   4498   }
   4499 }
   4500 
   4501 
   4502 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
   4503   LOperand* input = instr->value();
   4504   Register result = ToRegister(input);
   4505   DCHECK(input->IsRegister() && input->Equals(instr->result()));
   4506   if (instr->needs_check()) {
   4507     __ test(result, Immediate(kSmiTagMask));
   4508     DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi);
   4509   } else {
   4510     __ AssertSmi(result);
   4511   }
   4512   __ SmiUntag(result);
   4513 }
   4514 
   4515 
// Converts a tagged number in input_reg (a smi, or a heap number when mode
// is NUMBER_CANDIDATE_IS_ANY_TAGGED; optionally undefined -> NaN) into a
// double in result_reg. temp_reg is clobbered. Deoptimizes on anything
// else, and optionally on -0.
void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
                                Register temp_reg, XMMRegister result_reg,
                                NumberUntagDMode mode) {
  bool can_convert_undefined_to_nan =
      instr->hydrogen()->can_convert_undefined_to_nan();
  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();

  Label convert, load_smi, done;

  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
    // Smi check.
    __ JumpIfSmi(input_reg, &load_smi, Label::kNear);

    // Heap number map check.
    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
           factory()->heap_number_map());
    if (can_convert_undefined_to_nan) {
      __ j(not_equal, &convert, Label::kNear);
    } else {
      DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
    }

    // Heap number to XMM conversion.
    __ movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));

    if (deoptimize_on_minus_zero) {
      XMMRegister xmm_scratch = double_scratch0();
      __ xorps(xmm_scratch, xmm_scratch);
      __ ucomisd(result_reg, xmm_scratch);
      __ j(not_zero, &done, Label::kNear);
      // Value compares equal to zero: deoptimize if its sign bit is set,
      // i.e. the value is -0.
      __ movmskpd(temp_reg, result_reg);
      __ test_b(temp_reg, 1);
      DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
    }
    __ jmp(&done, Label::kNear);

    if (can_convert_undefined_to_nan) {
      __ bind(&convert);

      // Convert undefined to NaN.
      __ cmp(input_reg, factory()->undefined_value());
      DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);

      // pcmpeqd with itself produces an all-ones pattern, which is a NaN.
      __ pcmpeqd(result_reg, result_reg);
      __ jmp(&done, Label::kNear);
    }
  } else {
    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
  }

  __ bind(&load_smi);
  // Smi to XMM conversion. Clobbering a temp is faster than re-tagging the
  // input register since we avoid dependencies.
  __ mov(temp_reg, input_reg);
  __ SmiUntag(temp_reg);  // Untag smi before converting to float.
  __ Cvtsi2sd(result_reg, Operand(temp_reg));
  __ bind(&done);
}
   4574 
   4575 
// Deferred slow path for TaggedToI: input_reg holds a heap object (the
// caller's optimistic smi-untag is reverted first). Truncating conversions
// also accept undefined/true/false; non-truncating ones require a heap
// number that converts to int32 exactly (and not -0 when that is checked).
void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
  Register input_reg = ToRegister(instr->value());

  // The input was optimistically untagged; revert it.
  STATIC_ASSERT(kSmiTagSize == 1);
  __ lea(input_reg, Operand(input_reg, times_2, kHeapObjectTag));

  if (instr->truncating()) {
    Label no_heap_number, check_bools, check_false;

    // Heap number map check.
    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
           factory()->heap_number_map());
    __ j(not_equal, &no_heap_number, Label::kNear);
    __ TruncateHeapNumberToI(input_reg, input_reg);
    __ jmp(done);

    __ bind(&no_heap_number);
    // Check for Oddballs. Undefined/False is converted to zero and True to one
    // for truncating conversions.
    __ cmp(input_reg, factory()->undefined_value());
    __ j(not_equal, &check_bools, Label::kNear);
    __ Move(input_reg, Immediate(0));
    __ jmp(done);

    __ bind(&check_bools);
    __ cmp(input_reg, factory()->true_value());
    __ j(not_equal, &check_false, Label::kNear);
    __ Move(input_reg, Immediate(1));
    __ jmp(done);

    __ bind(&check_false);
    __ cmp(input_reg, factory()->false_value());
    DeoptimizeIf(not_equal, instr,
                 Deoptimizer::kNotAHeapNumberUndefinedBoolean);
    __ Move(input_reg, Immediate(0));
  } else {
    // Non-truncating: only an exactly-representable heap number is valid.
    XMMRegister scratch = ToDoubleRegister(instr->temp());
    DCHECK(!scratch.is(xmm0));
    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
           isolate()->factory()->heap_number_map());
    DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumber);
    // Round-trip the double through int32; any mismatch means the value
    // was not exactly an int32 (lost precision), and NaN sets parity.
    __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
    __ cvttsd2si(input_reg, Operand(xmm0));
    __ Cvtsi2sd(scratch, Operand(input_reg));
    __ ucomisd(xmm0, scratch);
    DeoptimizeIf(not_equal, instr, Deoptimizer::kLostPrecision);
    DeoptimizeIf(parity_even, instr, Deoptimizer::kNaN);
    if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
      // Result is zero: deoptimize if the original double's sign bit is
      // set, i.e. the value was -0.
      __ test(input_reg, Operand(input_reg));
      __ j(not_zero, done);
      __ movmskpd(input_reg, xmm0);
      __ and_(input_reg, 1);
      DeoptimizeIf(not_zero, instr, Deoptimizer::kMinusZero);
    }
  }
}
   4633 
   4634 
// Converts a tagged value to an int32. Smis are untagged inline; any other
// heap object falls through to the deferred path (DoDeferredTaggedToI).
// Input and result share the same register.
void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI final : public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredTaggedToI(instr_, done()); }
    LInstruction* instr() override { return instr_; }

   private:
    LTaggedToI* instr_;
  };

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  Register input_reg = ToRegister(input);
  // The instruction operates in place: result must alias the input.
  DCHECK(input_reg.is(ToRegister(instr->result())));

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    // Statically known smi: a plain untag suffices, no deferred code needed.
    __ SmiUntag(input_reg);
  } else {
    DeferredTaggedToI* deferred =
        new(zone()) DeferredTaggedToI(this, instr);
    // Optimistically untag the input.
    // If the input is a HeapObject, SmiUntag will set the carry flag.
    STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
    __ SmiUntag(input_reg);
    // Branch to deferred code if the input was tagged.
    // The deferred code will take care of restoring the tag.
    __ j(carry, deferred->entry());
    __ bind(deferred->exit());
  }
}
   4667 
   4668 
// Converts a tagged number (smi or heap number) to an untagged double in an
// XMM register. The actual emission is delegated to EmitNumberUntagD; the
// mode tells it whether the input is statically known to be a smi.
void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  LOperand* temp = instr->temp();
  DCHECK(temp->IsRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsDoubleRegister());

  Register input_reg = ToRegister(input);
  Register temp_reg = ToRegister(temp);

  HValue* value = instr->hydrogen()->value();
  NumberUntagDMode mode = value->representation().IsSmi()
      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;

  XMMRegister result_reg = ToDoubleRegister(result);
  EmitNumberUntagD(instr, input_reg, temp_reg, result_reg, mode);
}
   4687 
   4688 
// Converts an untagged double to an int32. Truncating conversions always
// succeed; otherwise the conversion deoptimizes on lost precision, NaN, or
// (when requested) negative zero.
void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsDoubleRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsRegister());
  Register result_reg = ToRegister(result);

  if (instr->truncating()) {
    XMMRegister input_reg = ToDoubleRegister(input);
    __ TruncateDoubleToI(result_reg, input_reg);
  } else {
    Label lost_precision, is_nan, minus_zero, done;
    XMMRegister input_reg = ToDoubleRegister(input);
    XMMRegister xmm_scratch = double_scratch0();
    // With --deopt-every-n-times the DeoptimizeIf sites below emit extra
    // code, so the jumps may exceed near-branch range.
    Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
    __ DoubleToI(result_reg, input_reg, xmm_scratch,
                 instr->hydrogen()->GetMinusZeroMode(), &lost_precision,
                 &is_nan, &minus_zero, dist);
    __ jmp(&done, dist);
    // Each failure label maps to its own deopt reason for diagnostics.
    __ bind(&lost_precision);
    DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
    __ bind(&is_nan);
    DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
    __ bind(&minus_zero);
    DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
    __ bind(&done);
  }
}
   4717 
   4718 
// Converts an untagged double to a smi. Like the non-truncating case of
// DoDoubleToI, but additionally tags the result and deoptimizes if the
// int32 value does not fit in a smi.
void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
  LOperand* input = instr->value();
  DCHECK(input->IsDoubleRegister());
  LOperand* result = instr->result();
  DCHECK(result->IsRegister());
  Register result_reg = ToRegister(result);

  Label lost_precision, is_nan, minus_zero, done;
  XMMRegister input_reg = ToDoubleRegister(input);
  XMMRegister xmm_scratch = double_scratch0();
  // Deopt stress mode emits extra code at DeoptimizeIf sites; use far jumps.
  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
  __ DoubleToI(result_reg, input_reg, xmm_scratch,
               instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan,
               &minus_zero, dist);
  __ jmp(&done, dist);
  __ bind(&lost_precision);
  DeoptimizeIf(no_condition, instr, Deoptimizer::kLostPrecision);
  __ bind(&is_nan);
  DeoptimizeIf(no_condition, instr, Deoptimizer::kNaN);
  __ bind(&minus_zero);
  DeoptimizeIf(no_condition, instr, Deoptimizer::kMinusZero);
  __ bind(&done);
  // Tagging shifts left by one; overflow means the int32 was not smi-sized.
  __ SmiTag(result_reg);
  DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow);
}
   4744 
   4745 
// Deoptimizes unless the value is a smi (its tag bit is clear).
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
  LOperand* input = instr->value();
  __ test(ToOperand(input), Immediate(kSmiTagMask));
  DeoptimizeIf(not_zero, instr, Deoptimizer::kNotASmi);
}
   4751 
   4752 
// Deoptimizes if the value is a smi. Elided entirely when the type oracle
// already proves the value is a heap object.
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    LOperand* input = instr->value();
    __ test(ToOperand(input), Immediate(kSmiTagMask));
    DeoptimizeIf(zero, instr, Deoptimizer::kSmi);
  }
}
   4760 
   4761 
// Deoptimizes if the backing ArrayBuffer of a typed array / DataView has
// been neutered (detached), since its data can no longer be accessed.
void LCodeGen::DoCheckArrayBufferNotNeutered(
    LCheckArrayBufferNotNeutered* instr) {
  Register view = ToRegister(instr->view());
  Register scratch = ToRegister(instr->scratch());

  // Load the view's buffer and test its WasNeutered bit field flag.
  __ mov(scratch, FieldOperand(view, JSArrayBufferView::kBufferOffset));
  __ test_b(FieldOperand(scratch, JSArrayBuffer::kBitFieldOffset),
            1 << JSArrayBuffer::WasNeutered::kShift);
  DeoptimizeIf(not_zero, instr, Deoptimizer::kOutOfBounds);
}
   4772 
   4773 
// Deoptimizes unless the object's instance type satisfies the hydrogen
// instruction's constraint: either a [first, last] interval check or a
// masked tag check on the instance-type byte.
void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  // Load the map to get at the instance type.
  __ mov(temp, FieldOperand(input, HeapObject::kMapOffset));

  if (instr->hydrogen()->is_interval_check()) {
    InstanceType first;
    InstanceType last;
    instr->hydrogen()->GetCheckInterval(&first, &last);

    __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
            static_cast<int8_t>(first));

    // If there is only one type in the interval check for equality.
    if (first == last) {
      DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
    } else {
      DeoptimizeIf(below, instr, Deoptimizer::kWrongInstanceType);
      // Omit check for the last type.
      if (last != LAST_TYPE) {
        __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
                static_cast<int8_t>(last));
        DeoptimizeIf(above, instr, Deoptimizer::kWrongInstanceType);
      }
    }
  } else {
    uint8_t mask;
    uint8_t tag;
    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);

    if (base::bits::IsPowerOfTwo32(mask)) {
      // Single-bit mask: a test instruction suffices; the expected tag is
      // either 0 (bit must be clear) or the mask itself (bit must be set).
      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
      __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
      DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
                   Deoptimizer::kWrongInstanceType);
    } else {
      // General case: mask the type byte and compare against the tag.
      __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
      __ and_(temp, mask);
      __ cmp(temp, tag);
      DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongInstanceType);
    }
  }
}
   4818 
   4819 
// Deoptimizes unless the value is identical to a specific heap object.
// Objects in new space may move during GC, so they are compared indirectly
// through a cell the GC keeps up to date.
void LCodeGen::DoCheckValue(LCheckValue* instr) {
  Handle<HeapObject> object = instr->hydrogen()->object().handle();
  if (instr->hydrogen()->object_in_new_space()) {
    Register reg = ToRegister(instr->value());
    Handle<Cell> cell = isolate()->factory()->NewCell(object);
    __ cmp(reg, Operand::ForCell(cell));
  } else {
    // Old-space objects don't move; embed the object reference directly.
    Operand operand = ToOperand(instr->value());
    __ cmp(operand, object);
  }
  DeoptimizeIf(not_equal, instr, Deoptimizer::kValueMismatch);
}
   4832 
   4833 
// Deferred path for LCheckMaps with a migration target: asks the runtime to
// migrate the object to its up-to-date map, and deoptimizes if migration
// fails (the runtime returns a smi on failure, per the test below).
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
  {
    PushSafepointRegistersScope scope(this);
    __ push(object);
    // Clear esi (the context register) before calling into the runtime.
    __ xor_(esi, esi);
    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);

    // Test the smi tag of the runtime result while registers are still saved.
    __ test(eax, Immediate(kSmiTagMask));
  }
  DeoptimizeIf(zero, instr, Deoptimizer::kInstanceMigrationFailed);
}
   4847 
   4848 
// Deoptimizes unless the object's map is one of an expected set. Stability
// checks emit no code at all (they register compile-time dependencies);
// checks with a migration target retry via DoDeferredInstanceMigration
// before deoptimizing.
void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
  class DeferredCheckMaps final : public LDeferredCode {
   public:
    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr,  Register object)
        : LDeferredCode(codegen), instr_(instr), object_(object) {
      // Re-enter at check_maps_ after a successful migration so the map is
      // re-checked against the expected set.
      SetExit(check_maps());
    }
    void Generate() override {
      codegen()->DoDeferredInstanceMigration(instr_, object_);
    }
    Label* check_maps() { return &check_maps_; }
    LInstruction* instr() override { return instr_; }

   private:
    LCheckMaps* instr_;
    Label check_maps_;
    Register object_;
  };

  if (instr->hydrogen()->IsStabilityCheck()) {
    // No runtime check needed: depend on the maps staying stable; the code
    // is discarded if any of them is later transitioned.
    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
    for (int i = 0; i < maps->size(); ++i) {
      AddStabilityDependency(maps->at(i).handle());
    }
    return;
  }

  LOperand* input = instr->value();
  DCHECK(input->IsRegister());
  Register reg = ToRegister(input);

  DeferredCheckMaps* deferred = NULL;
  if (instr->hydrogen()->HasMigrationTarget()) {
    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
    __ bind(deferred->check_maps());
  }

  // Compare against all maps but the last, jumping to success on a match.
  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
  Label success;
  for (int i = 0; i < maps->size() - 1; i++) {
    Handle<Map> map = maps->at(i).handle();
    __ CompareMap(reg, map);
    __ j(equal, &success, Label::kNear);
  }

  // The last map: on mismatch either try migration or deoptimize.
  Handle<Map> map = maps->at(maps->size() - 1).handle();
  __ CompareMap(reg, map);
  if (instr->hydrogen()->HasMigrationTarget()) {
    __ j(not_equal, deferred->entry());
  } else {
    DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
  }

  __ bind(&success);
}
   4904 
   4905 
// Clamps an untagged double to the uint8 range [0, 255].
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
  XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
  XMMRegister xmm_scratch = double_scratch0();
  Register result_reg = ToRegister(instr->result());
  __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
}
   4912 
   4913 
// Clamps an int32 to the uint8 range [0, 255], in place.
void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
  DCHECK(instr->unclamped()->Equals(instr->result()));
  Register value_reg = ToRegister(instr->result());
  __ ClampUint8(value_reg);
}
   4919 
   4920 
// Clamps a tagged value to the uint8 range [0, 255], in place. Handles
// three cases: smi, heap number, and undefined (which clamps to zero);
// anything else deoptimizes.
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
  DCHECK(instr->unclamped()->Equals(instr->result()));
  Register input_reg = ToRegister(instr->unclamped());
  XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
  XMMRegister xmm_scratch = double_scratch0();
  Label is_smi, done, heap_number;

  __ JumpIfSmi(input_reg, &is_smi);

  // Check for heap number
  __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
         factory()->heap_number_map());
  __ j(equal, &heap_number, Label::kNear);

  // Check for undefined. Undefined is converted to zero for clamping
  // conversions.
  __ cmp(input_reg, factory()->undefined_value());
  DeoptimizeIf(not_equal, instr, Deoptimizer::kNotAHeapNumberUndefined);
  __ mov(input_reg, 0);
  __ jmp(&done, Label::kNear);

  // Heap number
  __ bind(&heap_number);
  __ movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
  __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
  __ jmp(&done, Label::kNear);

  // smi
  __ bind(&is_smi);
  __ SmiUntag(input_reg);
  __ ClampUint8(input_reg);
  __ bind(&done);
}
   4954 
   4955 
// Extracts the high or low 32 bits of a double's bit pattern into a GP
// register.
void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
  XMMRegister value_reg = ToDoubleRegister(instr->value());
  Register result_reg = ToRegister(instr->result());
  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
    if (CpuFeatures::IsSupported(SSE4_1)) {
      // SSE4.1: extract dword 1 (the upper half) directly.
      CpuFeatureScope scope2(masm(), SSE4_1);
      __ pextrd(result_reg, value_reg, 1);
    } else {
      // Pre-SSE4.1 fallback: shuffle the high dword into position, then
      // move it out with movd.
      XMMRegister xmm_scratch = double_scratch0();
      __ pshufd(xmm_scratch, value_reg, 1);
      __ movd(result_reg, xmm_scratch);
    }
  } else {
    // The low 32 bits are already in dword 0.
    __ movd(result_reg, value_reg);
  }
}
   4972 
   4973 
// Assembles a double from its high and low 32-bit halves held in GP
// registers.
void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
  Register hi_reg = ToRegister(instr->hi());
  Register lo_reg = ToRegister(instr->lo());
  XMMRegister result_reg = ToDoubleRegister(instr->result());

  if (CpuFeatures::IsSupported(SSE4_1)) {
    // SSE4.1: move the low half in, then insert the high half as dword 1.
    CpuFeatureScope scope2(masm(), SSE4_1);
    __ movd(result_reg, lo_reg);
    __ pinsrd(result_reg, hi_reg, 1);
  } else {
    // Pre-SSE4.1 fallback: place the high half, shift it up 32 bits, and
    // OR in the low half from a scratch register.
    XMMRegister xmm_scratch = double_scratch0();
    __ movd(result_reg, hi_reg);
    __ psllq(result_reg, 32);
    __ movd(xmm_scratch, lo_reg);
    __ orps(result_reg, xmm_scratch);
  }
}
   4991 
   4992 
// Inline allocation of a heap object. Tries the fast bump-pointer path via
// the Allocate macro; on failure, falls through to a deferred runtime call
// (DoDeferredAllocate). Optionally prefills the allocation with filler maps
// so the heap stays iterable before real initialization.
void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate final : public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen,  LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredAllocate(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register temp = ToRegister(instr->temp());

  // Allocate memory for the object.
  AllocationFlags flags = TAG_OBJECT;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }

  // Constant sizes must fit a regular page; variable sizes come in a
  // register and are checked by the Allocate macro itself.
  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    CHECK(size <= Page::kMaxRegularHeapObjectSize);
    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
  } else {
    Register size = ToRegister(instr->size());
    __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
  }

  __ bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    // Compute the index of the last word (in pointers) into temp, then walk
    // backwards writing the one-pointer filler map to every word.
    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ mov(temp, (size / kPointerSize) - 1);
    } else {
      temp = ToRegister(instr->size());
      __ shr(temp, kPointerSizeLog2);
      __ dec(temp);
    }
    Label loop;
    __ bind(&loop);
    __ mov(FieldOperand(result, temp, times_pointer_size, 0),
        isolate()->factory()->one_pointer_filler_map());
    __ dec(temp);
    __ j(not_zero, &loop);
  }
}
   5048 
   5049 
// Deferred slow path for LAllocate: calls the runtime to allocate in the
// target space, passing the (smi-tagged) size and the encoded allocation
// flags, and stores the result back into the safepoint register slot.
void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  Register result = ToRegister(instr->result());

  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Move(result, Immediate(Smi::FromInt(0)));

  PushSafepointRegistersScope scope(this);
  if (instr->size()->IsRegister()) {
    Register size = ToRegister(instr->size());
    DCHECK(!size.is(result));
    // Tag the size so it can be passed to the runtime as a smi argument.
    __ SmiTag(ToRegister(instr->size()));
    __ push(size);
  } else {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    if (size >= 0 && size <= Smi::kMaxValue) {
      __ push(Immediate(Smi::FromInt(size)));
    } else {
      // We should never get here at runtime => abort
      __ int3();
      return;
    }
  }

  // Encode double-alignment and target-space into the flags smi.
  int flags = AllocateDoubleAlignFlag::encode(
      instr->hydrogen()->MustAllocateDoubleAligned());
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = AllocateTargetSpace::update(flags, OLD_SPACE);
  } else {
    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
  }
  __ push(Immediate(Smi::FromInt(flags)));

  CallRuntimeFromDeferred(
      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
  __ StoreToSafepointRegisterSlot(result, eax);
}
   5089 
   5090 
// Calls the runtime to convert an object's properties to fast mode; the
// value is expected in (and the result returned in) eax.
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  DCHECK(ToRegister(instr->value()).is(eax));
  __ push(eax);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}
   5096 
   5097 
// Implements the typeof operator. Smis take a fast path that loads the
// "number" string directly; everything else goes through the TypeofStub.
void LCodeGen::DoTypeof(LTypeof* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  DCHECK(ToRegister(instr->value()).is(ebx));
  Label end, do_call;
  Register value_register = ToRegister(instr->value());
  __ JumpIfNotSmi(value_register, &do_call);
  // typeof <smi> is always "number"; avoid the stub call.
  __ mov(eax, Immediate(isolate()->factory()->number_string()));
  __ jmp(&end);
  __ bind(&do_call);
  TypeofStub stub(isolate());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ bind(&end);
}
   5111 
   5112 
// Branch on `typeof x == "literal"`. EmitTypeofIs emits the comparison and
// returns the condition for the true case; no_condition means the literal
// matches no typeof result and EmitTypeofIs already jumped to false.
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Condition final_branch_condition = EmitTypeofIs(instr, input);
  if (final_branch_condition != no_condition) {
    EmitBranch(instr, final_branch_condition);
  }
}
   5120 
   5121 
// Emits the comparison for `typeof input == type_name` and returns the
// condition under which the comparison is true. Branches directly to the
// true/false labels where the answer is decided early; returns no_condition
// when the literal is not a possible typeof result (unconditional false).
// Note: for several cases `input` is clobbered (overwritten with its map).
Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Handle<String> type_name = instr->type_literal();
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);
  int next_block = GetNextEmittedBlock();

  // Use short jumps when the destination block immediately follows.
  Label::Distance true_distance = left_block == next_block ? Label::kNear
                                                           : Label::kFar;
  Label::Distance false_distance = right_block == next_block ? Label::kNear
                                                             : Label::kFar;
  Condition final_branch_condition = no_condition;
  if (String::Equals(type_name, factory()->number_string())) {
    // "number": smi, or heap object with the heap-number map.
    __ JumpIfSmi(input, true_label, true_distance);
    __ cmp(FieldOperand(input, HeapObject::kMapOffset),
           factory()->heap_number_map());
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory()->string_string())) {
    // "string": non-smi with instance type below FIRST_NONSTRING_TYPE.
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
    final_branch_condition = below;

  } else if (String::Equals(type_name, factory()->symbol_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    __ CmpObjectType(input, SYMBOL_TYPE, input);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory()->boolean_string())) {
    // "boolean": exactly the true or false oddball.
    __ cmp(input, factory()->true_value());
    __ j(equal, true_label, true_distance);
    __ cmp(input, factory()->false_value());
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory()->undefined_string())) {
    __ cmp(input, factory()->undefined_value());
    __ j(equal, true_label, true_distance);
    __ JumpIfSmi(input, false_label, false_distance);
    // Check for undetectable objects => true.
    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              1 << Map::kIsUndetectable);
    final_branch_condition = not_zero;

  } else if (String::Equals(type_name, factory()->function_string())) {
    __ JumpIfSmi(input, false_label, false_distance);
    // Check for callable and not undetectable objects => true.
    __ mov(input, FieldOperand(input, HeapObject::kMapOffset));
    __ movzx_b(input, FieldOperand(input, Map::kBitFieldOffset));
    __ and_(input, (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
    __ cmp(input, 1 << Map::kIsCallable);
    final_branch_condition = equal;

  } else if (String::Equals(type_name, factory()->object_string())) {
    // "object": null, or a non-callable, non-undetectable JSReceiver.
    __ JumpIfSmi(input, false_label, false_distance);
    __ cmp(input, factory()->null_value());
    __ j(equal, true_label, true_distance);
    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
    __ CmpObjectType(input, FIRST_JS_RECEIVER_TYPE, input);
    __ j(below, false_label, false_distance);
    // Check for callable or undetectable objects => false.
    __ test_b(FieldOperand(input, Map::kBitFieldOffset),
              (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
    final_branch_condition = zero;

// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)         \
  } else if (String::Equals(type_name, factory()->type##_string())) { \
    __ JumpIfSmi(input, false_label, false_distance);                 \
    __ cmp(FieldOperand(input, HeapObject::kMapOffset),               \
           factory()->type##_map());                                  \
    final_branch_condition = equal;
  SIMD128_TYPES(SIMD128_TYPE)
#undef SIMD128_TYPE
    // clang-format on

  } else {
    // Unknown literal: typeof can never produce it, so always false.
    __ jmp(false_label, false_distance);
  }
  return final_branch_condition;
}
   5204 
   5205 
// Pads the instruction stream with nops, if needed, so that at least
// space_needed bytes separate consecutive lazy-deopt patch sites. Always
// records the current pc as the latest lazy-deopt position.
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      __ Nop(padding_size);
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}
   5218 
   5219 
// Registers the environment for lazy deoptimization at this point and
// records the deoptimization index on the preceding safepoint.
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}
   5227 
   5228 
// Emits an unconditional deoptimization with the reason recorded on the
// hydrogen instruction.
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }
  DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
}
   5240 
   5241 
// LDummy is a placeholder instruction; it generates no code.
void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}
   5245 
   5246 
// LDummyUse only keeps its operand alive for register allocation; it
// generates no code.
void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}
   5250 
   5251 
// Deferred slow path for LStackCheck: calls Runtime::kStackGuard with the
// function's context restored from the frame, recording a lazy-deopt
// safepoint for the call.
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  // Reload the context register from the frame before calling the runtime.
  __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}
   5262 
   5263 
// Emits a stack-overflow / interrupt check. At function entry the check
// calls the StackCheck builtin inline; at backwards branches it jumps to
// deferred code (DoDeferredStackCheck) only when below the limit.
void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck final : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    ExternalReference stack_limit =
        ExternalReference::address_of_stack_limit(isolate());
    __ cmp(esp, Operand::StaticVariable(stack_limit));
    __ j(above_equal, &done, Label::kNear);

    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(esi));
    CallCode(isolate()->builtins()->StackCheck(),
             RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new(zone()) DeferredStackCheck(this, instr);
    ExternalReference stack_limit =
        ExternalReference::address_of_stack_limit(isolate());
    __ cmp(esp, Operand::StaticVariable(stack_limit));
    __ j(below, deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    // The deferred code returns to the loop header (done_label) rather than
    // to a fresh exit label.
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting call and the safepoint in
    // the deferred code.
  }
}
   5312 
   5313 
// Marks the on-stack-replacement entry point.
void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}
   5327 
   5328 
// Prepares for a for-in loop over the object in eax: verifies the object is
// a usable JS receiver, then either uses the enum cache (leaving the map in
// eax) or calls the runtime for the property names (expecting a
// FixedArray-of-names result whose map is the meta map check below).
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  DCHECK(ToRegister(instr->context()).is(esi));
  // Smis have no properties to enumerate.
  __ test(eax, Immediate(kSmiTagMask));
  DeoptimizeIf(zero, instr, Deoptimizer::kSmi);

  // Proxies (the first JS receiver type) are not supported here.
  STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
  __ CmpObjectType(eax, JS_PROXY_TYPE, ecx);
  DeoptimizeIf(below_equal, instr, Deoptimizer::kWrongInstanceType);

  Label use_cache, call_runtime;
  __ CheckEnumCache(&call_runtime);

  __ mov(eax, FieldOperand(eax, HeapObject::kMapOffset));
  __ jmp(&use_cache, Label::kNear);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(eax);
  CallRuntime(Runtime::kGetPropertyNamesFast, instr);

  // The fast-path result must be a map (checked via the meta map), not a
  // plain fixed array; otherwise deoptimize and take the generic loop.
  __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
         isolate()->factory()->meta_map());
  DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
  __ bind(&use_cache);
}
   5354 
   5355 
// Loads the for-in enum cache array for the given map into the result
// register.  If the map's EnumLength is zero the empty fixed array is used;
// otherwise the cache is fetched from the map's instance descriptors.
// Deoptimizes if no cache array ends up being available (result is null).
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  // EnumLength == 0 (as a smi): nothing to enumerate, use the canonical
  // empty fixed array instead of touching the descriptor array.
  __ cmp(result, Immediate(Smi::FromInt(0)));
  __ j(not_equal, &load_cache, Label::kNear);
  __ mov(result, isolate()->factory()->empty_fixed_array());
  __ jmp(&done, Label::kNear);

  // Chase: map -> instance descriptors -> enum cache -> cache array at
  // the requested index.
  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ mov(result,
         FieldOperand(result, DescriptorArray::kEnumCacheOffset));
  __ mov(result,
         FieldOperand(result, FixedArray::SizeFor(instr->idx())));
  __ bind(&done);
  // A null/zero result means there is no cache to iterate; deoptimize.
  __ test(result, result);
  DeoptimizeIf(equal, instr, Deoptimizer::kNoCache);
}
   5376 
   5377 
   5378 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
   5379   Register object = ToRegister(instr->value());
   5380   __ cmp(ToRegister(instr->map()),
   5381          FieldOperand(object, HeapObject::kMapOffset));
   5382   DeoptimizeIf(not_equal, instr, Deoptimizer::kWrongMap);
   5383 }
   5384 
   5385 
// Deferred (out-of-line) path for LLoadFieldByIndex: calls the
// Runtime::kLoadMutableDouble runtime function with (object, index) and
// stores the result back into the safepoint slot of |object|, so the main
// code path picks it up there.  Runs with all registers saved.
void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  // Runtime call arguments.
  __ push(object);
  __ push(index);
  // Clear esi (the context register — see the DCHECKs against esi elsewhere
  // in this file); the runtime call here does not take a real context.
  __ xor_(esi, esi);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  // 2 = number of arguments pushed above.
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  // The runtime result (eax) replaces |object| when registers are restored.
  __ StoreToSafepointRegisterSlot(object, eax);
}
   5398 
   5399 
// Loads a property by its smi-encoded field index.  The encoding packs two
// pieces of information into the smi payload: the low payload bit flags a
// mutable-double field (handled out of line via Runtime::kLoadMutableDouble),
// and the sign selects in-object (>= 0) versus out-of-object (< 0) storage.
// The result is written over the |object| register.
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  // Out-of-line code that forwards to DoDeferredLoadMutableDouble above.
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          object_(object),
          index_(index) {
    }
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, object, index);

  Label out_of_object, done;
  // Smi::FromInt(1) tests the low payload bit: when set, the field holds a
  // mutable double and must be loaded through the deferred runtime call.
  __ test(index, Immediate(Smi::FromInt(1)));
  __ j(not_zero, deferred->entry());

  // Untag the smi index (arithmetic shift preserves the sign).
  __ sar(index, 1);

  // Non-negative index: in-object property, relative to the object header.
  __ cmp(index, Immediate(0));
  __ j(less, &out_of_object, Label::kNear);
  __ mov(object, FieldOperand(object,
                              index,
                              times_half_pointer_size,
                              JSObject::kHeaderSize));
  __ jmp(&done, Label::kNear);

  // Negative index: property lives in the out-of-object properties array.
  __ bind(&out_of_object);
  __ mov(object, FieldOperand(object, JSObject::kPropertiesOffset));
  __ neg(index);
  // Index is now equal to out of object property index plus 1.
  __ mov(object, FieldOperand(object,
                              index,
                              times_half_pointer_size,
                              FixedArray::kHeaderSize - kPointerSize));
  // The deferred path rejoins here with its result already in |object|.
  __ bind(deferred->exit());
  __ bind(&done);
}
   5455 
   5456 
   5457 void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
   5458   Register context = ToRegister(instr->context());
   5459   __ mov(Operand(ebp, StandardFrameConstants::kContextOffset), context);
   5460 }
   5461 
   5462 
   5463 void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
   5464   Handle<ScopeInfo> scope_info = instr->scope_info();
   5465   __ Push(scope_info);
   5466   __ push(ToRegister(instr->function()));
   5467   CallRuntime(Runtime::kPushBlockContext, instr);
   5468   RecordSafepoint(Safepoint::kNoLazyDeopt);
   5469 }
   5470 
   5471 
   5472 #undef __
   5473 
   5474 }  // namespace internal
   5475 }  // namespace v8
   5476 
   5477 #endif  // V8_TARGET_ARCH_IA32
   5478