      1 // Copyright 2013 the V8 project authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #if V8_TARGET_ARCH_X64
      6 
      7 #include "src/crankshaft/x64/lithium-codegen-x64.h"
      8 
      9 #include "src/base/bits.h"
     10 #include "src/code-factory.h"
     11 #include "src/code-stubs.h"
     12 #include "src/crankshaft/hydrogen-osr.h"
     13 #include "src/ic/ic.h"
     14 #include "src/ic/stub-cache.h"
     15 
     16 namespace v8 {
     17 namespace internal {
     18 
     19 
     20 // When invoking builtins, we need to record the safepoint in the middle of
     21 // the invoke instruction sequence generated by the macro assembler.
     22 class SafepointGenerator final : public CallWrapper {
     23  public:
     24   SafepointGenerator(LCodeGen* codegen,
     25                      LPointerMap* pointers,
     26                      Safepoint::DeoptMode mode)
     27       : codegen_(codegen),
     28         pointers_(pointers),
     29         deopt_mode_(mode) { }
     30   virtual ~SafepointGenerator() {}
     31 
     32   void BeforeCall(int call_size) const override {}
     33 
     34   void AfterCall() const override {
     35     codegen_->RecordSafepoint(pointers_, deopt_mode_);
     36   }
     37 
     38  private:
     39   LCodeGen* codegen_;
     40   LPointerMap* pointers_;
     41   Safepoint::DeoptMode deopt_mode_;
     42 };
     43 
     44 
     45 #define __ masm()->
     46 
     47 bool LCodeGen::GenerateCode() {
     48   LPhase phase("Z_Code generation", chunk());
     49   DCHECK(is_unused());
     50   status_ = GENERATING;
     51 
     52   // Open a frame scope to indicate that there is a frame on the stack.  The
     53   // MANUAL indicates that the scope shouldn't actually generate code to set up
     54   // the frame (that is done in GeneratePrologue).
     55   FrameScope frame_scope(masm_, StackFrame::MANUAL);
     56 
     57   return GeneratePrologue() &&
     58       GenerateBody() &&
     59       GenerateDeferredCode() &&
     60       GenerateJumpTable() &&
     61       GenerateSafepointTable();
     62 }
     63 
     64 
     65 void LCodeGen::FinishCode(Handle<Code> code) {
     66   DCHECK(is_done());
     67   code->set_stack_slots(GetTotalFrameSlotCount());
     68   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
     69   PopulateDeoptimizationData(code);
     70 }
     71 
     72 
     73 #ifdef _MSC_VER
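// Windows commits stack memory one guard page at a time, so a write more than
// a page below the committed region can fault.  The helper below touches one
// word in every 4 KB page of the area that has just been reserved, from the
// old stack top downward, so that each page is committed in order.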
     74 void LCodeGen::MakeSureStackPagesMapped(int offset) {
     75   const int kPageSize = 4 * KB;
     76   for (offset -= kPageSize; offset > 0; offset -= kPageSize) {
     77     __ movp(Operand(rsp, offset), rax);
     78   }
     79 }
     80 #endif
     81 
     82 
     83 void LCodeGen::SaveCallerDoubles() {
     84   DCHECK(info()->saves_caller_doubles());
     85   DCHECK(NeedsEagerFrame());
      86   Comment(";;; Save clobbered caller double registers");
     87   int count = 0;
     88   BitVector* doubles = chunk()->allocated_double_registers();
     89   BitVector::Iterator save_iterator(doubles);
     90   while (!save_iterator.Done()) {
     91     __ Movsd(MemOperand(rsp, count * kDoubleSize),
     92              XMMRegister::from_code(save_iterator.Current()));
     93     save_iterator.Advance();
     94     count++;
     95   }
     96 }
     97 
     98 
     99 void LCodeGen::RestoreCallerDoubles() {
    100   DCHECK(info()->saves_caller_doubles());
    101   DCHECK(NeedsEagerFrame());
     102   Comment(";;; Restore clobbered caller double registers");
    103   BitVector* doubles = chunk()->allocated_double_registers();
    104   BitVector::Iterator save_iterator(doubles);
    105   int count = 0;
    106   while (!save_iterator.Done()) {
    107     __ Movsd(XMMRegister::from_code(save_iterator.Current()),
    108              MemOperand(rsp, count * kDoubleSize));
    109     save_iterator.Advance();
    110     count++;
    111   }
    112 }
    113 
    114 
    115 bool LCodeGen::GeneratePrologue() {
    116   DCHECK(is_generating());
    117 
    118   if (info()->IsOptimizing()) {
    119     ProfileEntryHookStub::MaybeCallEntryHook(masm_);
    120   }
    121 
    122   info()->set_prologue_offset(masm_->pc_offset());
    123   if (NeedsEagerFrame()) {
    124     DCHECK(!frame_is_built_);
    125     frame_is_built_ = true;
    126     if (info()->IsStub()) {
    127       __ StubPrologue(StackFrame::STUB);
    128     } else {
    129       __ Prologue(info()->GeneratePreagedPrologue());
    130     }
    131   }
    132 
    133   // Reserve space for the stack slots needed by the code.
    134   int slots = GetStackSlotCount();
    135   if (slots > 0) {
    136     if (FLAG_debug_code) {
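      // In debug builds, fill every reserved slot with kSlotsZapValue so that
      // a read from an uninitialized spill slot stands out.  rax serves as the
      // loop counter and is saved and restored around the loop.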
    137       __ subp(rsp, Immediate(slots * kPointerSize));
    138 #ifdef _MSC_VER
    139       MakeSureStackPagesMapped(slots * kPointerSize);
    140 #endif
    141       __ Push(rax);
    142       __ Set(rax, slots);
    143       __ Set(kScratchRegister, kSlotsZapValue);
    144       Label loop;
    145       __ bind(&loop);
    146       __ movp(MemOperand(rsp, rax, times_pointer_size, 0),
    147               kScratchRegister);
    148       __ decl(rax);
    149       __ j(not_zero, &loop);
    150       __ Pop(rax);
    151     } else {
    152       __ subp(rsp, Immediate(slots * kPointerSize));
    153 #ifdef _MSC_VER
    154       MakeSureStackPagesMapped(slots * kPointerSize);
    155 #endif
    156     }
    157 
    158     if (info()->saves_caller_doubles()) {
    159       SaveCallerDoubles();
    160     }
    161   }
    162   return !is_aborted();
    163 }
    164 
    165 
    166 void LCodeGen::DoPrologue(LPrologue* instr) {
    167   Comment(";;; Prologue begin");
    168 
    169   // Possibly allocate a local context.
    170   if (info_->scope()->NeedsContext()) {
    171     Comment(";;; Allocate local context");
    172     bool need_write_barrier = true;
    173     // Argument to NewContext is the function, which is still in rdi.
    174     int slots = info_->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    175     Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
    176     if (info()->scope()->is_script_scope()) {
    177       __ Push(rdi);
    178       __ Push(info()->scope()->scope_info());
    179       __ CallRuntime(Runtime::kNewScriptContext);
    180       deopt_mode = Safepoint::kLazyDeopt;
    181     } else {
    182       if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
    183         FastNewFunctionContextStub stub(isolate());
    184         __ Set(FastNewFunctionContextDescriptor::SlotsRegister(), slots);
    185         __ CallStub(&stub);
    186         // Result of FastNewFunctionContextStub is always in new space.
    187         need_write_barrier = false;
    188       } else {
    189         __ Push(rdi);
    190         __ CallRuntime(Runtime::kNewFunctionContext);
    191       }
    192     }
    193     RecordSafepoint(deopt_mode);
    194 
    195     // Context is returned in rax.  It replaces the context passed to us.
    196     // It's saved in the stack and kept live in rsi.
    197     __ movp(rsi, rax);
    198     __ movp(Operand(rbp, StandardFrameConstants::kContextOffset), rax);
    199 
    200     // Copy any necessary parameters into the context.
    201     int num_parameters = info()->scope()->num_parameters();
    202     int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
    203     for (int i = first_parameter; i < num_parameters; i++) {
    204       Variable* var = (i == -1) ? info()->scope()->receiver()
    205                                 : info()->scope()->parameter(i);
    206       if (var->IsContextSlot()) {
    207         int parameter_offset = StandardFrameConstants::kCallerSPOffset +
    208             (num_parameters - 1 - i) * kPointerSize;
    209         // Load parameter from stack.
    210         __ movp(rax, Operand(rbp, parameter_offset));
    211         // Store it in the context.
    212         int context_offset = Context::SlotOffset(var->index());
    213         __ movp(Operand(rsi, context_offset), rax);
    214         // Update the write barrier. This clobbers rax and rbx.
    215         if (need_write_barrier) {
    216           __ RecordWriteContextSlot(rsi, context_offset, rax, rbx, kSaveFPRegs);
    217         } else if (FLAG_debug_code) {
    218           Label done;
    219           __ JumpIfInNewSpace(rsi, rax, &done, Label::kNear);
    220           __ Abort(kExpectedNewSpaceObject);
    221           __ bind(&done);
    222         }
    223       }
    224     }
    225     Comment(";;; End allocate local context");
    226   }
    227 
    228   Comment(";;; Prologue end");
    229 }
    230 
    231 
    232 void LCodeGen::GenerateOsrPrologue() {
    233   // Generate the OSR entry prologue at the first unknown OSR value, or if there
    234   // are none, at the OSR entrypoint instruction.
    235   if (osr_pc_offset_ >= 0) return;
    236 
    237   osr_pc_offset_ = masm()->pc_offset();
    238 
    239   // Adjust the frame size, subsuming the unoptimized frame into the
    240   // optimized frame.
    241   int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
    242   DCHECK(slots >= 0);
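  // For example (hypothetical numbers): if the optimized code needs 10 frame
  // slots and the unoptimized frame already provides 4 of them, only
  // 6 * kPointerSize bytes are reserved here.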
    243   __ subp(rsp, Immediate(slots * kPointerSize));
    244 }
    245 
    246 
    247 void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
    248   if (instr->IsCall()) {
    249     EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    250   }
    251   if (!instr->IsLazyBailout() && !instr->IsGap()) {
    252     safepoints_.BumpLastLazySafepointIndex();
    253   }
    254 }
    255 
    256 
    257 void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
    258   if (FLAG_debug_code && FLAG_enable_slow_asserts && instr->HasResult() &&
    259       instr->hydrogen_value()->representation().IsInteger32() &&
    260       instr->result()->IsRegister()) {
    261     __ AssertZeroExtended(ToRegister(instr->result()));
    262   }
    263 
    264   if (instr->HasResult() && instr->MustSignExtendResult(chunk())) {
    265     // We sign extend the dehoisted key at the definition point when the pointer
     266     // size is 64-bit. For the x32 port, we sign extend the dehoisted key at the use
    267     // points and MustSignExtendResult is always false. We can't use
    268     // STATIC_ASSERT here as the pointer size is 32-bit for x32.
    269     DCHECK(kPointerSize == kInt64Size);
    270     if (instr->result()->IsRegister()) {
    271       Register result_reg = ToRegister(instr->result());
    272       __ movsxlq(result_reg, result_reg);
    273     } else {
     274       // Sign extend the 32-bit result in the stack slot.
    275       DCHECK(instr->result()->IsStackSlot());
    276       Operand src = ToOperand(instr->result());
    277       __ movsxlq(kScratchRegister, src);
    278       __ movq(src, kScratchRegister);
    279     }
    280   }
    281 }
    282 
    283 
    284 bool LCodeGen::GenerateJumpTable() {
    285   if (jump_table_.length() == 0) return !is_aborted();
    286 
    287   Label needs_frame;
    288   Comment(";;; -------------------- Jump table --------------------");
    289   for (int i = 0; i < jump_table_.length(); i++) {
    290     Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
    291     __ bind(&table_entry->label);
    292     Address entry = table_entry->address;
    293     DeoptComment(table_entry->deopt_info);
    294     if (table_entry->needs_frame) {
    295       DCHECK(!info()->saves_caller_doubles());
    296       __ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
    297       __ call(&needs_frame);
    298     } else {
    299       if (info()->saves_caller_doubles()) {
    300         DCHECK(info()->IsStub());
    301         RestoreCallerDoubles();
    302       }
    303       __ call(entry, RelocInfo::RUNTIME_ENTRY);
    304     }
    305   }
    306 
    307   if (needs_frame.is_linked()) {
    308     __ bind(&needs_frame);
    309     /* stack layout
    310        3: return address  <-- rsp
    311        2: garbage
    312        1: garbage
    313        0: garbage
    314     */
    315     // Reserve space for stub marker.
    316     __ subp(rsp, Immediate(TypedFrameConstants::kFrameTypeSize));
    317     __ Push(MemOperand(
    318         rsp, TypedFrameConstants::kFrameTypeSize));  // Copy return address.
    319     __ Push(kScratchRegister);
    320 
    321     /* stack layout
    322        3: return address
    323        2: garbage
    324        1: return address
    325        0: entry address  <-- rsp
    326     */
    327 
    328     // Create a stack frame.
    329     __ movp(MemOperand(rsp, 3 * kPointerSize), rbp);
    330     __ leap(rbp, MemOperand(rsp, 3 * kPointerSize));
    331 
    332     // This variant of deopt can only be used with stubs. Since we don't
    333     // have a function pointer to install in the stack frame that we're
    334     // building, install a special marker there instead.
    335     DCHECK(info()->IsStub());
    336     __ Move(MemOperand(rsp, 2 * kPointerSize), Smi::FromInt(StackFrame::STUB));
    337 
    338     /* stack layout
    339        3: old rbp
    340        2: stub marker
    341        1: return address
    342        0: entry address  <-- rsp
    343     */
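    // The deopt entry address is now the top word of the stack, so the ret
    // below pops it and jumps there with the STUB frame in place.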
    344     __ ret(0);
    345   }
    346 
    347   return !is_aborted();
    348 }
    349 
    350 
    351 bool LCodeGen::GenerateDeferredCode() {
    352   DCHECK(is_generating());
    353   if (deferred_.length() > 0) {
    354     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
    355       LDeferredCode* code = deferred_[i];
    356 
    357       HValue* value =
    358           instructions_->at(code->instruction_index())->hydrogen_value();
    359       RecordAndWritePosition(value->position());
    360 
    361       Comment(";;; <@%d,#%d> "
    362               "-------------------- Deferred %s --------------------",
    363               code->instruction_index(),
    364               code->instr()->hydrogen_value()->id(),
    365               code->instr()->Mnemonic());
    366       __ bind(code->entry());
    367       if (NeedsDeferredFrame()) {
    368         Comment(";;; Build frame");
    369         DCHECK(!frame_is_built_);
    370         DCHECK(info()->IsStub());
    371         frame_is_built_ = true;
     372         // Build the frame in such a way that rsi isn't trashed.
    373         __ pushq(rbp);  // Caller's frame pointer.
    374         __ Push(Smi::FromInt(StackFrame::STUB));
    375         __ leap(rbp, Operand(rsp, TypedFrameConstants::kFixedFrameSizeFromFp));
    376         Comment(";;; Deferred code");
    377       }
    378       code->Generate();
    379       if (NeedsDeferredFrame()) {
    380         __ bind(code->done());
    381         Comment(";;; Destroy frame");
    382         DCHECK(frame_is_built_);
    383         frame_is_built_ = false;
    384         __ movp(rsp, rbp);
    385         __ popq(rbp);
    386       }
    387       __ jmp(code->exit());
    388     }
    389   }
    390 
    391   // Deferred code is the last part of the instruction sequence. Mark
    392   // the generated code as done unless we bailed out.
    393   if (!is_aborted()) status_ = DONE;
    394   return !is_aborted();
    395 }
    396 
    397 
    398 bool LCodeGen::GenerateSafepointTable() {
    399   DCHECK(is_done());
    400   safepoints_.Emit(masm(), GetTotalFrameSlotCount());
    401   return !is_aborted();
    402 }
    403 
    404 
    405 Register LCodeGen::ToRegister(int index) const {
    406   return Register::from_code(index);
    407 }
    408 
    409 
    410 XMMRegister LCodeGen::ToDoubleRegister(int index) const {
    411   return XMMRegister::from_code(index);
    412 }
    413 
    414 
    415 Register LCodeGen::ToRegister(LOperand* op) const {
    416   DCHECK(op->IsRegister());
    417   return ToRegister(op->index());
    418 }
    419 
    420 
    421 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
    422   DCHECK(op->IsDoubleRegister());
    423   return ToDoubleRegister(op->index());
    424 }
    425 
    426 
    427 bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
    428   return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
    429 }
    430 
    431 
    432 bool LCodeGen::IsExternalConstant(LConstantOperand* op) const {
    433   return chunk_->LookupLiteralRepresentation(op).IsExternal();
    434 }
    435 
    436 
    437 bool LCodeGen::IsDehoistedKeyConstant(LConstantOperand* op) const {
    438   return op->IsConstantOperand() &&
    439       chunk_->IsDehoistedKey(chunk_->LookupConstant(op));
    440 }
    441 
    442 
    443 bool LCodeGen::IsSmiConstant(LConstantOperand* op) const {
    444   return chunk_->LookupLiteralRepresentation(op).IsSmi();
    445 }
    446 
    447 
    448 int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
    449   return ToRepresentation(op, Representation::Integer32());
    450 }
    451 
    452 
    453 int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
    454                                    const Representation& r) const {
    455   HConstant* constant = chunk_->LookupConstant(op);
    456   int32_t value = constant->Integer32Value();
    457   if (r.IsInteger32()) return value;
    458   DCHECK(SmiValuesAre31Bits() && r.IsSmiOrTagged());
    459   return static_cast<int32_t>(reinterpret_cast<intptr_t>(Smi::FromInt(value)));
    460 }
    461 
    462 
    463 Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
    464   HConstant* constant = chunk_->LookupConstant(op);
    465   return Smi::FromInt(constant->Integer32Value());
    466 }
    467 
    468 
    469 double LCodeGen::ToDouble(LConstantOperand* op) const {
    470   HConstant* constant = chunk_->LookupConstant(op);
    471   DCHECK(constant->HasDoubleValue());
    472   return constant->DoubleValue();
    473 }
    474 
    475 
    476 ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
    477   HConstant* constant = chunk_->LookupConstant(op);
    478   DCHECK(constant->HasExternalReferenceValue());
    479   return constant->ExternalReferenceValue();
    480 }
    481 
    482 
    483 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
    484   HConstant* constant = chunk_->LookupConstant(op);
    485   DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
    486   return constant->handle(isolate());
    487 }
    488 
    489 
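// Without an eager frame, incoming arguments sit directly above the return
// address.  Argument operands use negative indices, so index -1 maps to
// [rsp + kPCOnStackSize], index -2 to the next higher slot, and so on.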
    490 static int ArgumentsOffsetWithoutFrame(int index) {
    491   DCHECK(index < 0);
    492   return -(index + 1) * kPointerSize + kPCOnStackSize;
    493 }
    494 
    495 
    496 Operand LCodeGen::ToOperand(LOperand* op) const {
    497   // Does not handle registers. In X64 assembler, plain registers are not
    498   // representable as an Operand.
    499   DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
    500   if (NeedsEagerFrame()) {
    501     return Operand(rbp, FrameSlotToFPOffset(op->index()));
    502   } else {
     503     // There is no eager frame, so retrieve the parameter relative to
     504     // the stack pointer instead of the frame pointer.
    505     return Operand(rsp, ArgumentsOffsetWithoutFrame(op->index()));
    506   }
    507 }
    508 
    509 
    510 void LCodeGen::WriteTranslation(LEnvironment* environment,
    511                                 Translation* translation) {
    512   if (environment == NULL) return;
    513 
    514   // The translation includes one command per value in the environment.
    515   int translation_size = environment->translation_size();
    516 
    517   WriteTranslation(environment->outer(), translation);
    518   WriteTranslationFrame(environment, translation);
    519 
    520   int object_index = 0;
    521   int dematerialized_index = 0;
    522   for (int i = 0; i < translation_size; ++i) {
    523     LOperand* value = environment->values()->at(i);
    524     AddToTranslation(
    525         environment, translation, value, environment->HasTaggedValueAt(i),
    526         environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
    527   }
    528 }
    529 
    530 
    531 void LCodeGen::AddToTranslation(LEnvironment* environment,
    532                                 Translation* translation,
    533                                 LOperand* op,
    534                                 bool is_tagged,
    535                                 bool is_uint32,
    536                                 int* object_index_pointer,
    537                                 int* dematerialized_index_pointer) {
    538   if (op == LEnvironment::materialization_marker()) {
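    // A materialization marker stands for an object that was dematerialized
    // (e.g. by escape analysis).  Its field values were appended after the
    // regular environment values, starting at translation_size, and are
    // re-emitted here as a captured object or arguments object description.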
    539     int object_index = (*object_index_pointer)++;
    540     if (environment->ObjectIsDuplicateAt(object_index)) {
    541       int dupe_of = environment->ObjectDuplicateOfAt(object_index);
    542       translation->DuplicateObject(dupe_of);
    543       return;
    544     }
    545     int object_length = environment->ObjectLengthAt(object_index);
    546     if (environment->ObjectIsArgumentsAt(object_index)) {
    547       translation->BeginArgumentsObject(object_length);
    548     } else {
    549       translation->BeginCapturedObject(object_length);
    550     }
    551     int dematerialized_index = *dematerialized_index_pointer;
    552     int env_offset = environment->translation_size() + dematerialized_index;
    553     *dematerialized_index_pointer += object_length;
    554     for (int i = 0; i < object_length; ++i) {
    555       LOperand* value = environment->values()->at(env_offset + i);
    556       AddToTranslation(environment,
    557                        translation,
    558                        value,
    559                        environment->HasTaggedValueAt(env_offset + i),
    560                        environment->HasUint32ValueAt(env_offset + i),
    561                        object_index_pointer,
    562                        dematerialized_index_pointer);
    563     }
    564     return;
    565   }
    566 
    567   if (op->IsStackSlot()) {
    568     int index = op->index();
    569     if (is_tagged) {
    570       translation->StoreStackSlot(index);
    571     } else if (is_uint32) {
    572       translation->StoreUint32StackSlot(index);
    573     } else {
    574       translation->StoreInt32StackSlot(index);
    575     }
    576   } else if (op->IsDoubleStackSlot()) {
    577     int index = op->index();
    578     translation->StoreDoubleStackSlot(index);
    579   } else if (op->IsRegister()) {
    580     Register reg = ToRegister(op);
    581     if (is_tagged) {
    582       translation->StoreRegister(reg);
    583     } else if (is_uint32) {
    584       translation->StoreUint32Register(reg);
    585     } else {
    586       translation->StoreInt32Register(reg);
    587     }
    588   } else if (op->IsDoubleRegister()) {
    589     XMMRegister reg = ToDoubleRegister(op);
    590     translation->StoreDoubleRegister(reg);
    591   } else if (op->IsConstantOperand()) {
    592     HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    593     int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    594     translation->StoreLiteral(src_index);
    595   } else {
    596     UNREACHABLE();
    597   }
    598 }
    599 
    600 
    601 void LCodeGen::CallCodeGeneric(Handle<Code> code,
    602                                RelocInfo::Mode mode,
    603                                LInstruction* instr,
    604                                SafepointMode safepoint_mode,
    605                                int argc) {
    606   DCHECK(instr != NULL);
    607   __ call(code, mode);
    608   RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);
    609 
    610   // Signal that we don't inline smi code before these stubs in the
    611   // optimizing code generator.
    612   if (code->kind() == Code::BINARY_OP_IC ||
    613       code->kind() == Code::COMPARE_IC) {
    614     __ nop();
    615   }
    616 }
    617 
    618 
    619 void LCodeGen::CallCode(Handle<Code> code,
    620                         RelocInfo::Mode mode,
    621                         LInstruction* instr) {
    622   CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT, 0);
    623 }
    624 
    625 
    626 void LCodeGen::CallRuntime(const Runtime::Function* function,
    627                            int num_arguments,
    628                            LInstruction* instr,
    629                            SaveFPRegsMode save_doubles) {
    630   DCHECK(instr != NULL);
    631   DCHECK(instr->HasPointerMap());
    632 
    633   __ CallRuntime(function, num_arguments, save_doubles);
    634 
    635   RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
    636 }
    637 
    638 
    639 void LCodeGen::LoadContextFromDeferred(LOperand* context) {
    640   if (context->IsRegister()) {
    641     if (!ToRegister(context).is(rsi)) {
    642       __ movp(rsi, ToRegister(context));
    643     }
    644   } else if (context->IsStackSlot()) {
    645     __ movp(rsi, ToOperand(context));
    646   } else if (context->IsConstantOperand()) {
    647     HConstant* constant =
    648         chunk_->LookupConstant(LConstantOperand::cast(context));
    649     __ Move(rsi, Handle<Object>::cast(constant->handle(isolate())));
    650   } else {
    651     UNREACHABLE();
    652   }
    653 }
    654 
    655 
    656 
    657 void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
    658                                        int argc,
    659                                        LInstruction* instr,
    660                                        LOperand* context) {
    661   LoadContextFromDeferred(context);
    662 
    663   __ CallRuntimeSaveDoubles(id);
    664   RecordSafepointWithRegisters(
    665       instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
    666 }
    667 
    668 
    669 void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
    670                                                     Safepoint::DeoptMode mode) {
    671   environment->set_has_been_used();
    672   if (!environment->HasBeenRegistered()) {
    673     // Physical stack frame layout:
    674     // -x ............. -4  0 ..................................... y
    675     // [incoming arguments] [spill slots] [pushed outgoing arguments]
    676 
    677     // Layout of the environment:
    678     // 0 ..................................................... size-1
    679     // [parameters] [locals] [expression stack including arguments]
    680 
    681     // Layout of the translation:
    682     // 0 ........................................................ size - 1 + 4
    683     // [expression stack including arguments] [locals] [4 words] [parameters]
    684     // |>------------  translation_size ------------<|
    685 
    686     int frame_count = 0;
    687     int jsframe_count = 0;
    688     for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
    689       ++frame_count;
    690       if (e->frame_type() == JS_FUNCTION) {
    691         ++jsframe_count;
    692       }
    693     }
    694     Translation translation(&translations_, frame_count, jsframe_count, zone());
    695     WriteTranslation(environment, &translation);
    696     int deoptimization_index = deoptimizations_.length();
    697     int pc_offset = masm()->pc_offset();
    698     environment->Register(deoptimization_index,
    699                           translation.index(),
    700                           (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    701     deoptimizations_.Add(environment, environment->zone());
    702   }
    703 }
    704 
    705 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
    706                             DeoptimizeReason deopt_reason,
    707                             Deoptimizer::BailoutType bailout_type) {
    708   LEnvironment* environment = instr->environment();
    709   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
    710   DCHECK(environment->HasBeenRegistered());
    711   int id = environment->deoptimization_index();
    712   Address entry =
    713       Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
    714   if (entry == NULL) {
    715     Abort(kBailoutWasNotPrepared);
    716     return;
    717   }
    718 
    719   if (DeoptEveryNTimes()) {
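    // --deopt-every-n-times stress mode: decrement a per-isolate counter and,
    // when it hits zero, reset it to FLAG_deopt_every_n_times and deoptimize
    // unconditionally, ignoring cc.  Flags and rax are preserved around the
    // counter update.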
    720     ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    721     Label no_deopt;
    722     __ pushfq();
    723     __ pushq(rax);
    724     Operand count_operand = masm()->ExternalOperand(count, kScratchRegister);
    725     __ movl(rax, count_operand);
    726     __ subl(rax, Immediate(1));
    727     __ j(not_zero, &no_deopt, Label::kNear);
    728     if (FLAG_trap_on_deopt) __ int3();
    729     __ movl(rax, Immediate(FLAG_deopt_every_n_times));
    730     __ movl(count_operand, rax);
    731     __ popq(rax);
    732     __ popfq();
    733     DCHECK(frame_is_built_);
    734     __ call(entry, RelocInfo::RUNTIME_ENTRY);
    735     __ bind(&no_deopt);
    736     __ movl(count_operand, rax);
    737     __ popq(rax);
    738     __ popfq();
    739   }
    740 
    741   if (info()->ShouldTrapOnDeopt()) {
    742     Label done;
    743     if (cc != no_condition) {
    744       __ j(NegateCondition(cc), &done, Label::kNear);
    745     }
    746     __ int3();
    747     __ bind(&done);
    748   }
    749 
    750   Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);
    751 
    752   DCHECK(info()->IsStub() || frame_is_built_);
    753   // Go through jump table if we need to handle condition, build frame, or
    754   // restore caller doubles.
    755   if (cc == no_condition && frame_is_built_ &&
    756       !info()->saves_caller_doubles()) {
    757     DeoptComment(deopt_info);
    758     __ call(entry, RelocInfo::RUNTIME_ENTRY);
    759   } else {
    760     Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
    761                                             !frame_is_built_);
     762     // We often have several deopts to the same entry; reuse the last
    763     // jump entry if this is the case.
    764     if (FLAG_trace_deopt || isolate()->is_profiling() ||
    765         jump_table_.is_empty() ||
    766         !table_entry.IsEquivalentTo(jump_table_.last())) {
    767       jump_table_.Add(table_entry, zone());
    768     }
    769     if (cc == no_condition) {
    770       __ jmp(&jump_table_.last().label);
    771     } else {
    772       __ j(cc, &jump_table_.last().label);
    773     }
    774   }
    775 }
    776 
    777 void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
    778                             DeoptimizeReason deopt_reason) {
    779   Deoptimizer::BailoutType bailout_type = info()->IsStub()
    780       ? Deoptimizer::LAZY
    781       : Deoptimizer::EAGER;
    782   DeoptimizeIf(cc, instr, deopt_reason, bailout_type);
    783 }
    784 
    785 
    786 void LCodeGen::RecordSafepointWithLazyDeopt(
    787     LInstruction* instr, SafepointMode safepoint_mode, int argc) {
    788   if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    789     RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
    790   } else {
    791     DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
    792     RecordSafepointWithRegisters(
    793         instr->pointer_map(), argc, Safepoint::kLazyDeopt);
    794   }
    795 }
    796 
    797 
    798 void LCodeGen::RecordSafepoint(
    799     LPointerMap* pointers,
    800     Safepoint::Kind kind,
    801     int arguments,
    802     Safepoint::DeoptMode deopt_mode) {
    803   DCHECK(kind == expected_safepoint_kind_);
    804 
    805   const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
    806 
    807   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
    808       kind, arguments, deopt_mode);
    809   for (int i = 0; i < operands->length(); i++) {
    810     LOperand* pointer = operands->at(i);
    811     if (pointer->IsStackSlot()) {
    812       safepoint.DefinePointerSlot(pointer->index(), zone());
    813     } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
    814       safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    815     }
    816   }
    817 }
    818 
    819 
    820 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
    821                                Safepoint::DeoptMode deopt_mode) {
    822   RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
    823 }
    824 
    825 
    826 void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
    827   LPointerMap empty_pointers(zone());
    828   RecordSafepoint(&empty_pointers, deopt_mode);
    829 }
    830 
    831 
    832 void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
    833                                             int arguments,
    834                                             Safepoint::DeoptMode deopt_mode) {
    835   RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
    836 }
    837 
    838 
    839 static const char* LabelType(LLabel* label) {
    840   if (label->is_loop_header()) return " (loop header)";
    841   if (label->is_osr_entry()) return " (OSR entry)";
    842   return "";
    843 }
    844 
    845 
    846 void LCodeGen::DoLabel(LLabel* label) {
    847   Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
    848           current_instruction_,
    849           label->hydrogen_value()->id(),
    850           label->block_id(),
    851           LabelType(label));
    852   __ bind(label->label());
    853   current_block_ = label->block_id();
    854   DoGap(label);
    855 }
    856 
    857 
    858 void LCodeGen::DoParallelMove(LParallelMove* move) {
    859   resolver_.Resolve(move);
    860 }
    861 
    862 
    863 void LCodeGen::DoGap(LGap* gap) {
    864   for (int i = LGap::FIRST_INNER_POSITION;
    865        i <= LGap::LAST_INNER_POSITION;
    866        i++) {
    867     LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    868     LParallelMove* move = gap->GetParallelMove(inner_pos);
    869     if (move != NULL) DoParallelMove(move);
    870   }
    871 }
    872 
    873 
    874 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
    875   DoGap(instr);
    876 }
    877 
    878 
    879 void LCodeGen::DoParameter(LParameter* instr) {
    880   // Nothing to do.
    881 }
    882 
    883 
    884 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
    885   GenerateOsrPrologue();
    886 }
    887 
    888 
    889 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
    890   Register dividend = ToRegister(instr->dividend());
    891   int32_t divisor = instr->divisor();
    892   DCHECK(dividend.is(ToRegister(instr->result())));
    893 
    894   // Theoretically, a variation of the branch-free code for integer division by
    895   // a power of 2 (calculating the remainder via an additional multiplication
    896   // (which gets simplified to an 'and') and subtraction) should be faster, and
    897   // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
    898   // indicate that positive dividends are heavily favored, so the branching
    899   // version performs better.
    900   HMod* hmod = instr->hydrogen();
    901   int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
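  // mask is Abs(divisor) - 1, written so that it does not overflow for
  // divisor == kMinInt.  A negative dividend is negated, masked and negated
  // back, e.g. -5 % 4: 5 & 3 == 1, negated to -1, matching the
  // sign-of-dividend semantics of JavaScript's % operator.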
    902   Label dividend_is_not_negative, done;
    903   if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    904     __ testl(dividend, dividend);
    905     __ j(not_sign, &dividend_is_not_negative, Label::kNear);
    906     // Note that this is correct even for kMinInt operands.
    907     __ negl(dividend);
    908     __ andl(dividend, Immediate(mask));
    909     __ negl(dividend);
    910     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    911       DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
    912     }
    913     __ jmp(&done, Label::kNear);
    914   }
    915 
    916   __ bind(&dividend_is_not_negative);
    917   __ andl(dividend, Immediate(mask));
    918   __ bind(&done);
    919 }
    920 
    921 
    922 void LCodeGen::DoModByConstI(LModByConstI* instr) {
    923   Register dividend = ToRegister(instr->dividend());
    924   int32_t divisor = instr->divisor();
    925   DCHECK(ToRegister(instr->result()).is(rax));
    926 
    927   if (divisor == 0) {
    928     DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
    929     return;
    930   }
    931 
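  // TruncatingDiv leaves the truncated quotient dividend / Abs(divisor) in rdx
  // via a multiply-by-reciprocal ("magic number") sequence instead of an idiv.
  // The remainder is then dividend - quotient * Abs(divisor), which carries the
  // sign of the dividend.  Example: -7 % 3 -> quotient -2, remainder -1.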
    932   __ TruncatingDiv(dividend, Abs(divisor));
    933   __ imull(rdx, rdx, Immediate(Abs(divisor)));
    934   __ movl(rax, dividend);
    935   __ subl(rax, rdx);
    936 
    937   // Check for negative zero.
    938   HMod* hmod = instr->hydrogen();
    939   if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    940     Label remainder_not_zero;
    941     __ j(not_zero, &remainder_not_zero, Label::kNear);
    942     __ cmpl(dividend, Immediate(0));
    943     DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero);
    944     __ bind(&remainder_not_zero);
    945   }
    946 }
    947 
    948 
    949 void LCodeGen::DoModI(LModI* instr) {
    950   HMod* hmod = instr->hydrogen();
    951 
    952   Register left_reg = ToRegister(instr->left());
    953   DCHECK(left_reg.is(rax));
    954   Register right_reg = ToRegister(instr->right());
    955   DCHECK(!right_reg.is(rax));
    956   DCHECK(!right_reg.is(rdx));
    957   Register result_reg = ToRegister(instr->result());
    958   DCHECK(result_reg.is(rdx));
    959 
    960   Label done;
     961   // Check for x % 0; idiv would signal a divide error. We have to
    962   // deopt in this case because we can't return a NaN.
    963   if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    964     __ testl(right_reg, right_reg);
    965     DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
    966   }
    967 
     968   // Check for kMinInt % -1; idiv would signal a divide error. We
    969   // have to deopt if we care about -0, because we can't return that.
    970   if (hmod->CheckFlag(HValue::kCanOverflow)) {
    971     Label no_overflow_possible;
    972     __ cmpl(left_reg, Immediate(kMinInt));
    973     __ j(not_zero, &no_overflow_possible, Label::kNear);
    974     __ cmpl(right_reg, Immediate(-1));
    975     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    976       DeoptimizeIf(equal, instr, DeoptimizeReason::kMinusZero);
    977     } else {
    978       __ j(not_equal, &no_overflow_possible, Label::kNear);
    979       __ Set(result_reg, 0);
    980       __ jmp(&done, Label::kNear);
    981     }
    982     __ bind(&no_overflow_possible);
    983   }
    984 
    985   // Sign extend dividend in eax into edx:eax, since we are using only the low
    986   // 32 bits of the values.
    987   __ cdq();
    988 
    989   // If we care about -0, test if the dividend is <0 and the result is 0.
    990   if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    991     Label positive_left;
    992     __ testl(left_reg, left_reg);
    993     __ j(not_sign, &positive_left, Label::kNear);
    994     __ idivl(right_reg);
    995     __ testl(result_reg, result_reg);
    996     DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
    997     __ jmp(&done, Label::kNear);
    998     __ bind(&positive_left);
    999   }
   1000   __ idivl(right_reg);
   1001   __ bind(&done);
   1002 }
   1003 
   1004 
   1005 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
   1006   Register dividend = ToRegister(instr->dividend());
   1007   int32_t divisor = instr->divisor();
   1008   DCHECK(dividend.is(ToRegister(instr->result())));
   1009 
   1010   // If the divisor is positive, things are easy: There can be no deopts and we
   1011   // can simply do an arithmetic right shift.
   1012   if (divisor == 1) return;
   1013   int32_t shift = WhichPowerOf2Abs(divisor);
   1014   if (divisor > 1) {
   1015     __ sarl(dividend, Immediate(shift));
   1016     return;
   1017   }
   1018 
   1019   // If the divisor is negative, we have to negate and handle edge cases.
   1020   __ negl(dividend);
   1021   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1022     DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
   1023   }
   1024 
   1025   // Dividing by -1 is basically negation, unless we overflow.
   1026   if (divisor == -1) {
   1027     if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
   1028       DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
   1029     }
   1030     return;
   1031   }
   1032 
   1033   // If the negation could not overflow, simply shifting is OK.
   1034   if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
   1035     __ sarl(dividend, Immediate(shift));
   1036     return;
   1037   }
   1038 
   1039   Label not_kmin_int, done;
   1040   __ j(no_overflow, &not_kmin_int, Label::kNear);
   1041   __ movl(dividend, Immediate(kMinInt / divisor));
   1042   __ jmp(&done, Label::kNear);
   1043   __ bind(&not_kmin_int);
   1044   __ sarl(dividend, Immediate(shift));
   1045   __ bind(&done);
   1046 }
   1047 
   1048 
   1049 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
   1050   Register dividend = ToRegister(instr->dividend());
   1051   int32_t divisor = instr->divisor();
   1052   DCHECK(ToRegister(instr->result()).is(rdx));
   1053 
   1054   if (divisor == 0) {
   1055     DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
   1056     return;
   1057   }
   1058 
   1059   // Check for (0 / -x) that will produce negative zero.
   1060   HMathFloorOfDiv* hdiv = instr->hydrogen();
   1061   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
   1062     __ testl(dividend, dividend);
   1063     DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
   1064   }
   1065 
   1066   // Easy case: We need no dynamic check for the dividend and the flooring
   1067   // division is the same as the truncating division.
   1068   if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
   1069       (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
   1070     __ TruncatingDiv(dividend, Abs(divisor));
   1071     if (divisor < 0) __ negl(rdx);
   1072     return;
   1073   }
   1074 
   1075   // In the general case we may need to adjust before and after the truncating
   1076   // division to get a flooring division.
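  // When the dividend and divisor have opposite signs, floor(n / d) equals
  // trunc((n + 1) / d) - 1 for d > 0, and trunc((n - 1) / d) - 1 for d < 0;
  // e.g. floor(-7 / 3) = trunc(-6 / 3) - 1 = -3.  The adjustment path below
  // computes exactly that.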
   1077   Register temp = ToRegister(instr->temp3());
   1078   DCHECK(!temp.is(dividend) && !temp.is(rax) && !temp.is(rdx));
   1079   Label needs_adjustment, done;
   1080   __ cmpl(dividend, Immediate(0));
   1081   __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
   1082   __ TruncatingDiv(dividend, Abs(divisor));
   1083   if (divisor < 0) __ negl(rdx);
   1084   __ jmp(&done, Label::kNear);
   1085   __ bind(&needs_adjustment);
   1086   __ leal(temp, Operand(dividend, divisor > 0 ? 1 : -1));
   1087   __ TruncatingDiv(temp, Abs(divisor));
   1088   if (divisor < 0) __ negl(rdx);
   1089   __ decl(rdx);
   1090   __ bind(&done);
   1091 }
   1092 
   1093 
   1094 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
   1095 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
   1096   HBinaryOperation* hdiv = instr->hydrogen();
   1097   Register dividend = ToRegister(instr->dividend());
   1098   Register divisor = ToRegister(instr->divisor());
   1099   Register remainder = ToRegister(instr->temp());
   1100   Register result = ToRegister(instr->result());
   1101   DCHECK(dividend.is(rax));
   1102   DCHECK(remainder.is(rdx));
   1103   DCHECK(result.is(rax));
   1104   DCHECK(!divisor.is(rax));
   1105   DCHECK(!divisor.is(rdx));
   1106 
   1107   // Check for x / 0.
   1108   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
   1109     __ testl(divisor, divisor);
   1110     DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
   1111   }
   1112 
   1113   // Check for (0 / -x) that will produce negative zero.
   1114   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1115     Label dividend_not_zero;
   1116     __ testl(dividend, dividend);
   1117     __ j(not_zero, &dividend_not_zero, Label::kNear);
   1118     __ testl(divisor, divisor);
   1119     DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
   1120     __ bind(&dividend_not_zero);
   1121   }
   1122 
   1123   // Check for (kMinInt / -1).
   1124   if (hdiv->CheckFlag(HValue::kCanOverflow)) {
   1125     Label dividend_not_min_int;
   1126     __ cmpl(dividend, Immediate(kMinInt));
   1127     __ j(not_zero, &dividend_not_min_int, Label::kNear);
   1128     __ cmpl(divisor, Immediate(-1));
   1129     DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
   1130     __ bind(&dividend_not_min_int);
   1131   }
   1132 
   1133   // Sign extend to rdx (= remainder).
   1134   __ cdq();
   1135   __ idivl(divisor);
   1136 
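  // idivl truncates toward zero.  To get flooring semantics, subtract one from
  // the quotient when the remainder is non-zero and its sign differs from the
  // divisor's: remainder ^ divisor then has its sign bit set, so an arithmetic
  // shift by 31 yields -1 (otherwise 0), which is added to the result.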
   1137   Label done;
   1138   __ testl(remainder, remainder);
   1139   __ j(zero, &done, Label::kNear);
   1140   __ xorl(remainder, divisor);
   1141   __ sarl(remainder, Immediate(31));
   1142   __ addl(result, remainder);
   1143   __ bind(&done);
   1144 }
   1145 
   1146 
   1147 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
   1148   Register dividend = ToRegister(instr->dividend());
   1149   int32_t divisor = instr->divisor();
   1150   Register result = ToRegister(instr->result());
   1151   DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
   1152   DCHECK(!result.is(dividend));
   1153 
   1154   // Check for (0 / -x) that will produce negative zero.
   1155   HDiv* hdiv = instr->hydrogen();
   1156   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
   1157     __ testl(dividend, dividend);
   1158     DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
   1159   }
   1160   // Check for (kMinInt / -1).
   1161   if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
   1162     __ cmpl(dividend, Immediate(kMinInt));
   1163     DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
   1164   }
   1165   // Deoptimize if remainder will not be 0.
   1166   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
   1167       divisor != 1 && divisor != -1) {
   1168     int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
   1169     __ testl(dividend, Immediate(mask));
   1170     DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision);
   1171   }
   1172   __ Move(result, dividend);
   1173   int32_t shift = WhichPowerOf2Abs(divisor);
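  // Round the arithmetic shift toward zero by adding Abs(divisor) - 1 to
  // negative dividends before shifting.  The bias comes from the sign bit:
  // sarl(31) smears it across the register and shrl(32 - shift) reduces that
  // to 2^shift - 1 for negative inputs and 0 otherwise.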
   1174   if (shift > 0) {
   1175     // The arithmetic shift is always OK, the 'if' is an optimization only.
   1176     if (shift > 1) __ sarl(result, Immediate(31));
   1177     __ shrl(result, Immediate(32 - shift));
   1178     __ addl(result, dividend);
   1179     __ sarl(result, Immediate(shift));
   1180   }
   1181   if (divisor < 0) __ negl(result);
   1182 }
   1183 
   1184 
   1185 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
   1186   Register dividend = ToRegister(instr->dividend());
   1187   int32_t divisor = instr->divisor();
   1188   DCHECK(ToRegister(instr->result()).is(rdx));
   1189 
   1190   if (divisor == 0) {
   1191     DeoptimizeIf(no_condition, instr, DeoptimizeReason::kDivisionByZero);
   1192     return;
   1193   }
   1194 
   1195   // Check for (0 / -x) that will produce negative zero.
   1196   HDiv* hdiv = instr->hydrogen();
   1197   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
   1198     __ testl(dividend, dividend);
   1199     DeoptimizeIf(zero, instr, DeoptimizeReason::kMinusZero);
   1200   }
   1201 
   1202   __ TruncatingDiv(dividend, Abs(divisor));
   1203   if (divisor < 0) __ negl(rdx);
   1204 
   1205   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
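    // The quotient is exact only if quotient * divisor reproduces the
    // dividend; otherwise precision was lost and we must deoptimize, since
    // the true result of the division would not be an int32.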
   1206     __ movl(rax, rdx);
   1207     __ imull(rax, rax, Immediate(divisor));
   1208     __ subl(rax, dividend);
   1209     DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision);
   1210   }
   1211 }
   1212 
   1213 
   1214 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
   1215 void LCodeGen::DoDivI(LDivI* instr) {
   1216   HBinaryOperation* hdiv = instr->hydrogen();
   1217   Register dividend = ToRegister(instr->dividend());
   1218   Register divisor = ToRegister(instr->divisor());
   1219   Register remainder = ToRegister(instr->temp());
   1220   DCHECK(dividend.is(rax));
   1221   DCHECK(remainder.is(rdx));
   1222   DCHECK(ToRegister(instr->result()).is(rax));
   1223   DCHECK(!divisor.is(rax));
   1224   DCHECK(!divisor.is(rdx));
   1225 
   1226   // Check for x / 0.
   1227   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
   1228     __ testl(divisor, divisor);
   1229     DeoptimizeIf(zero, instr, DeoptimizeReason::kDivisionByZero);
   1230   }
   1231 
   1232   // Check for (0 / -x) that will produce negative zero.
   1233   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1234     Label dividend_not_zero;
   1235     __ testl(dividend, dividend);
   1236     __ j(not_zero, &dividend_not_zero, Label::kNear);
   1237     __ testl(divisor, divisor);
   1238     DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
   1239     __ bind(&dividend_not_zero);
   1240   }
   1241 
   1242   // Check for (kMinInt / -1).
   1243   if (hdiv->CheckFlag(HValue::kCanOverflow)) {
   1244     Label dividend_not_min_int;
   1245     __ cmpl(dividend, Immediate(kMinInt));
   1246     __ j(not_zero, &dividend_not_min_int, Label::kNear);
   1247     __ cmpl(divisor, Immediate(-1));
   1248     DeoptimizeIf(zero, instr, DeoptimizeReason::kOverflow);
   1249     __ bind(&dividend_not_min_int);
   1250   }
   1251 
   1252   // Sign extend to rdx (= remainder).
   1253   __ cdq();
   1254   __ idivl(divisor);
   1255 
   1256   if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
   1257     // Deoptimize if remainder is not 0.
   1258     __ testl(remainder, remainder);
   1259     DeoptimizeIf(not_zero, instr, DeoptimizeReason::kLostPrecision);
   1260   }
   1261 }
   1262 
   1263 
   1264 void LCodeGen::DoMulI(LMulI* instr) {
   1265   Register left = ToRegister(instr->left());
   1266   LOperand* right = instr->right();
   1267 
   1268   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1269     if (instr->hydrogen_value()->representation().IsSmi()) {
   1270       __ movp(kScratchRegister, left);
   1271     } else {
   1272       __ movl(kScratchRegister, left);
   1273     }
   1274   }
   1275 
   1276   bool can_overflow =
   1277       instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   1278   if (right->IsConstantOperand()) {
   1279     int32_t right_value = ToInteger32(LConstantOperand::cast(right));
   1280     if (right_value == -1) {
   1281       __ negl(left);
   1282     } else if (right_value == 0) {
   1283       __ xorl(left, left);
   1284     } else if (right_value == 2) {
   1285       __ addl(left, left);
   1286     } else if (!can_overflow) {
   1287       // If the multiplication is known to not overflow, we
   1288       // can use operations that don't set the overflow flag
   1289       // correctly.
   1290       switch (right_value) {
   1291         case 1:
   1292           // Do nothing.
   1293           break;
   1294         case 3:
   1295           __ leal(left, Operand(left, left, times_2, 0));
   1296           break;
   1297         case 4:
   1298           __ shll(left, Immediate(2));
   1299           break;
   1300         case 5:
   1301           __ leal(left, Operand(left, left, times_4, 0));
   1302           break;
   1303         case 8:
   1304           __ shll(left, Immediate(3));
   1305           break;
   1306         case 9:
   1307           __ leal(left, Operand(left, left, times_8, 0));
   1308           break;
   1309         case 16:
   1310           __ shll(left, Immediate(4));
   1311           break;
   1312         default:
   1313           __ imull(left, left, Immediate(right_value));
   1314           break;
   1315       }
   1316     } else {
   1317       __ imull(left, left, Immediate(right_value));
   1318     }
   1319   } else if (right->IsStackSlot()) {
   1320     if (instr->hydrogen_value()->representation().IsSmi()) {
   1321       __ SmiToInteger64(left, left);
   1322       __ imulp(left, ToOperand(right));
   1323     } else {
   1324       __ imull(left, ToOperand(right));
   1325     }
   1326   } else {
   1327     if (instr->hydrogen_value()->representation().IsSmi()) {
   1328       __ SmiToInteger64(left, left);
   1329       __ imulp(left, ToRegister(right));
   1330     } else {
   1331       __ imull(left, ToRegister(right));
   1332     }
   1333   }
   1334 
   1335   if (can_overflow) {
   1336     DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
   1337   }
   1338 
   1339   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   1340     // Bail out if the result is supposed to be negative zero.
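    // kScratchRegister holds a copy of the left operand taken before the
    // multiply.  If the product is zero, the correct result is -0 exactly when
    // one of the original operands was negative; the checks below detect this
    // from the constant's sign or from the sign of kScratchRegister | right.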
   1341     Label done;
   1342     if (instr->hydrogen_value()->representation().IsSmi()) {
   1343       __ testp(left, left);
   1344     } else {
   1345       __ testl(left, left);
   1346     }
   1347     __ j(not_zero, &done, Label::kNear);
   1348     if (right->IsConstantOperand()) {
   1349       // Constant can't be represented as 32-bit Smi due to immediate size
   1350       // limit.
   1351       DCHECK(SmiValuesAre32Bits()
   1352           ? !instr->hydrogen_value()->representation().IsSmi()
   1353           : SmiValuesAre31Bits());
   1354       if (ToInteger32(LConstantOperand::cast(right)) < 0) {
   1355         DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
   1356       } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
   1357         __ cmpl(kScratchRegister, Immediate(0));
   1358         DeoptimizeIf(less, instr, DeoptimizeReason::kMinusZero);
   1359       }
   1360     } else if (right->IsStackSlot()) {
   1361       if (instr->hydrogen_value()->representation().IsSmi()) {
   1362         __ orp(kScratchRegister, ToOperand(right));
   1363       } else {
   1364         __ orl(kScratchRegister, ToOperand(right));
   1365       }
   1366       DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
   1367     } else {
   1368       // Test the non-zero operand for negative sign.
   1369       if (instr->hydrogen_value()->representation().IsSmi()) {
   1370         __ orp(kScratchRegister, ToRegister(right));
   1371       } else {
   1372         __ orl(kScratchRegister, ToRegister(right));
   1373       }
   1374       DeoptimizeIf(sign, instr, DeoptimizeReason::kMinusZero);
   1375     }
   1376     __ bind(&done);
   1377   }
   1378 }
   1379 
   1380 
   1381 void LCodeGen::DoBitI(LBitI* instr) {
   1382   LOperand* left = instr->left();
   1383   LOperand* right = instr->right();
   1384   DCHECK(left->Equals(instr->result()));
   1385   DCHECK(left->IsRegister());
   1386 
   1387   if (right->IsConstantOperand()) {
   1388     int32_t right_operand =
   1389         ToRepresentation(LConstantOperand::cast(right),
   1390                          instr->hydrogen()->right()->representation());
   1391     switch (instr->op()) {
   1392       case Token::BIT_AND:
   1393         __ andl(ToRegister(left), Immediate(right_operand));
   1394         break;
   1395       case Token::BIT_OR:
   1396         __ orl(ToRegister(left), Immediate(right_operand));
   1397         break;
   1398       case Token::BIT_XOR:
   1399         if (right_operand == int32_t(~0)) {
   1400           __ notl(ToRegister(left));
   1401         } else {
   1402           __ xorl(ToRegister(left), Immediate(right_operand));
   1403         }
   1404         break;
   1405       default:
   1406         UNREACHABLE();
   1407         break;
   1408     }
   1409   } else if (right->IsStackSlot()) {
   1410     switch (instr->op()) {
   1411       case Token::BIT_AND:
   1412         if (instr->IsInteger32()) {
   1413           __ andl(ToRegister(left), ToOperand(right));
   1414         } else {
   1415           __ andp(ToRegister(left), ToOperand(right));
   1416         }
   1417         break;
   1418       case Token::BIT_OR:
   1419         if (instr->IsInteger32()) {
   1420           __ orl(ToRegister(left), ToOperand(right));
   1421         } else {
   1422           __ orp(ToRegister(left), ToOperand(right));
   1423         }
   1424         break;
   1425       case Token::BIT_XOR:
   1426         if (instr->IsInteger32()) {
   1427           __ xorl(ToRegister(left), ToOperand(right));
   1428         } else {
   1429           __ xorp(ToRegister(left), ToOperand(right));
   1430         }
   1431         break;
   1432       default:
   1433         UNREACHABLE();
   1434         break;
   1435     }
   1436   } else {
   1437     DCHECK(right->IsRegister());
   1438     switch (instr->op()) {
   1439       case Token::BIT_AND:
   1440         if (instr->IsInteger32()) {
   1441           __ andl(ToRegister(left), ToRegister(right));
   1442         } else {
   1443           __ andp(ToRegister(left), ToRegister(right));
   1444         }
   1445         break;
   1446       case Token::BIT_OR:
   1447         if (instr->IsInteger32()) {
   1448           __ orl(ToRegister(left), ToRegister(right));
   1449         } else {
   1450           __ orp(ToRegister(left), ToRegister(right));
   1451         }
   1452         break;
   1453       case Token::BIT_XOR:
   1454         if (instr->IsInteger32()) {
   1455           __ xorl(ToRegister(left), ToRegister(right));
   1456         } else {
   1457           __ xorp(ToRegister(left), ToRegister(right));
   1458         }
   1459         break;
   1460       default:
   1461         UNREACHABLE();
   1462         break;
   1463     }
   1464   }
   1465 }
   1466 
   1467 
   1468 void LCodeGen::DoShiftI(LShiftI* instr) {
   1469   LOperand* left = instr->left();
   1470   LOperand* right = instr->right();
   1471   DCHECK(left->Equals(instr->result()));
   1472   DCHECK(left->IsRegister());
   1473   if (right->IsRegister()) {
   1474     DCHECK(ToRegister(right).is(rcx));
   1475 
   1476     switch (instr->op()) {
   1477       case Token::ROR:
   1478         __ rorl_cl(ToRegister(left));
   1479         break;
   1480       case Token::SAR:
   1481         __ sarl_cl(ToRegister(left));
   1482         break;
   1483       case Token::SHR:
   1484         __ shrl_cl(ToRegister(left));
   1485         if (instr->can_deopt()) {
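                     // The shift amount in rcx may be zero, in which case a negative
                     // input keeps its sign bit. The result is then an unsigned value
                     // >= 2^31 that cannot be represented in the expected signed
                     // 32-bit result, so deoptimize.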
   1486           __ testl(ToRegister(left), ToRegister(left));
   1487           DeoptimizeIf(negative, instr, DeoptimizeReason::kNegativeValue);
   1488         }
   1489         break;
   1490       case Token::SHL:
   1491         __ shll_cl(ToRegister(left));
   1492         break;
   1493       default:
   1494         UNREACHABLE();
   1495         break;
   1496     }
   1497   } else {
   1498     int32_t value = ToInteger32(LConstantOperand::cast(right));
   1499     uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
   1500     switch (instr->op()) {
   1501       case Token::ROR:
   1502         if (shift_count != 0) {
   1503           __ rorl(ToRegister(left), Immediate(shift_count));
   1504         }
   1505         break;
   1506       case Token::SAR:
   1507         if (shift_count != 0) {
   1508           __ sarl(ToRegister(left), Immediate(shift_count));
   1509         }
   1510         break;
   1511       case Token::SHR:
   1512         if (shift_count != 0) {
   1513           __ shrl(ToRegister(left), Immediate(shift_count));
   1514         } else if (instr->can_deopt()) {
   1515           __ testl(ToRegister(left), ToRegister(left));
   1516           DeoptimizeIf(negative, instr, DeoptimizeReason::kNegativeValue);
   1517         }
   1518         break;
   1519       case Token::SHL:
   1520         if (shift_count != 0) {
   1521           if (instr->hydrogen_value()->representation().IsSmi()) {
   1522             if (SmiValuesAre32Bits()) {
   1523               __ shlp(ToRegister(left), Immediate(shift_count));
   1524             } else {
   1525               DCHECK(SmiValuesAre31Bits());
   1526               if (instr->can_deopt()) {
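                           // With 31-bit Smis a left shift can overflow the Smi range
                           // (e.g. 0x40000000 << 1). Shift by count - 1 as an int32
                           // first, then let Integer32ToSmi perform the final
                           // shift-and-tag; deoptimize if that last shift overflows.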
   1527                 if (shift_count != 1) {
   1528                   __ shll(ToRegister(left), Immediate(shift_count - 1));
   1529                 }
   1530                 __ Integer32ToSmi(ToRegister(left), ToRegister(left));
   1531                 DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
   1532               } else {
   1533                 __ shll(ToRegister(left), Immediate(shift_count));
   1534               }
   1535             }
   1536           } else {
   1537             __ shll(ToRegister(left), Immediate(shift_count));
   1538           }
   1539         }
   1540         break;
   1541       default:
   1542         UNREACHABLE();
   1543         break;
   1544     }
   1545   }
   1546 }
   1547 
   1548 
   1549 void LCodeGen::DoSubI(LSubI* instr) {
   1550   LOperand* left = instr->left();
   1551   LOperand* right = instr->right();
   1552   DCHECK(left->Equals(instr->result()));
   1553 
   1554   if (right->IsConstantOperand()) {
   1555     int32_t right_operand =
   1556         ToRepresentation(LConstantOperand::cast(right),
   1557                          instr->hydrogen()->right()->representation());
   1558     __ subl(ToRegister(left), Immediate(right_operand));
   1559   } else if (right->IsRegister()) {
   1560     if (instr->hydrogen_value()->representation().IsSmi()) {
   1561       __ subp(ToRegister(left), ToRegister(right));
   1562     } else {
   1563       __ subl(ToRegister(left), ToRegister(right));
   1564     }
   1565   } else {
   1566     if (instr->hydrogen_value()->representation().IsSmi()) {
   1567       __ subp(ToRegister(left), ToOperand(right));
   1568     } else {
   1569       __ subl(ToRegister(left), ToOperand(right));
   1570     }
   1571   }
   1572 
   1573   if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
   1574     DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
   1575   }
   1576 }
   1577 
   1578 
   1579 void LCodeGen::DoConstantI(LConstantI* instr) {
   1580   Register dst = ToRegister(instr->result());
   1581   if (instr->value() == 0) {
   1582     __ xorl(dst, dst);
   1583   } else {
   1584     __ movl(dst, Immediate(instr->value()));
   1585   }
   1586 }
   1587 
   1588 
   1589 void LCodeGen::DoConstantS(LConstantS* instr) {
   1590   __ Move(ToRegister(instr->result()), instr->value());
   1591 }
   1592 
   1593 
   1594 void LCodeGen::DoConstantD(LConstantD* instr) {
   1595   __ Move(ToDoubleRegister(instr->result()), instr->bits());
   1596 }
   1597 
   1598 
   1599 void LCodeGen::DoConstantE(LConstantE* instr) {
   1600   __ LoadAddress(ToRegister(instr->result()), instr->value());
   1601 }
   1602 
   1603 
   1604 void LCodeGen::DoConstantT(LConstantT* instr) {
   1605   Handle<Object> object = instr->value(isolate());
   1606   AllowDeferredHandleDereference smi_check;
   1607   __ Move(ToRegister(instr->result()), object);
   1608 }
   1609 
   1610 
   1611 Operand LCodeGen::BuildSeqStringOperand(Register string,
   1612                                         LOperand* index,
   1613                                         String::Encoding encoding) {
   1614   if (index->IsConstantOperand()) {
   1615     int offset = ToInteger32(LConstantOperand::cast(index));
   1616     if (encoding == String::TWO_BYTE_ENCODING) {
   1617       offset *= kUC16Size;
   1618     }
   1619     STATIC_ASSERT(kCharSize == 1);
   1620     return FieldOperand(string, SeqString::kHeaderSize + offset);
   1621   }
   1622   return FieldOperand(
   1623       string, ToRegister(index),
   1624       encoding == String::ONE_BYTE_ENCODING ? times_1 : times_2,
   1625       SeqString::kHeaderSize);
   1626 }
   1627 
   1628 
   1629 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
   1630   String::Encoding encoding = instr->hydrogen()->encoding();
   1631   Register result = ToRegister(instr->result());
   1632   Register string = ToRegister(instr->string());
   1633 
   1634   if (FLAG_debug_code) {
   1635     __ Push(string);
   1636     __ movp(string, FieldOperand(string, HeapObject::kMapOffset));
   1637     __ movzxbp(string, FieldOperand(string, Map::kInstanceTypeOffset));
   1638 
   1639     __ andb(string, Immediate(kStringRepresentationMask | kStringEncodingMask));
   1640     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
   1641     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
   1642     __ cmpp(string, Immediate(encoding == String::ONE_BYTE_ENCODING
   1643                               ? one_byte_seq_type : two_byte_seq_type));
   1644     __ Check(equal, kUnexpectedStringType);
   1645     __ Pop(string);
   1646   }
   1647 
   1648   Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
   1649   if (encoding == String::ONE_BYTE_ENCODING) {
   1650     __ movzxbl(result, operand);
   1651   } else {
   1652     __ movzxwl(result, operand);
   1653   }
   1654 }
   1655 
   1656 
   1657 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
   1658   String::Encoding encoding = instr->hydrogen()->encoding();
   1659   Register string = ToRegister(instr->string());
   1660 
   1661   if (FLAG_debug_code) {
   1662     Register value = ToRegister(instr->value());
   1663     Register index = ToRegister(instr->index());
   1664     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
   1665     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
   1666     int encoding_mask =
   1667         instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
   1668         ? one_byte_seq_type : two_byte_seq_type;
   1669     __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
   1670   }
   1671 
   1672   Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
   1673   if (instr->value()->IsConstantOperand()) {
   1674     int value = ToInteger32(LConstantOperand::cast(instr->value()));
   1675     DCHECK_LE(0, value);
   1676     if (encoding == String::ONE_BYTE_ENCODING) {
   1677       DCHECK_LE(value, String::kMaxOneByteCharCode);
   1678       __ movb(operand, Immediate(value));
   1679     } else {
   1680       DCHECK_LE(value, String::kMaxUtf16CodeUnit);
   1681       __ movw(operand, Immediate(value));
   1682     }
   1683   } else {
   1684     Register value = ToRegister(instr->value());
   1685     if (encoding == String::ONE_BYTE_ENCODING) {
   1686       __ movb(operand, value);
   1687     } else {
   1688       __ movw(operand, value);
   1689     }
   1690   }
   1691 }
   1692 
   1693 
   1694 void LCodeGen::DoAddI(LAddI* instr) {
   1695   LOperand* left = instr->left();
   1696   LOperand* right = instr->right();
   1697 
   1698   Representation target_rep = instr->hydrogen()->representation();
   1699   bool is_p = target_rep.IsSmi() || target_rep.IsExternal();
   1700 
   1701   if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
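               // On this path the result register differs from |left|, so lea can
               // compute the sum directly into the result without clobbering the
               // operands. Note that lea does not set flags, and no overflow check
               // is emitted on this path.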
   1702     if (right->IsConstantOperand()) {
    1703       // Smi immediates are not supported when Smis are 32 bits wide.
   1704       DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
   1705       int32_t offset =
   1706           ToRepresentation(LConstantOperand::cast(right),
   1707                            instr->hydrogen()->right()->representation());
   1708       if (is_p) {
   1709         __ leap(ToRegister(instr->result()),
   1710                 MemOperand(ToRegister(left), offset));
   1711       } else {
   1712         __ leal(ToRegister(instr->result()),
   1713                 MemOperand(ToRegister(left), offset));
   1714       }
   1715     } else {
   1716       Operand address(ToRegister(left), ToRegister(right), times_1, 0);
   1717       if (is_p) {
   1718         __ leap(ToRegister(instr->result()), address);
   1719       } else {
   1720         __ leal(ToRegister(instr->result()), address);
   1721       }
   1722     }
   1723   } else {
   1724     if (right->IsConstantOperand()) {
    1725       // Smi immediates are not supported when Smis are 32 bits wide.
   1726       DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
   1727       int32_t right_operand =
   1728           ToRepresentation(LConstantOperand::cast(right),
   1729                            instr->hydrogen()->right()->representation());
   1730       if (is_p) {
   1731         __ addp(ToRegister(left), Immediate(right_operand));
   1732       } else {
   1733         __ addl(ToRegister(left), Immediate(right_operand));
   1734       }
   1735     } else if (right->IsRegister()) {
   1736       if (is_p) {
   1737         __ addp(ToRegister(left), ToRegister(right));
   1738       } else {
   1739         __ addl(ToRegister(left), ToRegister(right));
   1740       }
   1741     } else {
   1742       if (is_p) {
   1743         __ addp(ToRegister(left), ToOperand(right));
   1744       } else {
   1745         __ addl(ToRegister(left), ToOperand(right));
   1746       }
   1747     }
   1748     if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
   1749       DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
   1750     }
   1751   }
   1752 }
   1753 
   1754 
   1755 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
   1756   LOperand* left = instr->left();
   1757   LOperand* right = instr->right();
   1758   DCHECK(left->Equals(instr->result()));
   1759   HMathMinMax::Operation operation = instr->hydrogen()->operation();
   1760   if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
   1761     Label return_left;
   1762     Condition condition = (operation == HMathMinMax::kMathMin)
   1763         ? less_equal
   1764         : greater_equal;
   1765     Register left_reg = ToRegister(left);
   1766     if (right->IsConstantOperand()) {
   1767       Immediate right_imm = Immediate(
   1768           ToRepresentation(LConstantOperand::cast(right),
   1769                            instr->hydrogen()->right()->representation()));
   1770       DCHECK(SmiValuesAre32Bits()
   1771           ? !instr->hydrogen()->representation().IsSmi()
   1772           : SmiValuesAre31Bits());
   1773       __ cmpl(left_reg, right_imm);
   1774       __ j(condition, &return_left, Label::kNear);
   1775       __ movl(left_reg, right_imm);
   1776     } else if (right->IsRegister()) {
   1777       Register right_reg = ToRegister(right);
   1778       if (instr->hydrogen_value()->representation().IsSmi()) {
   1779         __ cmpp(left_reg, right_reg);
   1780       } else {
   1781         __ cmpl(left_reg, right_reg);
   1782       }
   1783       __ j(condition, &return_left, Label::kNear);
   1784       __ movp(left_reg, right_reg);
   1785     } else {
   1786       Operand right_op = ToOperand(right);
   1787       if (instr->hydrogen_value()->representation().IsSmi()) {
   1788         __ cmpp(left_reg, right_op);
   1789       } else {
   1790         __ cmpl(left_reg, right_op);
   1791       }
   1792       __ j(condition, &return_left, Label::kNear);
   1793       __ movp(left_reg, right_op);
   1794     }
   1795     __ bind(&return_left);
   1796   } else {
   1797     DCHECK(instr->hydrogen()->representation().IsDouble());
   1798     Label not_nan, distinct, return_left, return_right;
   1799     Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
   1800     XMMRegister left_reg = ToDoubleRegister(left);
   1801     XMMRegister right_reg = ToDoubleRegister(right);
   1802     __ Ucomisd(left_reg, right_reg);
    1803     __ j(parity_odd, &not_nan, Label::kNear);  // Neither operand is NaN.
   1804 
   1805     // One of the numbers is NaN. Find which one and return it.
   1806     __ Ucomisd(left_reg, left_reg);
   1807     __ j(parity_even, &return_left, Label::kNear);  // left is NaN.
   1808     __ jmp(&return_right, Label::kNear);            // right is NaN.
   1809 
   1810     __ bind(&not_nan);
   1811     __ j(not_equal, &distinct, Label::kNear);  // left != right.
   1812 
   1813     // left == right
   1814     XMMRegister xmm_scratch = double_scratch0();
   1815     __ Xorpd(xmm_scratch, xmm_scratch);
   1816     __ Ucomisd(left_reg, xmm_scratch);
   1817     __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.
   1818 
   1819     // At this point, both left and right are either +0 or -0.
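               // For min, -0 must win: OR the sign bits so the result is -0 if
               // either input is -0. For max, +0 must win: AND the sign bits so
               // the result is +0 unless both inputs are -0.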
   1820     if (operation == HMathMinMax::kMathMin) {
   1821       __ Orpd(left_reg, right_reg);
   1822     } else {
   1823       __ Andpd(left_reg, right_reg);
   1824     }
   1825     __ jmp(&return_left, Label::kNear);
   1826 
   1827     __ bind(&distinct);
   1828     __ j(condition, &return_left, Label::kNear);
   1829 
   1830     __ bind(&return_right);
   1831     __ Movapd(left_reg, right_reg);
   1832 
   1833     __ bind(&return_left);
   1834   }
   1835 }
   1836 
   1837 
   1838 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
   1839   XMMRegister left = ToDoubleRegister(instr->left());
   1840   XMMRegister right = ToDoubleRegister(instr->right());
   1841   XMMRegister result = ToDoubleRegister(instr->result());
   1842   switch (instr->op()) {
   1843     case Token::ADD:
   1844       if (CpuFeatures::IsSupported(AVX)) {
   1845         CpuFeatureScope scope(masm(), AVX);
   1846         __ vaddsd(result, left, right);
   1847       } else {
   1848         DCHECK(result.is(left));
   1849         __ addsd(left, right);
   1850       }
   1851       break;
   1852     case Token::SUB:
   1853       if (CpuFeatures::IsSupported(AVX)) {
   1854         CpuFeatureScope scope(masm(), AVX);
   1855         __ vsubsd(result, left, right);
   1856       } else {
   1857         DCHECK(result.is(left));
   1858         __ subsd(left, right);
   1859       }
    1860       break;
   1861     case Token::MUL:
   1862       if (CpuFeatures::IsSupported(AVX)) {
   1863         CpuFeatureScope scope(masm(), AVX);
   1864         __ vmulsd(result, left, right);
   1865       } else {
   1866         DCHECK(result.is(left));
   1867         __ mulsd(left, right);
   1868       }
   1869       break;
   1870     case Token::DIV:
   1871       if (CpuFeatures::IsSupported(AVX)) {
   1872         CpuFeatureScope scope(masm(), AVX);
   1873         __ vdivsd(result, left, right);
   1874       } else {
   1875         DCHECK(result.is(left));
   1876         __ divsd(left, right);
   1877       }
    1878       // Don't delete this mov. It may improve performance on some CPUs
    1879       // when there is a (v)mulsd depending on the result.
   1880       __ Movapd(result, result);
   1881       break;
   1882     case Token::MOD: {
   1883       DCHECK(left.is(xmm0));
   1884       DCHECK(right.is(xmm1));
   1885       DCHECK(result.is(xmm0));
   1886       __ PrepareCallCFunction(2);
   1887       __ CallCFunction(
   1888           ExternalReference::mod_two_doubles_operation(isolate()), 2);
   1889       break;
   1890     }
   1891     default:
   1892       UNREACHABLE();
   1893       break;
   1894   }
   1895 }
   1896 
   1897 
   1898 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
   1899   DCHECK(ToRegister(instr->context()).is(rsi));
   1900   DCHECK(ToRegister(instr->left()).is(rdx));
   1901   DCHECK(ToRegister(instr->right()).is(rax));
   1902   DCHECK(ToRegister(instr->result()).is(rax));
   1903 
   1904   Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
   1905   CallCode(code, RelocInfo::CODE_TARGET, instr);
   1906 }
   1907 
   1908 
   1909 template<class InstrType>
   1910 void LCodeGen::EmitBranch(InstrType instr, Condition cc) {
   1911   int left_block = instr->TrueDestination(chunk_);
   1912   int right_block = instr->FalseDestination(chunk_);
   1913 
   1914   int next_block = GetNextEmittedBlock();
   1915 
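             // Emit the cheapest branch shape: fall through to whichever successor
             // block is emitted next, and only emit the second, unconditional jump
             // when neither successor immediately follows.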
   1916   if (right_block == left_block || cc == no_condition) {
   1917     EmitGoto(left_block);
   1918   } else if (left_block == next_block) {
   1919     __ j(NegateCondition(cc), chunk_->GetAssemblyLabel(right_block));
   1920   } else if (right_block == next_block) {
   1921     __ j(cc, chunk_->GetAssemblyLabel(left_block));
   1922   } else {
   1923     __ j(cc, chunk_->GetAssemblyLabel(left_block));
   1924     if (cc != always) {
   1925       __ jmp(chunk_->GetAssemblyLabel(right_block));
   1926     }
   1927   }
   1928 }
   1929 
   1930 
   1931 template <class InstrType>
   1932 void LCodeGen::EmitTrueBranch(InstrType instr, Condition cc) {
   1933   int true_block = instr->TrueDestination(chunk_);
   1934   __ j(cc, chunk_->GetAssemblyLabel(true_block));
   1935 }
   1936 
   1937 
   1938 template <class InstrType>
   1939 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cc) {
   1940   int false_block = instr->FalseDestination(chunk_);
   1941   __ j(cc, chunk_->GetAssemblyLabel(false_block));
   1942 }
   1943 
   1944 
   1945 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
   1946   __ int3();
   1947 }
   1948 
   1949 
   1950 void LCodeGen::DoBranch(LBranch* instr) {
   1951   Representation r = instr->hydrogen()->value()->representation();
   1952   if (r.IsInteger32()) {
   1953     DCHECK(!info()->IsStub());
   1954     Register reg = ToRegister(instr->value());
   1955     __ testl(reg, reg);
   1956     EmitBranch(instr, not_zero);
   1957   } else if (r.IsSmi()) {
   1958     DCHECK(!info()->IsStub());
   1959     Register reg = ToRegister(instr->value());
   1960     __ testp(reg, reg);
   1961     EmitBranch(instr, not_zero);
   1962   } else if (r.IsDouble()) {
   1963     DCHECK(!info()->IsStub());
   1964     XMMRegister reg = ToDoubleRegister(instr->value());
   1965     XMMRegister xmm_scratch = double_scratch0();
   1966     __ Xorpd(xmm_scratch, xmm_scratch);
   1967     __ Ucomisd(reg, xmm_scratch);
   1968     EmitBranch(instr, not_equal);
   1969   } else {
   1970     DCHECK(r.IsTagged());
   1971     Register reg = ToRegister(instr->value());
   1972     HType type = instr->hydrogen()->value()->type();
   1973     if (type.IsBoolean()) {
   1974       DCHECK(!info()->IsStub());
   1975       __ CompareRoot(reg, Heap::kTrueValueRootIndex);
   1976       EmitBranch(instr, equal);
   1977     } else if (type.IsSmi()) {
   1978       DCHECK(!info()->IsStub());
   1979       __ SmiCompare(reg, Smi::kZero);
   1980       EmitBranch(instr, not_equal);
   1981     } else if (type.IsJSArray()) {
   1982       DCHECK(!info()->IsStub());
   1983       EmitBranch(instr, no_condition);
   1984     } else if (type.IsHeapNumber()) {
   1985       DCHECK(!info()->IsStub());
   1986       XMMRegister xmm_scratch = double_scratch0();
   1987       __ Xorpd(xmm_scratch, xmm_scratch);
   1988       __ Ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
   1989       EmitBranch(instr, not_equal);
   1990     } else if (type.IsString()) {
   1991       DCHECK(!info()->IsStub());
   1992       __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
   1993       EmitBranch(instr, not_equal);
   1994     } else {
   1995       ToBooleanHints expected = instr->hydrogen()->expected_input_types();
   1996       // Avoid deopts in the case where we've never executed this path before.
   1997       if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
   1998 
   1999       if (expected & ToBooleanHint::kUndefined) {
   2000         // undefined -> false.
   2001         __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
   2002         __ j(equal, instr->FalseLabel(chunk_));
   2003       }
   2004       if (expected & ToBooleanHint::kBoolean) {
   2005         // true -> true.
   2006         __ CompareRoot(reg, Heap::kTrueValueRootIndex);
   2007         __ j(equal, instr->TrueLabel(chunk_));
   2008         // false -> false.
   2009         __ CompareRoot(reg, Heap::kFalseValueRootIndex);
   2010         __ j(equal, instr->FalseLabel(chunk_));
   2011       }
   2012       if (expected & ToBooleanHint::kNull) {
   2013         // 'null' -> false.
   2014         __ CompareRoot(reg, Heap::kNullValueRootIndex);
   2015         __ j(equal, instr->FalseLabel(chunk_));
   2016       }
   2017 
   2018       if (expected & ToBooleanHint::kSmallInteger) {
    2019         // Smis: 0 -> false, all others -> true.
   2020         __ Cmp(reg, Smi::kZero);
   2021         __ j(equal, instr->FalseLabel(chunk_));
   2022         __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
   2023       } else if (expected & ToBooleanHint::kNeedsMap) {
   2024         // If we need a map later and have a Smi -> deopt.
   2025         __ testb(reg, Immediate(kSmiTagMask));
   2026         DeoptimizeIf(zero, instr, DeoptimizeReason::kSmi);
   2027       }
   2028 
   2029       const Register map = kScratchRegister;
   2030       if (expected & ToBooleanHint::kNeedsMap) {
   2031         __ movp(map, FieldOperand(reg, HeapObject::kMapOffset));
   2032 
   2033         if (expected & ToBooleanHint::kCanBeUndetectable) {
   2034           // Undetectable -> false.
   2035           __ testb(FieldOperand(map, Map::kBitFieldOffset),
   2036                    Immediate(1 << Map::kIsUndetectable));
   2037           __ j(not_zero, instr->FalseLabel(chunk_));
   2038         }
   2039       }
   2040 
   2041       if (expected & ToBooleanHint::kReceiver) {
   2042         // spec object -> true.
   2043         __ CmpInstanceType(map, FIRST_JS_RECEIVER_TYPE);
   2044         __ j(above_equal, instr->TrueLabel(chunk_));
   2045       }
   2046 
   2047       if (expected & ToBooleanHint::kString) {
   2048         // String value -> false iff empty.
   2049         Label not_string;
   2050         __ CmpInstanceType(map, FIRST_NONSTRING_TYPE);
   2051         __ j(above_equal, &not_string, Label::kNear);
   2052         __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
   2053         __ j(not_zero, instr->TrueLabel(chunk_));
   2054         __ jmp(instr->FalseLabel(chunk_));
   2055         __ bind(&not_string);
   2056       }
   2057 
   2058       if (expected & ToBooleanHint::kSymbol) {
   2059         // Symbol value -> true.
   2060         __ CmpInstanceType(map, SYMBOL_TYPE);
   2061         __ j(equal, instr->TrueLabel(chunk_));
   2062       }
   2063 
   2064       if (expected & ToBooleanHint::kSimdValue) {
   2065         // SIMD value -> true.
   2066         __ CmpInstanceType(map, SIMD128_VALUE_TYPE);
   2067         __ j(equal, instr->TrueLabel(chunk_));
   2068       }
   2069 
   2070       if (expected & ToBooleanHint::kHeapNumber) {
   2071         // heap number -> false iff +0, -0, or NaN.
   2072         Label not_heap_number;
   2073         __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
   2074         __ j(not_equal, &not_heap_number, Label::kNear);
   2075         XMMRegister xmm_scratch = double_scratch0();
   2076         __ Xorpd(xmm_scratch, xmm_scratch);
   2077         __ Ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
   2078         __ j(zero, instr->FalseLabel(chunk_));
   2079         __ jmp(instr->TrueLabel(chunk_));
   2080         __ bind(&not_heap_number);
   2081       }
   2082 
   2083       if (expected != ToBooleanHint::kAny) {
   2084         // We've seen something for the first time -> deopt.
   2085         // This can only happen if we are not generic already.
   2086         DeoptimizeIf(no_condition, instr, DeoptimizeReason::kUnexpectedObject);
   2087       }
   2088     }
   2089   }
   2090 }
   2091 
   2092 
   2093 void LCodeGen::EmitGoto(int block) {
   2094   if (!IsNextEmittedBlock(block)) {
   2095     __ jmp(chunk_->GetAssemblyLabel(chunk_->LookupDestination(block)));
   2096   }
   2097 }
   2098 
   2099 
   2100 void LCodeGen::DoGoto(LGoto* instr) {
   2101   EmitGoto(instr->block_id());
   2102 }
   2103 
   2104 
   2105 inline Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
   2106   Condition cond = no_condition;
   2107   switch (op) {
   2108     case Token::EQ:
   2109     case Token::EQ_STRICT:
   2110       cond = equal;
   2111       break;
   2112     case Token::NE:
   2113     case Token::NE_STRICT:
   2114       cond = not_equal;
   2115       break;
   2116     case Token::LT:
   2117       cond = is_unsigned ? below : less;
   2118       break;
   2119     case Token::GT:
   2120       cond = is_unsigned ? above : greater;
   2121       break;
   2122     case Token::LTE:
   2123       cond = is_unsigned ? below_equal : less_equal;
   2124       break;
   2125     case Token::GTE:
   2126       cond = is_unsigned ? above_equal : greater_equal;
   2127       break;
   2128     case Token::IN:
   2129     case Token::INSTANCEOF:
   2130     default:
   2131       UNREACHABLE();
   2132   }
   2133   return cond;
   2134 }
   2135 
   2136 
   2137 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
   2138   LOperand* left = instr->left();
   2139   LOperand* right = instr->right();
   2140   bool is_unsigned =
   2141       instr->is_double() ||
   2142       instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
   2143       instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
   2144   Condition cc = TokenToCondition(instr->op(), is_unsigned);
   2145 
   2146   if (left->IsConstantOperand() && right->IsConstantOperand()) {
   2147     // We can statically evaluate the comparison.
   2148     double left_val = ToDouble(LConstantOperand::cast(left));
   2149     double right_val = ToDouble(LConstantOperand::cast(right));
   2150     int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
   2151                          ? instr->TrueDestination(chunk_)
   2152                          : instr->FalseDestination(chunk_);
   2153     EmitGoto(next_block);
   2154   } else {
   2155     if (instr->is_double()) {
   2156       // Don't base result on EFLAGS when a NaN is involved. Instead
   2157       // jump to the false block.
   2158       __ Ucomisd(ToDoubleRegister(left), ToDoubleRegister(right));
   2159       __ j(parity_even, instr->FalseLabel(chunk_));
   2160     } else {
   2161       int32_t value;
   2162       if (right->IsConstantOperand()) {
   2163         value = ToInteger32(LConstantOperand::cast(right));
   2164         if (instr->hydrogen_value()->representation().IsSmi()) {
   2165           __ Cmp(ToRegister(left), Smi::FromInt(value));
   2166         } else {
   2167           __ cmpl(ToRegister(left), Immediate(value));
   2168         }
   2169       } else if (left->IsConstantOperand()) {
   2170         value = ToInteger32(LConstantOperand::cast(left));
   2171         if (instr->hydrogen_value()->representation().IsSmi()) {
   2172           if (right->IsRegister()) {
   2173             __ Cmp(ToRegister(right), Smi::FromInt(value));
   2174           } else {
   2175             __ Cmp(ToOperand(right), Smi::FromInt(value));
   2176           }
   2177         } else if (right->IsRegister()) {
   2178           __ cmpl(ToRegister(right), Immediate(value));
   2179         } else {
   2180           __ cmpl(ToOperand(right), Immediate(value));
   2181         }
   2182         // We commuted the operands, so commute the condition.
   2183         cc = CommuteCondition(cc);
   2184       } else if (instr->hydrogen_value()->representation().IsSmi()) {
   2185         if (right->IsRegister()) {
   2186           __ cmpp(ToRegister(left), ToRegister(right));
   2187         } else {
   2188           __ cmpp(ToRegister(left), ToOperand(right));
   2189         }
   2190       } else {
   2191         if (right->IsRegister()) {
   2192           __ cmpl(ToRegister(left), ToRegister(right));
   2193         } else {
   2194           __ cmpl(ToRegister(left), ToOperand(right));
   2195         }
   2196       }
   2197     }
   2198     EmitBranch(instr, cc);
   2199   }
   2200 }
   2201 
   2202 
   2203 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
   2204   Register left = ToRegister(instr->left());
   2205 
   2206   if (instr->right()->IsConstantOperand()) {
   2207     Handle<Object> right = ToHandle(LConstantOperand::cast(instr->right()));
   2208     __ Cmp(left, right);
   2209   } else {
   2210     Register right = ToRegister(instr->right());
   2211     __ cmpp(left, right);
   2212   }
   2213   EmitBranch(instr, equal);
   2214 }
   2215 
   2216 
   2217 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
   2218   if (instr->hydrogen()->representation().IsTagged()) {
   2219     Register input_reg = ToRegister(instr->object());
   2220     __ Cmp(input_reg, factory()->the_hole_value());
   2221     EmitBranch(instr, equal);
   2222     return;
   2223   }
   2224 
   2225   XMMRegister input_reg = ToDoubleRegister(instr->object());
   2226   __ Ucomisd(input_reg, input_reg);
   2227   EmitFalseBranch(instr, parity_odd);
   2228 
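             // The hole is encoded as a particular NaN bit pattern, so a non-NaN
             // value (parity_odd above) cannot be the hole. The double is briefly
             // spilled to the stack and its upper 32 bits are then compared against
             // the hole NaN pattern (read just below the restored rsp).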
   2229   __ subp(rsp, Immediate(kDoubleSize));
   2230   __ Movsd(MemOperand(rsp, 0), input_reg);
   2231   __ addp(rsp, Immediate(kDoubleSize));
   2232 
   2233   int offset = sizeof(kHoleNanUpper32);
   2234   __ cmpl(MemOperand(rsp, -offset), Immediate(kHoleNanUpper32));
   2235   EmitBranch(instr, equal);
   2236 }
   2237 
   2238 
   2239 Condition LCodeGen::EmitIsString(Register input,
   2240                                  Register temp1,
   2241                                  Label* is_not_string,
   2242                                  SmiCheck check_needed = INLINE_SMI_CHECK) {
   2243   if (check_needed == INLINE_SMI_CHECK) {
   2244     __ JumpIfSmi(input, is_not_string);
   2245   }
   2246 
    2247   Condition cond = masm_->IsObjectStringType(input, temp1, temp1);
   2248 
   2249   return cond;
   2250 }
   2251 
   2252 
   2253 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
   2254   Register reg = ToRegister(instr->value());
   2255   Register temp = ToRegister(instr->temp());
   2256 
   2257   SmiCheck check_needed =
   2258       instr->hydrogen()->value()->type().IsHeapObject()
   2259           ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   2260 
   2261   Condition true_cond = EmitIsString(
   2262       reg, temp, instr->FalseLabel(chunk_), check_needed);
   2263 
   2264   EmitBranch(instr, true_cond);
   2265 }
   2266 
   2267 
   2268 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
   2269   Condition is_smi;
   2270   if (instr->value()->IsRegister()) {
   2271     Register input = ToRegister(instr->value());
   2272     is_smi = masm()->CheckSmi(input);
   2273   } else {
   2274     Operand input = ToOperand(instr->value());
   2275     is_smi = masm()->CheckSmi(input);
   2276   }
   2277   EmitBranch(instr, is_smi);
   2278 }
   2279 
   2280 
   2281 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
   2282   Register input = ToRegister(instr->value());
   2283   Register temp = ToRegister(instr->temp());
   2284 
   2285   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   2286     __ JumpIfSmi(input, instr->FalseLabel(chunk_));
   2287   }
   2288   __ movp(temp, FieldOperand(input, HeapObject::kMapOffset));
   2289   __ testb(FieldOperand(temp, Map::kBitFieldOffset),
   2290            Immediate(1 << Map::kIsUndetectable));
   2291   EmitBranch(instr, not_zero);
   2292 }
   2293 
   2294 
   2295 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
   2296   DCHECK(ToRegister(instr->context()).is(rsi));
   2297   DCHECK(ToRegister(instr->left()).is(rdx));
   2298   DCHECK(ToRegister(instr->right()).is(rax));
   2299 
   2300   Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
   2301   CallCode(code, RelocInfo::CODE_TARGET, instr);
   2302   __ CompareRoot(rax, Heap::kTrueValueRootIndex);
   2303   EmitBranch(instr, equal);
   2304 }
   2305 
   2306 
   2307 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
   2308   InstanceType from = instr->from();
   2309   InstanceType to = instr->to();
   2310   if (from == FIRST_TYPE) return to;
   2311   DCHECK(from == to || to == LAST_TYPE);
   2312   return from;
   2313 }
   2314 
   2315 
   2316 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
   2317   InstanceType from = instr->from();
   2318   InstanceType to = instr->to();
   2319   if (from == to) return equal;
   2320   if (to == LAST_TYPE) return above_equal;
   2321   if (from == FIRST_TYPE) return below_equal;
   2322   UNREACHABLE();
   2323   return equal;
   2324 }
   2325 
   2326 
   2327 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
   2328   Register input = ToRegister(instr->value());
   2329 
   2330   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   2331     __ JumpIfSmi(input, instr->FalseLabel(chunk_));
   2332   }
   2333 
   2334   __ CmpObjectType(input, TestType(instr->hydrogen()), kScratchRegister);
   2335   EmitBranch(instr, BranchCondition(instr->hydrogen()));
   2336 }
   2337 
   2338 // Branches to a label or falls through with the answer in the z flag.
   2339 // Trashes the temp register.
   2340 void LCodeGen::EmitClassOfTest(Label* is_true,
   2341                                Label* is_false,
   2342                                Handle<String> class_name,
   2343                                Register input,
   2344                                Register temp,
   2345                                Register temp2) {
   2346   DCHECK(!input.is(temp));
   2347   DCHECK(!input.is(temp2));
   2348   DCHECK(!temp.is(temp2));
   2349 
   2350   __ JumpIfSmi(input, is_false);
   2351 
   2352   __ CmpObjectType(input, FIRST_FUNCTION_TYPE, temp);
   2353   STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
   2354   if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
   2355     __ j(above_equal, is_true);
   2356   } else {
   2357     __ j(above_equal, is_false);
   2358   }
   2359 
   2360   // Check if the constructor in the map is a function.
   2361   __ GetMapConstructor(temp, temp, kScratchRegister);
   2362 
   2363   // Objects with a non-function constructor have class 'Object'.
   2364   __ CmpInstanceType(kScratchRegister, JS_FUNCTION_TYPE);
   2365   if (String::Equals(class_name, isolate()->factory()->Object_string())) {
   2366     __ j(not_equal, is_true);
   2367   } else {
   2368     __ j(not_equal, is_false);
   2369   }
   2370 
   2371   // temp now contains the constructor function. Grab the
   2372   // instance class name from there.
   2373   __ movp(temp, FieldOperand(temp, JSFunction::kSharedFunctionInfoOffset));
   2374   __ movp(temp, FieldOperand(temp,
   2375                              SharedFunctionInfo::kInstanceClassNameOffset));
   2376   // The class name we are testing against is internalized since it's a literal.
   2377   // The name in the constructor is internalized because of the way the context
   2378   // is booted.  This routine isn't expected to work for random API-created
   2379   // classes and it doesn't have to because you can't access it with natives
   2380   // syntax.  Since both sides are internalized it is sufficient to use an
   2381   // identity comparison.
   2382   DCHECK(class_name->IsInternalizedString());
   2383   __ Cmp(temp, class_name);
   2384   // End with the answer in the z flag.
   2385 }
   2386 
   2387 
   2388 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
   2389   Register input = ToRegister(instr->value());
   2390   Register temp = ToRegister(instr->temp());
   2391   Register temp2 = ToRegister(instr->temp2());
   2392   Handle<String> class_name = instr->hydrogen()->class_name();
   2393 
   2394   EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
   2395       class_name, input, temp, temp2);
   2396 
   2397   EmitBranch(instr, equal);
   2398 }
   2399 
   2400 
   2401 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
   2402   Register reg = ToRegister(instr->value());
   2403 
   2404   __ Cmp(FieldOperand(reg, HeapObject::kMapOffset), instr->map());
   2405   EmitBranch(instr, equal);
   2406 }
   2407 
   2408 
   2409 void LCodeGen::DoHasInPrototypeChainAndBranch(
   2410     LHasInPrototypeChainAndBranch* instr) {
   2411   Register const object = ToRegister(instr->object());
   2412   Register const object_map = kScratchRegister;
   2413   Register const object_prototype = object_map;
   2414   Register const prototype = ToRegister(instr->prototype());
   2415 
   2416   // The {object} must be a spec object.  It's sufficient to know that {object}
   2417   // is not a smi, since all other non-spec objects have {null} prototypes and
   2418   // will be ruled out below.
   2419   if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
   2420     Condition is_smi = __ CheckSmi(object);
   2421     EmitFalseBranch(instr, is_smi);
   2422   }
   2423 
   2424   // Loop through the {object}s prototype chain looking for the {prototype}.
   2425   __ movp(object_map, FieldOperand(object, HeapObject::kMapOffset));
   2426   Label loop;
   2427   __ bind(&loop);
   2428 
   2429   // Deoptimize if the object needs to be access checked.
   2430   __ testb(FieldOperand(object_map, Map::kBitFieldOffset),
   2431            Immediate(1 << Map::kIsAccessCheckNeeded));
   2432   DeoptimizeIf(not_zero, instr, DeoptimizeReason::kAccessCheck);
   2433   // Deoptimize for proxies.
   2434   __ CmpInstanceType(object_map, JS_PROXY_TYPE);
   2435   DeoptimizeIf(equal, instr, DeoptimizeReason::kProxy);
   2436 
   2437   __ movp(object_prototype, FieldOperand(object_map, Map::kPrototypeOffset));
   2438   __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
   2439   EmitFalseBranch(instr, equal);
   2440   __ cmpp(object_prototype, prototype);
   2441   EmitTrueBranch(instr, equal);
   2442   __ movp(object_map, FieldOperand(object_prototype, HeapObject::kMapOffset));
   2443   __ jmp(&loop);
   2444 }
   2445 
   2446 
   2447 void LCodeGen::DoCmpT(LCmpT* instr) {
   2448   DCHECK(ToRegister(instr->context()).is(rsi));
   2449   Token::Value op = instr->op();
   2450 
   2451   Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
   2452   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   2453 
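             // The CompareIC leaves an integer in rax whose relation to zero mirrors
             // the comparison result; materialize the corresponding boolean from it.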
   2454   Condition condition = TokenToCondition(op, false);
   2455   Label true_value, done;
   2456   __ testp(rax, rax);
   2457   __ j(condition, &true_value, Label::kNear);
   2458   __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
   2459   __ jmp(&done, Label::kNear);
   2460   __ bind(&true_value);
   2461   __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
   2462   __ bind(&done);
   2463 }
   2464 
   2465 
   2466 void LCodeGen::DoReturn(LReturn* instr) {
   2467   if (FLAG_trace && info()->IsOptimizing()) {
   2468     // Preserve the return value on the stack and rely on the runtime call
   2469     // to return the value in the same register.  We're leaving the code
   2470     // managed by the register allocator and tearing down the frame, it's
   2471     // safe to write to the context register.
   2472     __ Push(rax);
   2473     __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
   2474     __ CallRuntime(Runtime::kTraceExit);
   2475   }
   2476   if (info()->saves_caller_doubles()) {
   2477     RestoreCallerDoubles();
   2478   }
   2479   if (NeedsEagerFrame()) {
   2480     __ movp(rsp, rbp);
   2481     __ popq(rbp);
   2482   }
   2483   if (instr->has_constant_parameter_count()) {
   2484     __ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize,
   2485            rcx);
   2486   } else {
   2487     DCHECK(info()->IsStub());  // Functions would need to drop one more value.
   2488     Register reg = ToRegister(instr->parameter_count());
    2489     // The argument count parameter is a smi.
   2490     __ SmiToInteger32(reg, reg);
   2491     Register return_addr_reg = reg.is(rcx) ? rbx : rcx;
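               // Drop the arguments manually: pop the return address into a spare
               // register, advance rsp past parameter_count stack slots, and jump
               // back to the caller.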
   2492     __ PopReturnAddressTo(return_addr_reg);
   2493     __ shlp(reg, Immediate(kPointerSizeLog2));
   2494     __ addp(rsp, reg);
   2495     __ jmp(return_addr_reg);
   2496   }
   2497 }
   2498 
   2499 
   2500 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
   2501   Register context = ToRegister(instr->context());
   2502   Register result = ToRegister(instr->result());
   2503   __ movp(result, ContextOperand(context, instr->slot_index()));
   2504   if (instr->hydrogen()->RequiresHoleCheck()) {
   2505     __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
   2506     if (instr->hydrogen()->DeoptimizesOnHole()) {
   2507       DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
   2508     } else {
   2509       Label is_not_hole;
   2510       __ j(not_equal, &is_not_hole, Label::kNear);
   2511       __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
   2512       __ bind(&is_not_hole);
   2513     }
   2514   }
   2515 }
   2516 
   2517 
   2518 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
   2519   Register context = ToRegister(instr->context());
   2520   Register value = ToRegister(instr->value());
   2521 
   2522   Operand target = ContextOperand(context, instr->slot_index());
   2523 
   2524   Label skip_assignment;
   2525   if (instr->hydrogen()->RequiresHoleCheck()) {
   2526     __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
   2527     if (instr->hydrogen()->DeoptimizesOnHole()) {
   2528       DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
   2529     } else {
   2530       __ j(not_equal, &skip_assignment);
   2531     }
   2532   }
   2533   __ movp(target, value);
   2534 
   2535   if (instr->hydrogen()->NeedsWriteBarrier()) {
   2536     SmiCheck check_needed =
   2537       instr->hydrogen()->value()->type().IsHeapObject()
   2538           ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   2539     int offset = Context::SlotOffset(instr->slot_index());
   2540     Register scratch = ToRegister(instr->temp());
   2541     __ RecordWriteContextSlot(context,
   2542                               offset,
   2543                               value,
   2544                               scratch,
   2545                               kSaveFPRegs,
   2546                               EMIT_REMEMBERED_SET,
   2547                               check_needed);
   2548   }
   2549 
   2550   __ bind(&skip_assignment);
   2551 }
   2552 
   2553 
   2554 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
   2555   HObjectAccess access = instr->hydrogen()->access();
   2556   int offset = access.offset();
   2557 
   2558   if (access.IsExternalMemory()) {
   2559     Register result = ToRegister(instr->result());
   2560     if (instr->object()->IsConstantOperand()) {
   2561       DCHECK(result.is(rax));
   2562       __ load_rax(ToExternalReference(LConstantOperand::cast(instr->object())));
   2563     } else {
   2564       Register object = ToRegister(instr->object());
   2565       __ Load(result, MemOperand(object, offset), access.representation());
   2566     }
   2567     return;
   2568   }
   2569 
   2570   Register object = ToRegister(instr->object());
   2571   if (instr->hydrogen()->representation().IsDouble()) {
   2572     DCHECK(access.IsInobject());
   2573     XMMRegister result = ToDoubleRegister(instr->result());
   2574     __ Movsd(result, FieldOperand(object, offset));
   2575     return;
   2576   }
   2577 
   2578   Register result = ToRegister(instr->result());
   2579   if (!access.IsInobject()) {
   2580     __ movp(result, FieldOperand(object, JSObject::kPropertiesOffset));
   2581     object = result;
   2582   }
   2583 
   2584   Representation representation = access.representation();
   2585   if (representation.IsSmi() && SmiValuesAre32Bits() &&
   2586       instr->hydrogen()->representation().IsInteger32()) {
   2587     if (FLAG_debug_code) {
   2588       Register scratch = kScratchRegister;
   2589       __ Load(scratch, FieldOperand(object, offset), representation);
   2590       __ AssertSmi(scratch);
   2591     }
   2592 
   2593     // Read int value directly from upper half of the smi.
   2594     STATIC_ASSERT(kSmiTag == 0);
   2595     DCHECK(kSmiTagSize + kSmiShiftSize == 32);
   2596     offset += kPointerSize / 2;
   2597     representation = Representation::Integer32();
   2598   }
   2599   __ Load(result, FieldOperand(object, offset), representation);
   2600 }
   2601 
   2602 
   2603 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
   2604   Register function = ToRegister(instr->function());
   2605   Register result = ToRegister(instr->result());
   2606 
   2607   // Get the prototype or initial map from the function.
   2608   __ movp(result,
   2609          FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   2610 
   2611   // Check that the function has a prototype or an initial map.
   2612   __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
   2613   DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
   2614 
   2615   // If the function does not have an initial map, we're done.
   2616   Label done;
   2617   __ CmpObjectType(result, MAP_TYPE, kScratchRegister);
   2618   __ j(not_equal, &done, Label::kNear);
   2619 
   2620   // Get the prototype from the initial map.
   2621   __ movp(result, FieldOperand(result, Map::kPrototypeOffset));
   2622 
   2623   // All done.
   2624   __ bind(&done);
   2625 }
   2626 
   2627 
   2628 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
   2629   Register result = ToRegister(instr->result());
   2630   __ LoadRoot(result, instr->index());
   2631 }
   2632 
   2633 
   2634 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
   2635   Register arguments = ToRegister(instr->arguments());
   2636   Register result = ToRegister(instr->result());
   2637 
   2638   if (instr->length()->IsConstantOperand() &&
   2639       instr->index()->IsConstantOperand()) {
   2640     int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   2641     int32_t const_length = ToInteger32(LConstantOperand::cast(instr->length()));
   2642     if (const_index >= 0 && const_index < const_length) {
   2643       StackArgumentsAccessor args(arguments, const_length,
   2644                                   ARGUMENTS_DONT_CONTAIN_RECEIVER);
   2645       __ movp(result, args.GetArgumentOperand(const_index));
   2646     } else if (FLAG_debug_code) {
   2647       __ int3();
   2648     }
   2649   } else {
   2650     Register length = ToRegister(instr->length());
   2651     // There are two words between the frame pointer and the last argument.
    2652     // Subtracting from length accounts for one of them; add one more.
   2653     if (instr->index()->IsRegister()) {
   2654       __ subl(length, ToRegister(instr->index()));
   2655     } else {
   2656       __ subl(length, ToOperand(instr->index()));
   2657     }
   2658     StackArgumentsAccessor args(arguments, length,
   2659                                 ARGUMENTS_DONT_CONTAIN_RECEIVER);
   2660     __ movp(result, args.GetArgumentOperand(0));
   2661   }
   2662 }
   2663 
   2664 
   2665 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
   2666   ElementsKind elements_kind = instr->elements_kind();
   2667   LOperand* key = instr->key();
   2668   if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
   2669     Register key_reg = ToRegister(key);
   2670     Representation key_representation =
   2671         instr->hydrogen()->key()->representation();
   2672     if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
   2673       __ SmiToInteger64(key_reg, key_reg);
   2674     } else if (instr->hydrogen()->IsDehoisted()) {
    2675       // Sign extend the key because it could be a 32-bit negative value
    2676       // and the dehoisted address computation happens in 64 bits.
   2677       __ movsxlq(key_reg, key_reg);
   2678     }
   2679   }
   2680   Operand operand(BuildFastArrayOperand(
   2681       instr->elements(),
   2682       key,
   2683       instr->hydrogen()->key()->representation(),
   2684       elements_kind,
   2685       instr->base_offset()));
   2686 
   2687   if (elements_kind == FLOAT32_ELEMENTS) {
   2688     XMMRegister result(ToDoubleRegister(instr->result()));
   2689     __ Cvtss2sd(result, operand);
   2690   } else if (elements_kind == FLOAT64_ELEMENTS) {
   2691     __ Movsd(ToDoubleRegister(instr->result()), operand);
   2692   } else {
   2693     Register result(ToRegister(instr->result()));
   2694     switch (elements_kind) {
   2695       case INT8_ELEMENTS:
   2696         __ movsxbl(result, operand);
   2697         break;
   2698       case UINT8_ELEMENTS:
   2699       case UINT8_CLAMPED_ELEMENTS:
   2700         __ movzxbl(result, operand);
   2701         break;
   2702       case INT16_ELEMENTS:
   2703         __ movsxwl(result, operand);
   2704         break;
   2705       case UINT16_ELEMENTS:
   2706         __ movzxwl(result, operand);
   2707         break;
   2708       case INT32_ELEMENTS:
   2709         __ movl(result, operand);
   2710         break;
   2711       case UINT32_ELEMENTS:
   2712         __ movl(result, operand);
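                   // A loaded uint32 with the sign bit set does not fit in a signed
                   // 32-bit result; deoptimize unless the value is known to be used
                   // as a uint32.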
   2713         if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
   2714           __ testl(result, result);
   2715           DeoptimizeIf(negative, instr, DeoptimizeReason::kNegativeValue);
   2716         }
   2717         break;
   2718       case FLOAT32_ELEMENTS:
   2719       case FLOAT64_ELEMENTS:
   2720       case FAST_ELEMENTS:
   2721       case FAST_SMI_ELEMENTS:
   2722       case FAST_DOUBLE_ELEMENTS:
   2723       case FAST_HOLEY_ELEMENTS:
   2724       case FAST_HOLEY_SMI_ELEMENTS:
   2725       case FAST_HOLEY_DOUBLE_ELEMENTS:
   2726       case DICTIONARY_ELEMENTS:
   2727       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
   2728       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
   2729       case FAST_STRING_WRAPPER_ELEMENTS:
   2730       case SLOW_STRING_WRAPPER_ELEMENTS:
   2731       case NO_ELEMENTS:
   2732         UNREACHABLE();
   2733         break;
   2734     }
   2735   }
   2736 }
   2737 
   2738 
   2739 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
   2740   XMMRegister result(ToDoubleRegister(instr->result()));
   2741   LOperand* key = instr->key();
   2742   if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
   2743       instr->hydrogen()->IsDehoisted()) {
    2744     // Sign extend the key because it could be a 32-bit negative value
    2745     // and the dehoisted address computation happens in 64 bits.
   2746     __ movsxlq(ToRegister(key), ToRegister(key));
   2747   }
   2748   if (instr->hydrogen()->RequiresHoleCheck()) {
   2749     Operand hole_check_operand = BuildFastArrayOperand(
   2750         instr->elements(),
   2751         key,
   2752         instr->hydrogen()->key()->representation(),
   2753         FAST_DOUBLE_ELEMENTS,
   2754         instr->base_offset() + sizeof(kHoleNanLower32));
   2755     __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
   2756     DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
   2757   }
   2758 
   2759   Operand double_load_operand = BuildFastArrayOperand(
   2760       instr->elements(),
   2761       key,
   2762       instr->hydrogen()->key()->representation(),
   2763       FAST_DOUBLE_ELEMENTS,
   2764       instr->base_offset());
   2765   __ Movsd(result, double_load_operand);
   2766 }
   2767 
   2768 
   2769 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
   2770   HLoadKeyed* hinstr = instr->hydrogen();
   2771   Register result = ToRegister(instr->result());
   2772   LOperand* key = instr->key();
   2773   bool requires_hole_check = hinstr->RequiresHoleCheck();
   2774   Representation representation = hinstr->representation();
   2775   int offset = instr->base_offset();
   2776 
   2777   if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
   2778       instr->hydrogen()->IsDehoisted()) {
    2779     // Sign extend the key because it could be a 32-bit negative value
    2780     // and the dehoisted address computation happens in 64 bits.
   2781     __ movsxlq(ToRegister(key), ToRegister(key));
   2782   }
   2783   if (representation.IsInteger32() && SmiValuesAre32Bits() &&
   2784       hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
   2785     DCHECK(!requires_hole_check);
   2786     if (FLAG_debug_code) {
   2787       Register scratch = kScratchRegister;
   2788       __ Load(scratch,
   2789               BuildFastArrayOperand(instr->elements(),
   2790                                     key,
   2791                                     instr->hydrogen()->key()->representation(),
   2792                                     FAST_ELEMENTS,
   2793                                     offset),
   2794               Representation::Smi());
   2795       __ AssertSmi(scratch);
   2796     }
   2797     // Read int value directly from upper half of the smi.
   2798     STATIC_ASSERT(kSmiTag == 0);
   2799     DCHECK(kSmiTagSize + kSmiShiftSize == 32);
   2800     offset += kPointerSize / 2;
   2801   }
   2802 
   2803   __ Load(result,
   2804           BuildFastArrayOperand(instr->elements(), key,
   2805                                 instr->hydrogen()->key()->representation(),
   2806                                 FAST_ELEMENTS, offset),
   2807           representation);
   2808 
   2809   // Check for the hole value.
   2810   if (requires_hole_check) {
   2811     if (IsFastSmiElementsKind(hinstr->elements_kind())) {
   2812       Condition smi = __ CheckSmi(result);
   2813       DeoptimizeIf(NegateCondition(smi), instr, DeoptimizeReason::kNotASmi);
   2814     } else {
   2815       __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
   2816       DeoptimizeIf(equal, instr, DeoptimizeReason::kHole);
   2817     }
   2818   } else if (hinstr->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
   2819     DCHECK(hinstr->elements_kind() == FAST_HOLEY_ELEMENTS);
   2820     Label done;
   2821     __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
   2822     __ j(not_equal, &done);
   2823     if (info()->IsStub()) {
   2824       // A stub can safely convert the hole to undefined only if the array
   2825       // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
   2826       // it needs to bail out.
   2827       __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
   2828       __ Cmp(FieldOperand(result, Cell::kValueOffset),
   2829              Smi::FromInt(Isolate::kProtectorValid));
   2830       DeoptimizeIf(not_equal, instr, DeoptimizeReason::kHole);
   2831     }
   2832     __ Move(result, isolate()->factory()->undefined_value());
   2833     __ bind(&done);
   2834   }
   2835 }
   2836 
   2837 
   2838 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
   2839   if (instr->is_fixed_typed_array()) {
   2840     DoLoadKeyedExternalArray(instr);
   2841   } else if (instr->hydrogen()->representation().IsDouble()) {
   2842     DoLoadKeyedFixedDoubleArray(instr);
   2843   } else {
   2844     DoLoadKeyedFixedArray(instr);
   2845   }
   2846 }
   2847 
   2848 
   2849 Operand LCodeGen::BuildFastArrayOperand(
   2850     LOperand* elements_pointer,
   2851     LOperand* key,
   2852     Representation key_representation,
   2853     ElementsKind elements_kind,
   2854     uint32_t offset) {
   2855   Register elements_pointer_reg = ToRegister(elements_pointer);
   2856   int shift_size = ElementsKindToShiftSize(elements_kind);
   2857   if (key->IsConstantOperand()) {
   2858     int32_t constant_value = ToInteger32(LConstantOperand::cast(key));
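            // The constant index is shifted left by up to 3 below; abort if any of
            // its top four bits are set, since the shifted value would then no
            // longer fit in the 32-bit operand displacement.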
   2859     if (constant_value & 0xF0000000) {
   2860       Abort(kArrayIndexConstantValueTooBig);
   2861     }
   2862     return Operand(elements_pointer_reg,
   2863                    (constant_value << shift_size) + offset);
   2864   } else {
   2865     // Guaranteed by ArrayInstructionInterface::KeyedAccessIndexRequirement().
   2866     DCHECK(key_representation.IsInteger32());
   2867 
   2868     ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
   2869     return Operand(elements_pointer_reg,
   2870                    ToRegister(key),
   2871                    scale_factor,
   2872                    offset);
   2873   }
   2874 }
   2875 
   2876 
   2877 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
   2878   Register result = ToRegister(instr->result());
   2879 
   2880   if (instr->hydrogen()->from_inlined()) {
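            // There is no separate frame for the inlined function; compute the
            // frame pointer a frame pushed at the current rsp would have (rsp
            // minus the return-address and saved-rbp slots).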
   2881     __ leap(result, Operand(rsp, -kFPOnStackSize + -kPCOnStackSize));
   2882   } else if (instr->hydrogen()->arguments_adaptor()) {
   2883     // Check for an arguments adaptor frame.
   2884     Label done, adapted;
   2885     __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
   2886     __ Cmp(Operand(result, CommonFrameConstants::kContextOrFrameTypeOffset),
   2887            Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   2888     __ j(equal, &adapted, Label::kNear);
   2889 
   2890     // No arguments adaptor frame.
   2891     __ movp(result, rbp);
   2892     __ jmp(&done, Label::kNear);
   2893 
   2894     // Arguments adaptor frame present.
   2895     __ bind(&adapted);
   2896     __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
   2897 
   2898     // Result is the frame pointer for the frame if not adapted and for the real
   2899     // frame below the adaptor frame if adapted.
   2900     __ bind(&done);
   2901   } else {
   2902     __ movp(result, rbp);
   2903   }
   2904 }
   2905 
   2906 
   2907 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
   2908   Register result = ToRegister(instr->result());
   2909 
   2910   Label done;
   2911 
   2912   // Without an arguments adaptor frame, the number of arguments is fixed.
   2913   if (instr->elements()->IsRegister()) {
   2914     __ cmpp(rbp, ToRegister(instr->elements()));
   2915   } else {
   2916     __ cmpp(rbp, ToOperand(instr->elements()));
   2917   }
   2918   __ movl(result, Immediate(scope()->num_parameters()));
   2919   __ j(equal, &done, Label::kNear);
   2920 
   2921   // Arguments adaptor frame present. Get argument length from there.
   2922   __ movp(result, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
   2923   __ SmiToInteger32(result,
   2924                     Operand(result,
   2925                             ArgumentsAdaptorFrameConstants::kLengthOffset));
   2926 
   2927   // Argument length is in result register.
   2928   __ bind(&done);
   2929 }
   2930 
   2931 
   2932 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
   2933   Register receiver = ToRegister(instr->receiver());
   2934   Register function = ToRegister(instr->function());
   2935 
   2936   // If the receiver is null or undefined, we have to pass the global
   2937   // object as a receiver to normal functions. Values have to be
   2938   // passed unchanged to builtins and strict-mode functions.
   2939   Label global_object, receiver_ok;
   2940   Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
   2941 
   2942   if (!instr->hydrogen()->known_function()) {
   2943     // Do not transform the receiver to object for strict mode
   2944     // functions.
   2945     __ movp(kScratchRegister,
   2946             FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
   2947     __ testb(FieldOperand(kScratchRegister,
   2948                           SharedFunctionInfo::kStrictModeByteOffset),
   2949              Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
   2950     __ j(not_equal, &receiver_ok, dist);
   2951 
   2952     // Do not transform the receiver to object for builtins.
   2953     __ testb(FieldOperand(kScratchRegister,
   2954                           SharedFunctionInfo::kNativeByteOffset),
   2955              Immediate(1 << SharedFunctionInfo::kNativeBitWithinByte));
   2956     __ j(not_equal, &receiver_ok, dist);
   2957   }
   2958 
   2959   // Normal function. Replace undefined or null with global receiver.
   2960   __ CompareRoot(receiver, Heap::kNullValueRootIndex);
   2961   __ j(equal, &global_object, Label::kNear);
   2962   __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
   2963   __ j(equal, &global_object, Label::kNear);
   2964 
   2965   // The receiver should be a JS object.
   2966   Condition is_smi = __ CheckSmi(receiver);
   2967   DeoptimizeIf(is_smi, instr, DeoptimizeReason::kSmi);
   2968   __ CmpObjectType(receiver, FIRST_JS_RECEIVER_TYPE, kScratchRegister);
   2969   DeoptimizeIf(below, instr, DeoptimizeReason::kNotAJavaScriptObject);
   2970 
   2971   __ jmp(&receiver_ok, Label::kNear);
   2972   __ bind(&global_object);
   2973   __ movp(receiver, FieldOperand(function, JSFunction::kContextOffset));
   2974   __ movp(receiver, ContextOperand(receiver, Context::NATIVE_CONTEXT_INDEX));
   2975   __ movp(receiver, ContextOperand(receiver, Context::GLOBAL_PROXY_INDEX));
   2976 
   2977   __ bind(&receiver_ok);
   2978 }
   2979 
   2980 
   2981 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
   2982   Register receiver = ToRegister(instr->receiver());
   2983   Register function = ToRegister(instr->function());
   2984   Register length = ToRegister(instr->length());
   2985   Register elements = ToRegister(instr->elements());
   2986   DCHECK(receiver.is(rax));  // Used for parameter count.
   2987   DCHECK(function.is(rdi));  // Required by InvokeFunction.
   2988   DCHECK(ToRegister(instr->result()).is(rax));
   2989 
   2990   // Copy the arguments to this function possibly from the
   2991   // adaptor frame below it.
   2992   const uint32_t kArgumentsLimit = 1 * KB;
   2993   __ cmpp(length, Immediate(kArgumentsLimit));
   2994   DeoptimizeIf(above, instr, DeoptimizeReason::kTooManyArguments);
   2995 
   2996   __ Push(receiver);
   2997   __ movp(receiver, length);
   2998 
   2999   // Loop through the arguments pushing them onto the execution
   3000   // stack.
   3001   Label invoke, loop;
   3002   // length is a small non-negative integer, due to the test above.
   3003   __ testl(length, length);
   3004   __ j(zero, &invoke, Label::kNear);
   3005   __ bind(&loop);
   3006   StackArgumentsAccessor args(elements, length,
   3007                               ARGUMENTS_DONT_CONTAIN_RECEIVER);
   3008   __ Push(args.GetArgumentOperand(0));
   3009   __ decl(length);
   3010   __ j(not_zero, &loop);
   3011 
   3012   // Invoke the function.
   3013   __ bind(&invoke);
   3014 
   3015   InvokeFlag flag = CALL_FUNCTION;
   3016   if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
   3017     DCHECK(!info()->saves_caller_doubles());
   3018     // TODO(ishell): drop current frame before pushing arguments to the stack.
   3019     flag = JUMP_FUNCTION;
   3020     ParameterCount actual(rax);
   3021     // It is safe to use rbx, rcx and r8 as scratch registers here given that
   3022     // 1) we are not going to return to caller function anyway,
   3023     // 2) rbx (expected number of arguments) will be initialized below.
   3024     PrepareForTailCall(actual, rbx, rcx, r8);
   3025   }
   3026 
   3027   DCHECK(instr->HasPointerMap());
   3028   LPointerMap* pointers = instr->pointer_map();
   3029   SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
   3030   ParameterCount actual(rax);
   3031   __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
   3032 }
   3033 
   3034 
   3035 void LCodeGen::DoPushArgument(LPushArgument* instr) {
   3036   LOperand* argument = instr->value();
   3037   EmitPushTaggedOperand(argument);
   3038 }
   3039 
   3040 
   3041 void LCodeGen::DoDrop(LDrop* instr) {
   3042   __ Drop(instr->count());
   3043 }
   3044 
   3045 
   3046 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   3047   Register result = ToRegister(instr->result());
   3048   __ movp(result, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
   3049 }
   3050 
   3051 
   3052 void LCodeGen::DoContext(LContext* instr) {
   3053   Register result = ToRegister(instr->result());
   3054   if (info()->IsOptimizing()) {
   3055     __ movp(result, Operand(rbp, StandardFrameConstants::kContextOffset));
   3056   } else {
   3057     // If there is no frame, the context must be in rsi.
   3058     DCHECK(result.is(rsi));
   3059   }
   3060 }
   3061 
   3062 
   3063 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
   3064   DCHECK(ToRegister(instr->context()).is(rsi));
   3065   __ Push(instr->hydrogen()->pairs());
   3066   __ Push(Smi::FromInt(instr->hydrogen()->flags()));
   3067   __ Push(instr->hydrogen()->feedback_vector());
   3068   CallRuntime(Runtime::kDeclareGlobals, instr);
   3069 }
   3070 
   3071 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
   3072                                  int formal_parameter_count, int arity,
   3073                                  bool is_tail_call, LInstruction* instr) {
   3074   bool dont_adapt_arguments =
   3075       formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
   3076   bool can_invoke_directly =
   3077       dont_adapt_arguments || formal_parameter_count == arity;
   3078 
   3079   Register function_reg = rdi;
   3080   LPointerMap* pointers = instr->pointer_map();
   3081 
   3082   if (can_invoke_directly) {
   3083     // Change context.
   3084     __ movp(rsi, FieldOperand(function_reg, JSFunction::kContextOffset));
   3085 
   3086     // Always initialize new target and number of actual arguments.
   3087     __ LoadRoot(rdx, Heap::kUndefinedValueRootIndex);
   3088     __ Set(rax, arity);
   3089 
   3090     bool is_self_call = function.is_identical_to(info()->closure());
   3091 
   3092     // Invoke function.
   3093     if (is_self_call) {
   3094       Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
   3095       if (is_tail_call) {
   3096         __ Jump(self, RelocInfo::CODE_TARGET);
   3097       } else {
   3098         __ Call(self, RelocInfo::CODE_TARGET);
   3099       }
   3100     } else {
   3101       Operand target = FieldOperand(function_reg, JSFunction::kCodeEntryOffset);
   3102       if (is_tail_call) {
   3103         __ Jump(target);
   3104       } else {
   3105         __ Call(target);
   3106       }
   3107     }
   3108 
   3109     if (!is_tail_call) {
   3110       // Set up deoptimization.
   3111       RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT, 0);
   3112     }
   3113   } else {
   3114     // We need to adapt arguments.
   3115     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   3116     ParameterCount actual(arity);
   3117     ParameterCount expected(formal_parameter_count);
   3118     InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
   3119     __ InvokeFunction(function_reg, no_reg, expected, actual, flag, generator);
   3120   }
   3121 }
   3122 
   3123 
   3124 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
   3125   DCHECK(ToRegister(instr->result()).is(rax));
   3126 
   3127   if (instr->hydrogen()->IsTailCall()) {
   3128     if (NeedsEagerFrame()) __ leave();
   3129 
   3130     if (instr->target()->IsConstantOperand()) {
   3131       LConstantOperand* target = LConstantOperand::cast(instr->target());
   3132       Handle<Code> code = Handle<Code>::cast(ToHandle(target));
   3133       __ jmp(code, RelocInfo::CODE_TARGET);
   3134     } else {
   3135       DCHECK(instr->target()->IsRegister());
   3136       Register target = ToRegister(instr->target());
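              // The target register holds a Code object; step past the Code header
              // (and clear the heap object tag) to reach the first instruction.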
   3137       __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
   3138       __ jmp(target);
   3139     }
   3140   } else {
   3141     LPointerMap* pointers = instr->pointer_map();
   3142     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   3143 
   3144     if (instr->target()->IsConstantOperand()) {
   3145       LConstantOperand* target = LConstantOperand::cast(instr->target());
   3146       Handle<Code> code = Handle<Code>::cast(ToHandle(target));
   3147       generator.BeforeCall(__ CallSize(code));
   3148       __ call(code, RelocInfo::CODE_TARGET);
   3149     } else {
   3150       DCHECK(instr->target()->IsRegister());
   3151       Register target = ToRegister(instr->target());
   3152       generator.BeforeCall(__ CallSize(target));
   3153       __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
   3154       __ call(target);
   3155     }
   3156     generator.AfterCall();
   3157   }
   3158 }
   3159 
   3160 
   3161 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
   3162   Register input_reg = ToRegister(instr->value());
   3163   __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
   3164                  Heap::kHeapNumberMapRootIndex);
   3165   DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
   3166 
   3167   Label slow, allocated, done;
   3168   uint32_t available_regs = rax.bit() | rcx.bit() | rdx.bit() | rbx.bit();
   3169   available_regs &= ~input_reg.bit();
   3170   if (instr->context()->IsRegister()) {
   3171     // Make sure that the context isn't overwritten in the AllocateHeapNumber
   3172     // macro below.
   3173     available_regs &= ~ToRegister(instr->context()).bit();
   3174   }
   3175 
   3176   Register tmp =
   3177       Register::from_code(base::bits::CountTrailingZeros32(available_regs));
   3178   available_regs &= ~tmp.bit();
   3179   Register tmp2 =
   3180       Register::from_code(base::bits::CountTrailingZeros32(available_regs));
   3181 
   3182   // Preserve the value of all registers.
   3183   PushSafepointRegistersScope scope(this);
   3184 
   3185   __ movl(tmp, FieldOperand(input_reg, HeapNumber::kExponentOffset));
   3186   // Check the sign of the argument. If the argument is positive, just
   3187   // return it. We do not need to patch the stack since |input| and
   3188   // |result| are the same register and |input| will be restored
   3189   // unchanged by popping safepoint registers.
   3190   __ testl(tmp, Immediate(HeapNumber::kSignMask));
   3191   __ j(zero, &done);
   3192 
   3193   __ AllocateHeapNumber(tmp, tmp2, &slow);
   3194   __ jmp(&allocated, Label::kNear);
   3195 
   3196   // Slow case: Call the runtime system to do the number allocation.
   3197   __ bind(&slow);
   3198   CallRuntimeFromDeferred(
   3199       Runtime::kAllocateHeapNumber, 0, instr, instr->context());
   3200   // Set the pointer to the new heap number in tmp.
   3201   if (!tmp.is(rax)) __ movp(tmp, rax);
   3202   // Restore input_reg after call to runtime.
   3203   __ LoadFromSafepointRegisterSlot(input_reg, input_reg);
   3204 
   3205   __ bind(&allocated);
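          // Copy the raw double bits and clear the sign bit by shifting it out
          // and back in.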
   3206   __ movq(tmp2, FieldOperand(input_reg, HeapNumber::kValueOffset));
   3207   __ shlq(tmp2, Immediate(1));
   3208   __ shrq(tmp2, Immediate(1));
   3209   __ movq(FieldOperand(tmp, HeapNumber::kValueOffset), tmp2);
   3210   __ StoreToSafepointRegisterSlot(input_reg, tmp);
   3211 
   3212   __ bind(&done);
   3213 }
   3214 
   3215 
   3216 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
   3217   Register input_reg = ToRegister(instr->value());
   3218   __ testl(input_reg, input_reg);
   3219   Label is_positive;
   3220   __ j(not_sign, &is_positive, Label::kNear);
   3221   __ negl(input_reg);  // Sets flags.
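          // negl leaves the minimal int32 unchanged (and flagged negative); that
          // value has no positive counterpart, so deoptimize.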
   3222   DeoptimizeIf(negative, instr, DeoptimizeReason::kOverflow);
   3223   __ bind(&is_positive);
   3224 }
   3225 
   3226 
   3227 void LCodeGen::EmitSmiMathAbs(LMathAbs* instr) {
   3228   Register input_reg = ToRegister(instr->value());
   3229   __ testp(input_reg, input_reg);
   3230   Label is_positive;
   3231   __ j(not_sign, &is_positive, Label::kNear);
   3232   __ negp(input_reg);  // Sets flags.
   3233   DeoptimizeIf(negative, instr, DeoptimizeReason::kOverflow);
   3234   __ bind(&is_positive);
   3235 }
   3236 
   3237 
   3238 void LCodeGen::DoMathAbs(LMathAbs* instr) {
   3239   // Class for deferred case.
   3240   class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
   3241    public:
   3242     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
   3243         : LDeferredCode(codegen), instr_(instr) { }
   3244     void Generate() override {
   3245       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
   3246     }
   3247     LInstruction* instr() override { return instr_; }
   3248 
   3249    private:
   3250     LMathAbs* instr_;
   3251   };
   3252 
   3253   DCHECK(instr->value()->Equals(instr->result()));
   3254   Representation r = instr->hydrogen()->value()->representation();
   3255 
   3256   if (r.IsDouble()) {
   3257     XMMRegister scratch = double_scratch0();
   3258     XMMRegister input_reg = ToDoubleRegister(instr->value());
   3259     __ Xorpd(scratch, scratch);
   3260     __ Subsd(scratch, input_reg);
   3261     __ Andpd(input_reg, scratch);
   3262   } else if (r.IsInteger32()) {
   3263     EmitIntegerMathAbs(instr);
   3264   } else if (r.IsSmi()) {
   3265     EmitSmiMathAbs(instr);
   3266   } else {  // Tagged case.
   3267     DeferredMathAbsTaggedHeapNumber* deferred =
   3268         new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
   3269     Register input_reg = ToRegister(instr->value());
   3270     // Smi check.
   3271     __ JumpIfNotSmi(input_reg, deferred->entry());
   3272     EmitSmiMathAbs(instr);
   3273     __ bind(deferred->exit());
   3274   }
   3275 }
   3276 
   3277 void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
   3278   XMMRegister output_reg = ToDoubleRegister(instr->result());
   3279   XMMRegister input_reg = ToDoubleRegister(instr->value());
   3280   CpuFeatureScope scope(masm(), SSE4_1);
   3281   __ Roundsd(output_reg, input_reg, kRoundDown);
   3282 }
   3283 
   3284 void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
   3285   XMMRegister xmm_scratch = double_scratch0();
   3286   Register output_reg = ToRegister(instr->result());
   3287   XMMRegister input_reg = ToDoubleRegister(instr->value());
   3288 
   3289   if (CpuFeatures::IsSupported(SSE4_1)) {
   3290     CpuFeatureScope scope(masm(), SSE4_1);
   3291     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3292       // Deoptimize if minus zero.
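              // The bit pattern of -0.0 is the minimal signed 64-bit value, so
              // subtracting 1 from the raw bits overflows for -0.0 and nothing else.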
   3293       __ Movq(output_reg, input_reg);
   3294       __ subq(output_reg, Immediate(1));
   3295       DeoptimizeIf(overflow, instr, DeoptimizeReason::kMinusZero);
   3296     }
   3297     __ Roundsd(xmm_scratch, input_reg, kRoundDown);
   3298     __ Cvttsd2si(output_reg, xmm_scratch);
   3299     __ cmpl(output_reg, Immediate(0x1));
   3300     DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
   3301   } else {
   3302     Label negative_sign, done;
   3303     // Deoptimize on unordered.
   3304     __ Xorpd(xmm_scratch, xmm_scratch);  // Zero the register.
   3305     __ Ucomisd(input_reg, xmm_scratch);
   3306     DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN);
   3307     __ j(below, &negative_sign, Label::kNear);
   3308 
   3309     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3310       // Check for negative zero.
   3311       Label positive_sign;
   3312       __ j(above, &positive_sign, Label::kNear);
   3313       __ Movmskpd(output_reg, input_reg);
   3314       __ testl(output_reg, Immediate(1));
   3315       DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
   3316       __ Set(output_reg, 0);
   3317       __ jmp(&done);
   3318       __ bind(&positive_sign);
   3319     }
   3320 
   3321     // Use truncating instruction (OK because input is positive).
   3322     __ Cvttsd2si(output_reg, input_reg);
   3323     // Overflow is signalled with minint.
   3324     __ cmpl(output_reg, Immediate(0x1));
   3325     DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
   3326     __ jmp(&done, Label::kNear);
   3327 
   3328     // Non-zero negative reaches here.
   3329     __ bind(&negative_sign);
   3330     // Truncate, then compare and compensate.
   3331     __ Cvttsd2si(output_reg, input_reg);
   3332     __ Cvtlsi2sd(xmm_scratch, output_reg);
   3333     __ Ucomisd(input_reg, xmm_scratch);
   3334     __ j(equal, &done, Label::kNear);
   3335     __ subl(output_reg, Immediate(1));
   3336     DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
   3337 
   3338     __ bind(&done);
   3339   }
   3340 }
   3341 
   3342 void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
   3343   XMMRegister xmm_scratch = double_scratch0();
   3344   XMMRegister output_reg = ToDoubleRegister(instr->result());
   3345   XMMRegister input_reg = ToDoubleRegister(instr->value());
   3346   CpuFeatureScope scope(masm(), SSE4_1);
   3347   Label done;
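          // Round half towards positive infinity: take the ceiling, then subtract
          // one whenever ceiling(input) - 0.5 is still above the input.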
   3348   __ Roundsd(output_reg, input_reg, kRoundUp);
   3349   __ Move(xmm_scratch, -0.5);
   3350   __ Addsd(xmm_scratch, output_reg);
   3351   __ Ucomisd(xmm_scratch, input_reg);
   3352   __ j(below_equal, &done, Label::kNear);
   3353   __ Move(xmm_scratch, 1.0);
   3354   __ Subsd(output_reg, xmm_scratch);
   3355   __ bind(&done);
   3356 }
   3357 
   3358 void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
   3359   const XMMRegister xmm_scratch = double_scratch0();
   3360   Register output_reg = ToRegister(instr->result());
   3361   XMMRegister input_reg = ToDoubleRegister(instr->value());
   3362   XMMRegister input_temp = ToDoubleRegister(instr->temp());
   3363   static int64_t one_half = V8_INT64_C(0x3FE0000000000000);  // 0.5
   3364   static int64_t minus_one_half = V8_INT64_C(0xBFE0000000000000);  // -0.5
   3365 
   3366   Label done, round_to_zero, below_one_half;
   3367   Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
   3368   __ movq(kScratchRegister, one_half);
   3369   __ Movq(xmm_scratch, kScratchRegister);
   3370   __ Ucomisd(xmm_scratch, input_reg);
   3371   __ j(above, &below_one_half, Label::kNear);
   3372 
   3373   // CVTTSD2SI rounds towards zero; since 0.5 <= x, we use floor(0.5 + x).
   3374   __ Addsd(xmm_scratch, input_reg);
   3375   __ Cvttsd2si(output_reg, xmm_scratch);
   3376   // Overflow is signalled with minint.
   3377   __ cmpl(output_reg, Immediate(0x1));
   3378   DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
   3379   __ jmp(&done, dist);
   3380 
   3381   __ bind(&below_one_half);
   3382   __ movq(kScratchRegister, minus_one_half);
   3383   __ Movq(xmm_scratch, kScratchRegister);
   3384   __ Ucomisd(xmm_scratch, input_reg);
   3385   __ j(below_equal, &round_to_zero, Label::kNear);
   3386 
   3387   // CVTTSD2SI rounds towards zero; we use ceil(x - (-0.5)) and then
   3388   // compare and compensate.
   3389   __ Movapd(input_temp, input_reg);  // Do not alter input_reg.
   3390   __ Subsd(input_temp, xmm_scratch);
   3391   __ Cvttsd2si(output_reg, input_temp);
   3392   // Catch minint due to overflow, and to prevent overflow when compensating.
   3393   __ cmpl(output_reg, Immediate(0x1));
   3394   DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
   3395 
   3396   __ Cvtlsi2sd(xmm_scratch, output_reg);
   3397   __ Ucomisd(xmm_scratch, input_temp);
   3398   __ j(equal, &done, dist);
   3399   __ subl(output_reg, Immediate(1));
   3400   // No overflow because we already ruled out minint.
   3401   __ jmp(&done, dist);
   3402 
   3403   __ bind(&round_to_zero);
   3404   // We return 0 for the input range [+0, 0.5[, or [-0.5, 0.5[ if
   3405   // we can ignore the difference between a result of -0 and +0.
   3406   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3407     __ Movq(output_reg, input_reg);
   3408     __ testq(output_reg, output_reg);
   3409     DeoptimizeIf(negative, instr, DeoptimizeReason::kMinusZero);
   3410   }
   3411   __ Set(output_reg, 0);
   3412   __ bind(&done);
   3413 }
   3414 
   3415 
   3416 void LCodeGen::DoMathFround(LMathFround* instr) {
   3417   XMMRegister input_reg = ToDoubleRegister(instr->value());
   3418   XMMRegister output_reg = ToDoubleRegister(instr->result());
   3419   __ Cvtsd2ss(output_reg, input_reg);
   3420   __ Cvtss2sd(output_reg, output_reg);
   3421 }
   3422 
   3423 
   3424 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
   3425   XMMRegister output = ToDoubleRegister(instr->result());
   3426   if (instr->value()->IsDoubleRegister()) {
   3427     XMMRegister input = ToDoubleRegister(instr->value());
   3428     __ Sqrtsd(output, input);
   3429   } else {
   3430     Operand input = ToOperand(instr->value());
   3431     __ Sqrtsd(output, input);
   3432   }
   3433 }
   3434 
   3435 
   3436 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
   3437   XMMRegister xmm_scratch = double_scratch0();
   3438   XMMRegister input_reg = ToDoubleRegister(instr->value());
   3439   DCHECK(ToDoubleRegister(instr->result()).is(input_reg));
   3440 
   3441   // Note that according to ECMA-262 15.8.2.13:
   3442   // Math.pow(-Infinity, 0.5) == Infinity
   3443   // Math.sqrt(-Infinity) == NaN
   3444   Label done, sqrt;
   3445   // Check base for -Infinity.  According to IEEE-754, double-precision
   3446   // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
   3447   __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000));
   3448   __ Movq(xmm_scratch, kScratchRegister);
   3449   __ Ucomisd(xmm_scratch, input_reg);
   3450   // Comparing -Infinity with NaN results in "unordered", which sets the
   3451   // zero flag as if both were equal.  However, it also sets the carry flag.
   3452   __ j(not_equal, &sqrt, Label::kNear);
   3453   __ j(carry, &sqrt, Label::kNear);
   3454   // If input is -Infinity, return Infinity.
   3455   __ Xorpd(input_reg, input_reg);
   3456   __ Subsd(input_reg, xmm_scratch);
   3457   __ jmp(&done, Label::kNear);
   3458 
   3459   // Square root.
   3460   __ bind(&sqrt);
   3461   __ Xorpd(xmm_scratch, xmm_scratch);
   3462   __ Addsd(input_reg, xmm_scratch);  // Convert -0 to +0.
   3463   __ Sqrtsd(input_reg, input_reg);
   3464   __ bind(&done);
   3465 }
   3466 
   3467 
   3468 void LCodeGen::DoPower(LPower* instr) {
   3469   Representation exponent_type = instr->hydrogen()->right()->representation();
   3470   // Having marked this as a call, we can use any registers.
   3471   // Just make sure that the input/output registers are the expected ones.
   3472 
   3473   Register tagged_exponent = MathPowTaggedDescriptor::exponent();
   3474   DCHECK(!instr->right()->IsRegister() ||
   3475          ToRegister(instr->right()).is(tagged_exponent));
   3476   DCHECK(!instr->right()->IsDoubleRegister() ||
   3477          ToDoubleRegister(instr->right()).is(xmm1));
   3478   DCHECK(ToDoubleRegister(instr->left()).is(xmm2));
   3479   DCHECK(ToDoubleRegister(instr->result()).is(xmm3));
   3480 
   3481   if (exponent_type.IsSmi()) {
   3482     MathPowStub stub(isolate(), MathPowStub::TAGGED);
   3483     __ CallStub(&stub);
   3484   } else if (exponent_type.IsTagged()) {
   3485     Label no_deopt;
   3486     __ JumpIfSmi(tagged_exponent, &no_deopt, Label::kNear);
   3487     __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, rcx);
   3488     DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
   3489     __ bind(&no_deopt);
   3490     MathPowStub stub(isolate(), MathPowStub::TAGGED);
   3491     __ CallStub(&stub);
   3492   } else if (exponent_type.IsInteger32()) {
   3493     MathPowStub stub(isolate(), MathPowStub::INTEGER);
   3494     __ CallStub(&stub);
   3495   } else {
   3496     DCHECK(exponent_type.IsDouble());
   3497     MathPowStub stub(isolate(), MathPowStub::DOUBLE);
   3498     __ CallStub(&stub);
   3499   }
   3500 }
   3501 
   3502 void LCodeGen::DoMathCos(LMathCos* instr) {
   3503   DCHECK(ToDoubleRegister(instr->value()).is(xmm0));
   3504   DCHECK(ToDoubleRegister(instr->result()).is(xmm0));
   3505   __ PrepareCallCFunction(1);
   3506   __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 1);
   3507 }
   3508 
   3509 void LCodeGen::DoMathExp(LMathExp* instr) {
   3510   DCHECK(ToDoubleRegister(instr->value()).is(xmm0));
   3511   DCHECK(ToDoubleRegister(instr->result()).is(xmm0));
   3512   __ PrepareCallCFunction(1);
   3513   __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 1);
   3514 }
   3515 
   3516 void LCodeGen::DoMathSin(LMathSin* instr) {
   3517   DCHECK(ToDoubleRegister(instr->value()).is(xmm0));
   3518   DCHECK(ToDoubleRegister(instr->result()).is(xmm0));
   3519   __ PrepareCallCFunction(1);
   3520   __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 1);
   3521 }
   3522 
   3523 void LCodeGen::DoMathLog(LMathLog* instr) {
   3524   DCHECK(ToDoubleRegister(instr->value()).is(xmm0));
   3525   DCHECK(ToDoubleRegister(instr->result()).is(xmm0));
   3526   __ PrepareCallCFunction(1);
   3527   __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 1);
   3528 }
   3529 
   3530 
   3531 void LCodeGen::DoMathClz32(LMathClz32* instr) {
   3532   Register input = ToRegister(instr->value());
   3533   Register result = ToRegister(instr->result());
   3534 
   3535   __ Lzcntl(result, input);
   3536 }
   3537 
   3538 void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
   3539                                   Register scratch1, Register scratch2,
   3540                                   Register scratch3) {
   3541 #if DEBUG
   3542   if (actual.is_reg()) {
   3543     DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
   3544   } else {
   3545     DCHECK(!AreAliased(scratch1, scratch2, scratch3));
   3546   }
   3547 #endif
   3548   if (FLAG_code_comments) {
   3549     if (actual.is_reg()) {
   3550       Comment(";;; PrepareForTailCall, actual: %s {",
   3551               RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
   3552                   actual.reg().code()));
   3553     } else {
   3554       Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
   3555     }
   3556   }
   3557 
   3558   // Check if next frame is an arguments adaptor frame.
   3559   Register caller_args_count_reg = scratch1;
   3560   Label no_arguments_adaptor, formal_parameter_count_loaded;
   3561   __ movp(scratch2, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
   3562   __ Cmp(Operand(scratch2, StandardFrameConstants::kContextOffset),
   3563          Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   3564   __ j(not_equal, &no_arguments_adaptor, Label::kNear);
   3565 
   3566   // Drop current frame and load arguments count from arguments adaptor frame.
   3567   __ movp(rbp, scratch2);
   3568   __ SmiToInteger32(
   3569       caller_args_count_reg,
   3570       Operand(rbp, ArgumentsAdaptorFrameConstants::kLengthOffset));
   3571   __ jmp(&formal_parameter_count_loaded, Label::kNear);
   3572 
   3573   __ bind(&no_arguments_adaptor);
   3574   // Load caller's formal parameter count.
   3575   __ movp(caller_args_count_reg,
   3576           Immediate(info()->literal()->parameter_count()));
   3577 
   3578   __ bind(&formal_parameter_count_loaded);
   3579   __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3,
   3580                         ReturnAddressState::kNotOnStack);
   3581   Comment(";;; }");
   3582 }
   3583 
   3584 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
   3585   HInvokeFunction* hinstr = instr->hydrogen();
   3586   DCHECK(ToRegister(instr->context()).is(rsi));
   3587   DCHECK(ToRegister(instr->function()).is(rdi));
   3588   DCHECK(instr->HasPointerMap());
   3589 
   3590   bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
   3591 
   3592   if (is_tail_call) {
   3593     DCHECK(!info()->saves_caller_doubles());
   3594     ParameterCount actual(instr->arity());
   3595     // It is safe to use rbx, rcx and r8 as scratch registers here given that
   3596     // 1) we are not going to return to caller function anyway,
   3597     // 2) rbx (expected number of arguments) will be initialized below.
   3598     PrepareForTailCall(actual, rbx, rcx, r8);
   3599   }
   3600 
   3601   Handle<JSFunction> known_function = hinstr->known_function();
   3602   if (known_function.is_null()) {
   3603     LPointerMap* pointers = instr->pointer_map();
   3604     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   3605     ParameterCount actual(instr->arity());
   3606     InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
   3607     __ InvokeFunction(rdi, no_reg, actual, flag, generator);
   3608   } else {
   3609     CallKnownFunction(known_function, hinstr->formal_parameter_count(),
   3610                       instr->arity(), is_tail_call, instr);
   3611   }
   3612 }
   3613 
   3614 
   3615 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
   3616   DCHECK(ToRegister(instr->context()).is(rsi));
   3617   DCHECK(ToRegister(instr->constructor()).is(rdi));
   3618   DCHECK(ToRegister(instr->result()).is(rax));
   3619 
   3620   __ Set(rax, instr->arity());
   3621   __ Move(rbx, instr->hydrogen()->site());
   3622 
   3623   ElementsKind kind = instr->hydrogen()->elements_kind();
   3624   AllocationSiteOverrideMode override_mode =
   3625       (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
   3626           ? DISABLE_ALLOCATION_SITES
   3627           : DONT_OVERRIDE;
   3628 
   3629   if (instr->arity() == 0) {
   3630     ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
   3631     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   3632   } else if (instr->arity() == 1) {
   3633     Label done;
   3634     if (IsFastPackedElementsKind(kind)) {
   3635       Label packed_case;
   3636       // We might need to use the holey elements kind;
   3637       // look at the first argument.
   3638       __ movp(rcx, Operand(rsp, 0));
   3639       __ testp(rcx, rcx);
   3640       __ j(zero, &packed_case, Label::kNear);
   3641 
   3642       ElementsKind holey_kind = GetHoleyElementsKind(kind);
   3643       ArraySingleArgumentConstructorStub stub(isolate(),
   3644                                               holey_kind,
   3645                                               override_mode);
   3646       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   3647       __ jmp(&done, Label::kNear);
   3648       __ bind(&packed_case);
   3649     }
   3650 
   3651     ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
   3652     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   3653     __ bind(&done);
   3654   } else {
   3655     ArrayNArgumentsConstructorStub stub(isolate());
   3656     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   3657   }
   3658 }
   3659 
   3660 
   3661 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
   3662   DCHECK(ToRegister(instr->context()).is(rsi));
   3663   CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
   3664 }
   3665 
   3666 
   3667 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
   3668   Register function = ToRegister(instr->function());
   3669   Register code_object = ToRegister(instr->code_object());
   3670   __ leap(code_object, FieldOperand(code_object, Code::kHeaderSize));
   3671   __ movp(FieldOperand(function, JSFunction::kCodeEntryOffset), code_object);
   3672 }
   3673 
   3674 
   3675 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
   3676   Register result = ToRegister(instr->result());
   3677   Register base = ToRegister(instr->base_object());
   3678   if (instr->offset()->IsConstantOperand()) {
   3679     LConstantOperand* offset = LConstantOperand::cast(instr->offset());
   3680     __ leap(result, Operand(base, ToInteger32(offset)));
   3681   } else {
   3682     Register offset = ToRegister(instr->offset());
   3683     __ leap(result, Operand(base, offset, times_1, 0));
   3684   }
   3685 }
   3686 
   3687 
   3688 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
   3689   HStoreNamedField* hinstr = instr->hydrogen();
   3690   Representation representation = instr->representation();
   3691 
   3692   HObjectAccess access = hinstr->access();
   3693   int offset = access.offset();
   3694 
   3695   if (access.IsExternalMemory()) {
   3696     DCHECK(!hinstr->NeedsWriteBarrier());
   3697     Register value = ToRegister(instr->value());
   3698     if (instr->object()->IsConstantOperand()) {
   3699       DCHECK(value.is(rax));
   3700       LConstantOperand* object = LConstantOperand::cast(instr->object());
   3701       __ store_rax(ToExternalReference(object));
   3702     } else {
   3703       Register object = ToRegister(instr->object());
   3704       __ Store(MemOperand(object, offset), value, representation);
   3705     }
   3706     return;
   3707   }
   3708 
   3709   Register object = ToRegister(instr->object());
   3710   __ AssertNotSmi(object);
   3711 
   3712   DCHECK(!representation.IsSmi() ||
   3713          !instr->value()->IsConstantOperand() ||
   3714          IsInteger32Constant(LConstantOperand::cast(instr->value())));
   3715   if (!FLAG_unbox_double_fields && representation.IsDouble()) {
   3716     DCHECK(access.IsInobject());
   3717     DCHECK(!hinstr->has_transition());
   3718     DCHECK(!hinstr->NeedsWriteBarrier());
   3719     XMMRegister value = ToDoubleRegister(instr->value());
   3720     __ Movsd(FieldOperand(object, offset), value);
   3721     return;
   3722   }
   3723 
   3724   if (hinstr->has_transition()) {
   3725     Handle<Map> transition = hinstr->transition_map();
   3726     AddDeprecationDependency(transition);
   3727     if (!hinstr->NeedsWriteBarrierForMap()) {
   3728       __ Move(FieldOperand(object, HeapObject::kMapOffset), transition);
   3729     } else {
   3730       Register temp = ToRegister(instr->temp());
   3731       __ Move(kScratchRegister, transition);
   3732       __ movp(FieldOperand(object, HeapObject::kMapOffset), kScratchRegister);
   3733       // Update the write barrier for the map field.
   3734       __ RecordWriteForMap(object,
   3735                            kScratchRegister,
   3736                            temp,
   3737                            kSaveFPRegs);
   3738     }
   3739   }
   3740 
   3741   // Do the store.
   3742   Register write_register = object;
   3743   if (!access.IsInobject()) {
   3744     write_register = ToRegister(instr->temp());
   3745     __ movp(write_register, FieldOperand(object, JSObject::kPropertiesOffset));
   3746   }
   3747 
   3748   if (representation.IsSmi() && SmiValuesAre32Bits() &&
   3749       hinstr->value()->representation().IsInteger32()) {
   3750     DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
   3751     if (FLAG_debug_code) {
   3752       Register scratch = kScratchRegister;
   3753       __ Load(scratch, FieldOperand(write_register, offset), representation);
   3754       __ AssertSmi(scratch);
   3755     }
   3756     // Store int value directly to upper half of the smi.
   3757     STATIC_ASSERT(kSmiTag == 0);
   3758     DCHECK(kSmiTagSize + kSmiShiftSize == 32);
   3759     offset += kPointerSize / 2;
   3760     representation = Representation::Integer32();
   3761   }
   3762 
   3763   Operand operand = FieldOperand(write_register, offset);
   3764 
   3765   if (FLAG_unbox_double_fields && representation.IsDouble()) {
   3766     DCHECK(access.IsInobject());
   3767     XMMRegister value = ToDoubleRegister(instr->value());
   3768     __ Movsd(operand, value);
   3769 
   3770   } else if (instr->value()->IsRegister()) {
   3771     Register value = ToRegister(instr->value());
   3772     __ Store(operand, value, representation);
   3773   } else {
   3774     LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
   3775     if (IsInteger32Constant(operand_value)) {
   3776       DCHECK(!hinstr->NeedsWriteBarrier());
   3777       int32_t value = ToInteger32(operand_value);
   3778       if (representation.IsSmi()) {
   3779         __ Move(operand, Smi::FromInt(value));
   3780 
   3781       } else {
   3782         __ movl(operand, Immediate(value));
   3783       }
   3784 
   3785     } else if (IsExternalConstant(operand_value)) {
   3786       DCHECK(!hinstr->NeedsWriteBarrier());
   3787       ExternalReference ptr = ToExternalReference(operand_value);
   3788       __ Move(kScratchRegister, ptr);
   3789       __ movp(operand, kScratchRegister);
   3790     } else {
   3791       Handle<Object> handle_value = ToHandle(operand_value);
   3792       DCHECK(!hinstr->NeedsWriteBarrier());
   3793       __ Move(operand, handle_value);
   3794     }
   3795   }
   3796 
   3797   if (hinstr->NeedsWriteBarrier()) {
   3798     Register value = ToRegister(instr->value());
   3799     Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
   3800     // Update the write barrier for the object for in-object properties.
   3801     __ RecordWriteField(write_register,
   3802                         offset,
   3803                         value,
   3804                         temp,
   3805                         kSaveFPRegs,
   3806                         EMIT_REMEMBERED_SET,
   3807                         hinstr->SmiCheckForWriteBarrier(),
   3808                         hinstr->PointersToHereCheckForValue());
   3809   }
   3810 }
   3811 
   3812 
   3813 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
   3814   Representation representation = instr->hydrogen()->length()->representation();
   3815   DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
   3816   DCHECK(representation.IsSmiOrInteger32());
   3817 
   3818   Condition cc = instr->hydrogen()->allow_equality() ? below : below_equal;
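          // cc is the condition on cmp(length, index) under which the access is
          // out of bounds; it is commuted below when the compare operands are
          // reversed.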
   3819   if (instr->length()->IsConstantOperand()) {
   3820     int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
   3821     Register index = ToRegister(instr->index());
   3822     if (representation.IsSmi()) {
   3823       __ Cmp(index, Smi::FromInt(length));
   3824     } else {
   3825       __ cmpl(index, Immediate(length));
   3826     }
   3827     cc = CommuteCondition(cc);
   3828   } else if (instr->index()->IsConstantOperand()) {
   3829     int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
   3830     if (instr->length()->IsRegister()) {
   3831       Register length = ToRegister(instr->length());
   3832       if (representation.IsSmi()) {
   3833         __ Cmp(length, Smi::FromInt(index));
   3834       } else {
   3835         __ cmpl(length, Immediate(index));
   3836       }
   3837     } else {
   3838       Operand length = ToOperand(instr->length());
   3839       if (representation.IsSmi()) {
   3840         __ Cmp(length, Smi::FromInt(index));
   3841       } else {
   3842         __ cmpl(length, Immediate(index));
   3843       }
   3844     }
   3845   } else {
   3846     Register index = ToRegister(instr->index());
   3847     if (instr->length()->IsRegister()) {
   3848       Register length = ToRegister(instr->length());
   3849       if (representation.IsSmi()) {
   3850         __ cmpp(length, index);
   3851       } else {
   3852         __ cmpl(length, index);
   3853       }
   3854     } else {
   3855       Operand length = ToOperand(instr->length());
   3856       if (representation.IsSmi()) {
   3857         __ cmpp(length, index);
   3858       } else {
   3859         __ cmpl(length, index);
   3860       }
   3861     }
   3862   }
   3863   if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
   3864     Label done;
   3865     __ j(NegateCondition(cc), &done, Label::kNear);
   3866     __ int3();
   3867     __ bind(&done);
   3868   } else {
   3869     DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds);
   3870   }
   3871 }
   3872 
   3873 
   3874 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
   3875   ElementsKind elements_kind = instr->elements_kind();
   3876   LOperand* key = instr->key();
   3877   if (kPointerSize == kInt32Size && !key->IsConstantOperand()) {
   3878     Register key_reg = ToRegister(key);
   3879     Representation key_representation =
   3880         instr->hydrogen()->key()->representation();
   3881     if (ExternalArrayOpRequiresTemp(key_representation, elements_kind)) {
   3882       __ SmiToInteger64(key_reg, key_reg);
   3883     } else if (instr->hydrogen()->IsDehoisted()) {
   3884       // Sign extend key because it could be a 32 bit negative value
   3885       // and the dehoisted address computation happens in 64 bits
   3886       __ movsxlq(key_reg, key_reg);
   3887     }
   3888   }
   3889   Operand operand(BuildFastArrayOperand(
   3890       instr->elements(),
   3891       key,
   3892       instr->hydrogen()->key()->representation(),
   3893       elements_kind,
   3894       instr->base_offset()));
   3895 
   3896   if (elements_kind == FLOAT32_ELEMENTS) {
   3897     XMMRegister value(ToDoubleRegister(instr->value()));
   3898     __ Cvtsd2ss(value, value);
   3899     __ Movss(operand, value);
   3900   } else if (elements_kind == FLOAT64_ELEMENTS) {
   3901     __ Movsd(operand, ToDoubleRegister(instr->value()));
   3902   } else {
   3903     Register value(ToRegister(instr->value()));
   3904     switch (elements_kind) {
   3905       case INT8_ELEMENTS:
   3906       case UINT8_ELEMENTS:
   3907       case UINT8_CLAMPED_ELEMENTS:
   3908         __ movb(operand, value);
   3909         break;
   3910       case INT16_ELEMENTS:
   3911       case UINT16_ELEMENTS:
   3912         __ movw(operand, value);
   3913         break;
   3914       case INT32_ELEMENTS:
   3915       case UINT32_ELEMENTS:
   3916         __ movl(operand, value);
   3917         break;
   3918       case FLOAT32_ELEMENTS:
   3919       case FLOAT64_ELEMENTS:
   3920       case FAST_ELEMENTS:
   3921       case FAST_SMI_ELEMENTS:
   3922       case FAST_DOUBLE_ELEMENTS:
   3923       case FAST_HOLEY_ELEMENTS:
   3924       case FAST_HOLEY_SMI_ELEMENTS:
   3925       case FAST_HOLEY_DOUBLE_ELEMENTS:
   3926       case DICTIONARY_ELEMENTS:
   3927       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
   3928       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
   3929       case FAST_STRING_WRAPPER_ELEMENTS:
   3930       case SLOW_STRING_WRAPPER_ELEMENTS:
   3931       case NO_ELEMENTS:
   3932         UNREACHABLE();
   3933         break;
   3934     }
   3935   }
   3936 }
   3937 
   3938 
   3939 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
   3940   XMMRegister value = ToDoubleRegister(instr->value());
   3941   LOperand* key = instr->key();
   3942   if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
   3943       instr->hydrogen()->IsDehoisted()) {
   3944     // Sign extend key because it could be a 32 bit negative value
   3945     // and the dehoisted address computation happens in 64 bits
   3946     __ movsxlq(ToRegister(key), ToRegister(key));
   3947   }
   3948   if (instr->NeedsCanonicalization()) {
   3949     XMMRegister xmm_scratch = double_scratch0();
   3950     // Turn potential sNaN value into qNaN.
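            // Subtracting +0 leaves ordinary values (including -0) unchanged but
            // quietens a signalling NaN.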
   3951     __ Xorpd(xmm_scratch, xmm_scratch);
   3952     __ Subsd(value, xmm_scratch);
   3953   }
   3954 
   3955   Operand double_store_operand = BuildFastArrayOperand(
   3956       instr->elements(),
   3957       key,
   3958       instr->hydrogen()->key()->representation(),
   3959       FAST_DOUBLE_ELEMENTS,
   3960       instr->base_offset());
   3961 
   3962   __ Movsd(double_store_operand, value);
   3963 }
   3964 
   3965 
   3966 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
   3967   HStoreKeyed* hinstr = instr->hydrogen();
   3968   LOperand* key = instr->key();
   3969   int offset = instr->base_offset();
   3970   Representation representation = hinstr->value()->representation();
   3971 
   3972   if (kPointerSize == kInt32Size && !key->IsConstantOperand() &&
   3973       instr->hydrogen()->IsDehoisted()) {
   3974     // Sign extend key because it could be a 32 bit negative value
   3975     // and the dehoisted address computation happens in 64 bits
   3976     __ movsxlq(ToRegister(key), ToRegister(key));
   3977   }
   3978   if (representation.IsInteger32() && SmiValuesAre32Bits()) {
   3979     DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
   3980     DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
   3981     if (FLAG_debug_code) {
   3982       Register scratch = kScratchRegister;
   3983       __ Load(scratch,
   3984               BuildFastArrayOperand(instr->elements(),
   3985                                     key,
   3986                                     instr->hydrogen()->key()->representation(),
   3987                                     FAST_ELEMENTS,
   3988                                     offset),
   3989               Representation::Smi());
   3990       __ AssertSmi(scratch);
   3991     }
   3992     // Store int value directly to upper half of the smi.
   3993     STATIC_ASSERT(kSmiTag == 0);
   3994     DCHECK(kSmiTagSize + kSmiShiftSize == 32);
   3995     offset += kPointerSize / 2;
   3996   }
   3997 
   3998   Operand operand =
   3999       BuildFastArrayOperand(instr->elements(),
   4000                             key,
   4001                             instr->hydrogen()->key()->representation(),
   4002                             FAST_ELEMENTS,
   4003                             offset);
   4004   if (instr->value()->IsRegister()) {
   4005     __ Store(operand, ToRegister(instr->value()), representation);
   4006   } else {
   4007     LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
   4008     if (IsInteger32Constant(operand_value)) {
   4009       int32_t value = ToInteger32(operand_value);
   4010       if (representation.IsSmi()) {
   4011         __ Move(operand, Smi::FromInt(value));
   4012 
   4013       } else {
   4014         __ movl(operand, Immediate(value));
   4015       }
   4016     } else {
   4017       Handle<Object> handle_value = ToHandle(operand_value);
   4018       __ Move(operand, handle_value);
   4019     }
   4020   }
   4021 
   4022   if (hinstr->NeedsWriteBarrier()) {
   4023     Register elements = ToRegister(instr->elements());
   4024     DCHECK(instr->value()->IsRegister());
   4025     Register value = ToRegister(instr->value());
   4026     DCHECK(!key->IsConstantOperand());
   4027     SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
   4028             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   4029     // Compute address of modified element and store it into key register.
   4030     Register key_reg(ToRegister(key));
   4031     __ leap(key_reg, operand);
   4032     __ RecordWrite(elements,
   4033                    key_reg,
   4034                    value,
   4035                    kSaveFPRegs,
   4036                    EMIT_REMEMBERED_SET,
   4037                    check_needed,
   4038                    hinstr->PointersToHereCheckForValue());
   4039   }
   4040 }
   4041 
   4042 
   4043 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
   4044   if (instr->is_fixed_typed_array()) {
   4045     DoStoreKeyedExternalArray(instr);
   4046   } else if (instr->hydrogen()->value()->representation().IsDouble()) {
   4047     DoStoreKeyedFixedDoubleArray(instr);
   4048   } else {
   4049     DoStoreKeyedFixedArray(instr);
   4050   }
   4051 }
   4052 
   4053 
   4054 void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
   4055   class DeferredMaybeGrowElements final : public LDeferredCode {
   4056    public:
   4057     DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
   4058         : LDeferredCode(codegen), instr_(instr) {}
   4059     void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
   4060     LInstruction* instr() override { return instr_; }
   4061 
   4062    private:
   4063     LMaybeGrowElements* instr_;
   4064   };
   4065 
   4066   Register result = rax;
   4067   DeferredMaybeGrowElements* deferred =
   4068       new (zone()) DeferredMaybeGrowElements(this, instr);
   4069   LOperand* key = instr->key();
   4070   LOperand* current_capacity = instr->current_capacity();
   4071 
   4072   DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
   4073   DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
   4074   DCHECK(key->IsConstantOperand() || key->IsRegister());
   4075   DCHECK(current_capacity->IsConstantOperand() ||
   4076          current_capacity->IsRegister());
   4077 
   4078   if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
   4079     int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
   4080     int32_t constant_capacity =
   4081         ToInteger32(LConstantOperand::cast(current_capacity));
   4082     if (constant_key >= constant_capacity) {
   4083       // Deferred case.
   4084       __ jmp(deferred->entry());
   4085     }
   4086   } else if (key->IsConstantOperand()) {
   4087     int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
   4088     __ cmpl(ToRegister(current_capacity), Immediate(constant_key));
   4089     __ j(less_equal, deferred->entry());
   4090   } else if (current_capacity->IsConstantOperand()) {
   4091     int32_t constant_capacity =
   4092         ToInteger32(LConstantOperand::cast(current_capacity));
   4093     __ cmpl(ToRegister(key), Immediate(constant_capacity));
   4094     __ j(greater_equal, deferred->entry());
   4095   } else {
   4096     __ cmpl(ToRegister(key), ToRegister(current_capacity));
   4097     __ j(greater_equal, deferred->entry());
   4098   }
   4099 
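           // Fast path: the key fits, so the result is simply the existing elements
           // backing store.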
   4100   if (instr->elements()->IsRegister()) {
   4101     __ movp(result, ToRegister(instr->elements()));
   4102   } else {
   4103     __ movp(result, ToOperand(instr->elements()));
   4104   }
   4105 
   4106   __ bind(deferred->exit());
   4107 }
   4108 
   4109 
   4110 void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
   4111   // TODO(3095996): Get rid of this. For now, we need to make the
   4112   // result register contain a valid pointer because it is already
   4113   // contained in the register pointer map.
   4114   Register result = rax;
   4115   __ Move(result, Smi::kZero);
   4116 
   4117   // We have to call a stub.
   4118   {
   4119     PushSafepointRegistersScope scope(this);
   4120     if (instr->object()->IsConstantOperand()) {
   4121       LConstantOperand* constant_object =
   4122           LConstantOperand::cast(instr->object());
   4123       if (IsSmiConstant(constant_object)) {
   4124         Smi* immediate = ToSmi(constant_object);
   4125         __ Move(result, immediate);
   4126       } else {
   4127         Handle<Object> handle_value = ToHandle(constant_object);
   4128         __ Move(result, handle_value);
   4129       }
   4130     } else if (instr->object()->IsRegister()) {
   4131       __ Move(result, ToRegister(instr->object()));
   4132     } else {
   4133       __ movp(result, ToOperand(instr->object()));
   4134     }
   4135 
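             // The GrowArrayElementsStub takes the key in rbx as a Smi, so tag
             // register keys before the call.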
   4136     LOperand* key = instr->key();
   4137     if (key->IsConstantOperand()) {
   4138       __ Move(rbx, ToSmi(LConstantOperand::cast(key)));
   4139     } else {
   4140       __ Move(rbx, ToRegister(key));
   4141       __ Integer32ToSmi(rbx, rbx);
   4142     }
   4143 
   4144     GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
   4145     __ CallStub(&stub);
   4146     RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
   4147     __ StoreToSafepointRegisterSlot(result, result);
   4148   }
   4149 
   4150   // Deopt on smi, which means the elements array changed to dictionary mode.
   4151   Condition is_smi = __ CheckSmi(result);
   4152   DeoptimizeIf(is_smi, instr, DeoptimizeReason::kSmi);
   4153 }
   4154 
   4155 
   4156 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
   4157   Register object_reg = ToRegister(instr->object());
   4158 
   4159   Handle<Map> from_map = instr->original_map();
   4160   Handle<Map> to_map = instr->transitioned_map();
   4161   ElementsKind from_kind = instr->from_kind();
   4162   ElementsKind to_kind = instr->to_kind();
   4163 
   4164   Label not_applicable;
   4165   __ Cmp(FieldOperand(object_reg, HeapObject::kMapOffset), from_map);
   4166   __ j(not_equal, &not_applicable);
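           // A simple transition only rewrites the map word (plus a write barrier for
           // the map); all other transitions go through the TransitionElementsKindStub.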
   4167   if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
   4168     Register new_map_reg = ToRegister(instr->new_map_temp());
   4169     __ Move(new_map_reg, to_map, RelocInfo::EMBEDDED_OBJECT);
   4170     __ movp(FieldOperand(object_reg, HeapObject::kMapOffset), new_map_reg);
   4171     // Write barrier.
   4172     __ RecordWriteForMap(object_reg, new_map_reg, ToRegister(instr->temp()),
   4173                          kDontSaveFPRegs);
   4174   } else {
   4175     DCHECK(object_reg.is(rax));
   4176     DCHECK(ToRegister(instr->context()).is(rsi));
   4177     PushSafepointRegistersScope scope(this);
   4178     __ Move(rbx, to_map);
   4179     TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
   4180     __ CallStub(&stub);
   4181     RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
   4182   }
   4183   __ bind(&not_applicable);
   4184 }
   4185 
   4186 
   4187 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
   4188   Register object = ToRegister(instr->object());
   4189   Register temp = ToRegister(instr->temp());
   4190   Label no_memento_found;
   4191   __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
   4192   DeoptimizeIf(equal, instr, DeoptimizeReason::kMementoFound);
   4193   __ bind(&no_memento_found);
   4194 }
   4195 
   4196 
   4197 void LCodeGen::DoStringAdd(LStringAdd* instr) {
   4198   DCHECK(ToRegister(instr->context()).is(rsi));
   4199   DCHECK(ToRegister(instr->left()).is(rdx));
   4200   DCHECK(ToRegister(instr->right()).is(rax));
   4201   StringAddStub stub(isolate(),
   4202                      instr->hydrogen()->flags(),
   4203                      instr->hydrogen()->pretenure_flag());
   4204   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   4205 }
   4206 
   4207 
   4208 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
   4209   class DeferredStringCharCodeAt final : public LDeferredCode {
   4210    public:
   4211     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
   4212         : LDeferredCode(codegen), instr_(instr) { }
   4213     void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
   4214     LInstruction* instr() override { return instr_; }
   4215 
   4216    private:
   4217     LStringCharCodeAt* instr_;
   4218   };
   4219 
   4220   DeferredStringCharCodeAt* deferred =
   4221       new(zone()) DeferredStringCharCodeAt(this, instr);
   4222 
   4223   StringCharLoadGenerator::Generate(masm(),
   4224                                     ToRegister(instr->string()),
   4225                                     ToRegister(instr->index()),
   4226                                     ToRegister(instr->result()),
   4227                                     deferred->entry());
   4228   __ bind(deferred->exit());
   4229 }
   4230 
   4231 
   4232 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
   4233   Register string = ToRegister(instr->string());
   4234   Register result = ToRegister(instr->result());
   4235 
   4236   // TODO(3095996): Get rid of this. For now, we need to make the
   4237   // result register contain a valid pointer because it is already
   4238   // contained in the register pointer map.
   4239   __ Set(result, 0);
   4240 
   4241   PushSafepointRegistersScope scope(this);
   4242   __ Push(string);
   4243   // Push the index as a smi. This is safe because of the checks in
   4244   // DoStringCharCodeAt above.
   4245   STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue);
   4246   if (instr->index()->IsConstantOperand()) {
   4247     int32_t const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   4248     __ Push(Smi::FromInt(const_index));
   4249   } else {
   4250     Register index = ToRegister(instr->index());
   4251     __ Integer32ToSmi(index, index);
   4252     __ Push(index);
   4253   }
   4254   CallRuntimeFromDeferred(
   4255       Runtime::kStringCharCodeAtRT, 2, instr, instr->context());
   4256   __ AssertSmi(rax);
   4257   __ SmiToInteger32(rax, rax);
   4258   __ StoreToSafepointRegisterSlot(result, rax);
   4259 }
   4260 
   4261 
   4262 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
   4263   class DeferredStringCharFromCode final : public LDeferredCode {
   4264    public:
   4265     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
   4266         : LDeferredCode(codegen), instr_(instr) { }
   4267     void Generate() override {
   4268       codegen()->DoDeferredStringCharFromCode(instr_);
   4269     }
   4270     LInstruction* instr() override { return instr_; }
   4271 
   4272    private:
   4273     LStringCharFromCode* instr_;
   4274   };
   4275 
   4276   DeferredStringCharFromCode* deferred =
   4277       new(zone()) DeferredStringCharFromCode(this, instr);
   4278 
   4279   DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
   4280   Register char_code = ToRegister(instr->char_code());
   4281   Register result = ToRegister(instr->result());
   4282   DCHECK(!char_code.is(result));
   4283 
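           // Char codes in the one-byte range are looked up in the single character
           // string cache; everything else is handled in the deferred code.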
   4284   __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
   4285   __ j(above, deferred->entry());
   4286   __ movsxlq(char_code, char_code);
   4287   __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
   4288   __ movp(result, FieldOperand(result,
   4289                                char_code, times_pointer_size,
   4290                                FixedArray::kHeaderSize));
   4291   __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
   4292   __ j(equal, deferred->entry());
   4293   __ bind(deferred->exit());
   4294 }
   4295 
   4296 
   4297 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
   4298   Register char_code = ToRegister(instr->char_code());
   4299   Register result = ToRegister(instr->result());
   4300 
   4301   // TODO(3095996): Get rid of this. For now, we need to make the
   4302   // result register contain a valid pointer because it is already
   4303   // contained in the register pointer map.
   4304   __ Set(result, 0);
   4305 
   4306   PushSafepointRegistersScope scope(this);
   4307   __ Integer32ToSmi(char_code, char_code);
   4308   __ Push(char_code);
   4309   CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
   4310                           instr->context());
   4311   __ StoreToSafepointRegisterSlot(result, rax);
   4312 }
   4313 
   4314 
   4315 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
   4316   LOperand* input = instr->value();
   4317   DCHECK(input->IsRegister() || input->IsStackSlot());
   4318   LOperand* output = instr->result();
   4319   DCHECK(output->IsDoubleRegister());
   4320   if (input->IsRegister()) {
   4321     __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
   4322   } else {
   4323     __ Cvtlsi2sd(ToDoubleRegister(output), ToOperand(input));
   4324   }
   4325 }
   4326 
   4327 
   4328 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
   4329   LOperand* input = instr->value();
   4330   LOperand* output = instr->result();
   4331 
   4332   __ LoadUint32(ToDoubleRegister(output), ToRegister(input));
   4333 }
   4334 
   4335 
   4336 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
   4337   class DeferredNumberTagI final : public LDeferredCode {
   4338    public:
   4339     DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
   4340         : LDeferredCode(codegen), instr_(instr) { }
   4341     void Generate() override {
   4342       codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
   4343                                        instr_->temp2(), SIGNED_INT32);
   4344     }
   4345     LInstruction* instr() override { return instr_; }
   4346 
   4347    private:
   4348     LNumberTagI* instr_;
   4349   };
   4350 
   4351   LOperand* input = instr->value();
   4352   DCHECK(input->IsRegister() && input->Equals(instr->result()));
   4353   Register reg = ToRegister(input);
   4354 
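           // With 32-bit Smis every int32 value can be tagged directly; with 31-bit
           // Smis tagging can overflow and then needs a deferred heap number
           // allocation.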
   4355   if (SmiValuesAre32Bits()) {
   4356     __ Integer32ToSmi(reg, reg);
   4357   } else {
   4358     DCHECK(SmiValuesAre31Bits());
   4359     DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
   4360     __ Integer32ToSmi(reg, reg);
   4361     __ j(overflow, deferred->entry());
   4362     __ bind(deferred->exit());
   4363   }
   4364 }
   4365 
   4366 
   4367 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
   4368   class DeferredNumberTagU final : public LDeferredCode {
   4369    public:
   4370     DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
   4371         : LDeferredCode(codegen), instr_(instr) { }
   4372     void Generate() override {
   4373       codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
   4374                                        instr_->temp2(), UNSIGNED_INT32);
   4375     }
   4376     LInstruction* instr() override { return instr_; }
   4377 
   4378    private:
   4379     LNumberTagU* instr_;
   4380   };
   4381 
   4382   LOperand* input = instr->value();
   4383   DCHECK(input->IsRegister() && input->Equals(instr->result()));
   4384   Register reg = ToRegister(input);
   4385 
   4386   DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
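           // Unsigned values above Smi::kMaxValue cannot be represented as a Smi;
           // box them as heap numbers in the deferred code.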
   4387   __ cmpl(reg, Immediate(Smi::kMaxValue));
   4388   __ j(above, deferred->entry());
   4389   __ Integer32ToSmi(reg, reg);
   4390   __ bind(deferred->exit());
   4391 }
   4392 
   4393 
   4394 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
   4395                                      LOperand* value,
   4396                                      LOperand* temp1,
   4397                                      LOperand* temp2,
   4398                                      IntegerSignedness signedness) {
   4399   Label done, slow;
   4400   Register reg = ToRegister(value);
   4401   Register tmp = ToRegister(temp1);
   4402   XMMRegister temp_xmm = ToDoubleRegister(temp2);
   4403 
   4404   // Load value into temp_xmm which will be preserved across potential call to
   4405   // runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
   4406   // XMM registers on x64).
   4407   if (signedness == SIGNED_INT32) {
   4408     DCHECK(SmiValuesAre31Bits());
   4409     // There was overflow, so bits 30 and 31 of the original integer
   4410     // disagree. Try to allocate a heap number in new space and store
   4411     // the value in there. If that fails, call the runtime system.
   4412     __ SmiToInteger32(reg, reg);
   4413     __ xorl(reg, Immediate(0x80000000));
   4414     __ Cvtlsi2sd(temp_xmm, reg);
   4415   } else {
   4416     DCHECK(signedness == UNSIGNED_INT32);
   4417     __ LoadUint32(temp_xmm, reg);
   4418   }
   4419 
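           // Fast path: try to allocate the heap number inline in new space.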
   4420   if (FLAG_inline_new) {
   4421     __ AllocateHeapNumber(reg, tmp, &slow);
   4422     __ jmp(&done, kPointerSize == kInt64Size ? Label::kNear : Label::kFar);
   4423   }
   4424 
   4425   // Slow case: Call the runtime system to do the number allocation.
   4426   __ bind(&slow);
   4427   {
   4428     // Put a valid pointer value in the stack slot where the result
   4429     // register is stored, as this register is in the pointer map, but contains
   4430     // an integer value.
   4431     __ Set(reg, 0);
   4432 
   4433     // Preserve the value of all registers.
   4434     PushSafepointRegistersScope scope(this);
   4435     // Reset the context register.
   4436     if (!reg.is(rsi)) {
   4437       __ Set(rsi, 0);
   4438     }
   4439     __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   4440     RecordSafepointWithRegisters(
   4441         instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   4442     __ StoreToSafepointRegisterSlot(reg, rax);
   4443   }
   4444 
    4445   // Done. Store the value held in temp_xmm into the value field of the
    4446   // allocated heap number.
   4447   __ bind(&done);
   4448   __ Movsd(FieldOperand(reg, HeapNumber::kValueOffset), temp_xmm);
   4449 }
   4450 
   4451 
   4452 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
   4453   class DeferredNumberTagD final : public LDeferredCode {
   4454    public:
   4455     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
   4456         : LDeferredCode(codegen), instr_(instr) { }
   4457     void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
   4458     LInstruction* instr() override { return instr_; }
   4459 
   4460    private:
   4461     LNumberTagD* instr_;
   4462   };
   4463 
   4464   XMMRegister input_reg = ToDoubleRegister(instr->value());
   4465   Register reg = ToRegister(instr->result());
   4466   Register tmp = ToRegister(instr->temp());
   4467 
   4468   DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
   4469   if (FLAG_inline_new) {
   4470     __ AllocateHeapNumber(reg, tmp, deferred->entry());
   4471   } else {
   4472     __ jmp(deferred->entry());
   4473   }
   4474   __ bind(deferred->exit());
   4475   __ Movsd(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
   4476 }
   4477 
   4478 
   4479 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
   4480   // TODO(3095996): Get rid of this. For now, we need to make the
   4481   // result register contain a valid pointer because it is already
   4482   // contained in the register pointer map.
   4483   Register reg = ToRegister(instr->result());
   4484   __ Move(reg, Smi::kZero);
   4485 
   4486   {
   4487     PushSafepointRegistersScope scope(this);
   4488     // Reset the context register.
   4489     if (!reg.is(rsi)) {
   4490       __ Move(rsi, 0);
   4491     }
   4492     __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   4493     RecordSafepointWithRegisters(
   4494         instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   4495     __ movp(kScratchRegister, rax);
   4496   }
   4497   __ movp(reg, kScratchRegister);
   4498 }
   4499 
   4500 
   4501 void LCodeGen::DoSmiTag(LSmiTag* instr) {
   4502   HChange* hchange = instr->hydrogen();
   4503   Register input = ToRegister(instr->value());
   4504   Register output = ToRegister(instr->result());
   4505   if (hchange->CheckFlag(HValue::kCanOverflow) &&
   4506       hchange->value()->CheckFlag(HValue::kUint32)) {
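             // A uint32 value above Smi::kMaxValue cannot be tagged, so check the
             // value range explicitly before tagging.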
   4507     Condition is_smi = __ CheckUInteger32ValidSmiValue(input);
   4508     DeoptimizeIf(NegateCondition(is_smi), instr, DeoptimizeReason::kOverflow);
   4509   }
   4510   __ Integer32ToSmi(output, input);
   4511   if (hchange->CheckFlag(HValue::kCanOverflow) &&
   4512       !hchange->value()->CheckFlag(HValue::kUint32)) {
   4513     DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
   4514   }
   4515 }
   4516 
   4517 
   4518 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
   4519   DCHECK(instr->value()->Equals(instr->result()));
   4520   Register input = ToRegister(instr->value());
   4521   if (instr->needs_check()) {
   4522     Condition is_smi = __ CheckSmi(input);
   4523     DeoptimizeIf(NegateCondition(is_smi), instr, DeoptimizeReason::kNotASmi);
   4524   } else {
   4525     __ AssertSmi(input);
   4526   }
   4527   __ SmiToInteger32(input, input);
   4528 }
   4529 
   4530 
   4531 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
   4532                                 XMMRegister result_reg, NumberUntagDMode mode) {
   4533   bool can_convert_undefined_to_nan = instr->truncating();
   4534   bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
   4535 
   4536   Label convert, load_smi, done;
   4537 
   4538   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
   4539     // Smi check.
   4540     __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
   4541 
   4542     // Heap number map check.
   4543     __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
   4544                    Heap::kHeapNumberMapRootIndex);
   4545 
   4546     // On x64 it is safe to load at heap number offset before evaluating the map
   4547     // check, since all heap objects are at least two words long.
   4548     __ Movsd(result_reg, FieldOperand(input_reg, HeapNumber::kValueOffset));
   4549 
   4550     if (can_convert_undefined_to_nan) {
   4551       __ j(not_equal, &convert, Label::kNear);
   4552     } else {
   4553       DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
   4554     }
   4555 
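             // -0.0 compares equal to +0.0, so detect it by inspecting the sign bit
             // with Movmskpd.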
   4556     if (deoptimize_on_minus_zero) {
   4557       XMMRegister xmm_scratch = double_scratch0();
   4558       __ Xorpd(xmm_scratch, xmm_scratch);
   4559       __ Ucomisd(xmm_scratch, result_reg);
   4560       __ j(not_equal, &done, Label::kNear);
   4561       __ Movmskpd(kScratchRegister, result_reg);
   4562       __ testl(kScratchRegister, Immediate(1));
   4563       DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
   4564     }
   4565     __ jmp(&done, Label::kNear);
   4566 
   4567     if (can_convert_undefined_to_nan) {
   4568       __ bind(&convert);
   4569 
   4570       // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
   4571       __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
   4572       DeoptimizeIf(not_equal, instr,
   4573                    DeoptimizeReason::kNotAHeapNumberUndefined);
   4574 
   4575       __ Xorpd(result_reg, result_reg);
   4576       __ Divsd(result_reg, result_reg);
   4577       __ jmp(&done, Label::kNear);
   4578     }
   4579   } else {
   4580     DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
   4581   }
   4582 
    4583   // Smi to XMM conversion.
   4584   __ bind(&load_smi);
   4585   __ SmiToInteger32(kScratchRegister, input_reg);
   4586   __ Cvtlsi2sd(result_reg, kScratchRegister);
   4587   __ bind(&done);
   4588 }
   4589 
   4590 
   4591 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr, Label* done) {
   4592   Register input_reg = ToRegister(instr->value());
   4593 
   4594   if (instr->truncating()) {
   4595     Register input_map_reg = kScratchRegister;
   4596     Label truncate;
   4597     Label::Distance truncate_distance =
   4598         DeoptEveryNTimes() ? Label::kFar : Label::kNear;
   4599     __ movp(input_map_reg, FieldOperand(input_reg, HeapObject::kMapOffset));
   4600     __ JumpIfRoot(input_map_reg, Heap::kHeapNumberMapRootIndex, &truncate,
   4601                   truncate_distance);
   4602     __ CmpInstanceType(input_map_reg, ODDBALL_TYPE);
   4603     DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotANumberOrOddball);
   4604     __ bind(&truncate);
   4605     __ TruncateHeapNumberToI(input_reg, input_reg);
   4606   } else {
   4607     XMMRegister scratch = ToDoubleRegister(instr->temp());
   4608     DCHECK(!scratch.is(double_scratch0()));
   4609     __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
   4610                    Heap::kHeapNumberMapRootIndex);
   4611     DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumber);
   4612     __ Movsd(double_scratch0(),
   4613              FieldOperand(input_reg, HeapNumber::kValueOffset));
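             // Convert to int32 and back; if the round trip is not exact, or the
             // input was NaN, deoptimize.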
   4614     __ Cvttsd2si(input_reg, double_scratch0());
   4615     __ Cvtlsi2sd(scratch, input_reg);
   4616     __ Ucomisd(double_scratch0(), scratch);
   4617     DeoptimizeIf(not_equal, instr, DeoptimizeReason::kLostPrecision);
   4618     DeoptimizeIf(parity_even, instr, DeoptimizeReason::kNaN);
   4619     if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
   4620       __ testl(input_reg, input_reg);
   4621       __ j(not_zero, done);
   4622       __ Movmskpd(input_reg, double_scratch0());
   4623       __ andl(input_reg, Immediate(1));
   4624       DeoptimizeIf(not_zero, instr, DeoptimizeReason::kMinusZero);
   4625     }
   4626   }
   4627 }
   4628 
   4629 
   4630 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
   4631   class DeferredTaggedToI final : public LDeferredCode {
   4632    public:
   4633     DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
   4634         : LDeferredCode(codegen), instr_(instr) { }
   4635     void Generate() override { codegen()->DoDeferredTaggedToI(instr_, done()); }
   4636     LInstruction* instr() override { return instr_; }
   4637 
   4638    private:
   4639     LTaggedToI* instr_;
   4640   };
   4641 
   4642   LOperand* input = instr->value();
   4643   DCHECK(input->IsRegister());
   4644   DCHECK(input->Equals(instr->result()));
   4645   Register input_reg = ToRegister(input);
   4646 
   4647   if (instr->hydrogen()->value()->representation().IsSmi()) {
   4648     __ SmiToInteger32(input_reg, input_reg);
   4649   } else {
   4650     DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
   4651     __ JumpIfNotSmi(input_reg, deferred->entry());
   4652     __ SmiToInteger32(input_reg, input_reg);
   4653     __ bind(deferred->exit());
   4654   }
   4655 }
   4656 
   4657 
   4658 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   4659   LOperand* input = instr->value();
   4660   DCHECK(input->IsRegister());
   4661   LOperand* result = instr->result();
   4662   DCHECK(result->IsDoubleRegister());
   4663 
   4664   Register input_reg = ToRegister(input);
   4665   XMMRegister result_reg = ToDoubleRegister(result);
   4666 
   4667   HValue* value = instr->hydrogen()->value();
   4668   NumberUntagDMode mode = value->representation().IsSmi()
   4669       ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
   4670 
   4671   EmitNumberUntagD(instr, input_reg, result_reg, mode);
   4672 }
   4673 
   4674 
   4675 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
   4676   LOperand* input = instr->value();
   4677   DCHECK(input->IsDoubleRegister());
   4678   LOperand* result = instr->result();
   4679   DCHECK(result->IsRegister());
   4680 
   4681   XMMRegister input_reg = ToDoubleRegister(input);
   4682   Register result_reg = ToRegister(result);
   4683 
   4684   if (instr->truncating()) {
   4685     __ TruncateDoubleToI(result_reg, input_reg);
   4686   } else {
   4687     Label lost_precision, is_nan, minus_zero, done;
   4688     XMMRegister xmm_scratch = double_scratch0();
   4689     Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
   4690     __ DoubleToI(result_reg, input_reg, xmm_scratch,
   4691                  instr->hydrogen()->GetMinusZeroMode(), &lost_precision,
   4692                  &is_nan, &minus_zero, dist);
   4693     __ jmp(&done, dist);
   4694     __ bind(&lost_precision);
   4695     DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
   4696     __ bind(&is_nan);
   4697     DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
   4698     __ bind(&minus_zero);
   4699     DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
   4700     __ bind(&done);
   4701   }
   4702 }
   4703 
   4704 
   4705 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
   4706   LOperand* input = instr->value();
   4707   DCHECK(input->IsDoubleRegister());
   4708   LOperand* result = instr->result();
   4709   DCHECK(result->IsRegister());
   4710 
   4711   XMMRegister input_reg = ToDoubleRegister(input);
   4712   Register result_reg = ToRegister(result);
   4713 
   4714   Label lost_precision, is_nan, minus_zero, done;
   4715   XMMRegister xmm_scratch = double_scratch0();
   4716   Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
   4717   __ DoubleToI(result_reg, input_reg, xmm_scratch,
   4718                instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan,
   4719                &minus_zero, dist);
   4720   __ jmp(&done, dist);
   4721   __ bind(&lost_precision);
   4722   DeoptimizeIf(no_condition, instr, DeoptimizeReason::kLostPrecision);
   4723   __ bind(&is_nan);
   4724   DeoptimizeIf(no_condition, instr, DeoptimizeReason::kNaN);
   4725   __ bind(&minus_zero);
   4726   DeoptimizeIf(no_condition, instr, DeoptimizeReason::kMinusZero);
   4727   __ bind(&done);
   4728   __ Integer32ToSmi(result_reg, result_reg);
   4729   DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
   4730 }
   4731 
   4732 
   4733 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
   4734   LOperand* input = instr->value();
   4735   Condition cc = masm()->CheckSmi(ToRegister(input));
   4736   DeoptimizeIf(NegateCondition(cc), instr, DeoptimizeReason::kNotASmi);
   4737 }
   4738 
   4739 
   4740 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
   4741   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   4742     LOperand* input = instr->value();
   4743     Condition cc = masm()->CheckSmi(ToRegister(input));
   4744     DeoptimizeIf(cc, instr, DeoptimizeReason::kSmi);
   4745   }
   4746 }
   4747 
   4748 
   4749 void LCodeGen::DoCheckArrayBufferNotNeutered(
   4750     LCheckArrayBufferNotNeutered* instr) {
   4751   Register view = ToRegister(instr->view());
   4752 
   4753   __ movp(kScratchRegister,
   4754           FieldOperand(view, JSArrayBufferView::kBufferOffset));
   4755   __ testb(FieldOperand(kScratchRegister, JSArrayBuffer::kBitFieldOffset),
   4756            Immediate(1 << JSArrayBuffer::WasNeutered::kShift));
   4757   DeoptimizeIf(not_zero, instr, DeoptimizeReason::kOutOfBounds);
   4758 }
   4759 
   4760 
   4761 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
   4762   Register input = ToRegister(instr->value());
   4763 
   4764   __ movp(kScratchRegister, FieldOperand(input, HeapObject::kMapOffset));
   4765 
   4766   if (instr->hydrogen()->is_interval_check()) {
   4767     InstanceType first;
   4768     InstanceType last;
   4769     instr->hydrogen()->GetCheckInterval(&first, &last);
   4770 
   4771     __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
   4772             Immediate(static_cast<int8_t>(first)));
   4773 
    4774     // If there is only one type in the interval, check for equality.
   4775     if (first == last) {
   4776       DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType);
   4777     } else {
   4778       DeoptimizeIf(below, instr, DeoptimizeReason::kWrongInstanceType);
   4779       // Omit check for the last type.
   4780       if (last != LAST_TYPE) {
   4781         __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
   4782                 Immediate(static_cast<int8_t>(last)));
   4783         DeoptimizeIf(above, instr, DeoptimizeReason::kWrongInstanceType);
   4784       }
   4785     }
   4786   } else {
   4787     uint8_t mask;
   4788     uint8_t tag;
   4789     instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
   4790 
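             // A single-bit mask can be tested directly; otherwise mask the instance
             // type and compare it against the tag.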
   4791     if (base::bits::IsPowerOfTwo32(mask)) {
   4792       DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
   4793       __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
   4794                Immediate(mask));
   4795       DeoptimizeIf(tag == 0 ? not_zero : zero, instr,
   4796                    DeoptimizeReason::kWrongInstanceType);
   4797     } else {
   4798       __ movzxbl(kScratchRegister,
   4799                  FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
   4800       __ andb(kScratchRegister, Immediate(mask));
   4801       __ cmpb(kScratchRegister, Immediate(tag));
   4802       DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongInstanceType);
   4803     }
   4804   }
   4805 }
   4806 
   4807 
   4808 void LCodeGen::DoCheckValue(LCheckValue* instr) {
   4809   Register reg = ToRegister(instr->value());
   4810   __ Cmp(reg, instr->hydrogen()->object().handle());
   4811   DeoptimizeIf(not_equal, instr, DeoptimizeReason::kValueMismatch);
   4812 }
   4813 
   4814 
   4815 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
   4816   {
   4817     PushSafepointRegistersScope scope(this);
   4818     __ Push(object);
   4819     __ Set(rsi, 0);
   4820     __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
   4821     RecordSafepointWithRegisters(
   4822         instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
   4823 
   4824     __ testp(rax, Immediate(kSmiTagMask));
   4825   }
   4826   DeoptimizeIf(zero, instr, DeoptimizeReason::kInstanceMigrationFailed);
   4827 }
   4828 
   4829 
   4830 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
   4831   class DeferredCheckMaps final : public LDeferredCode {
   4832    public:
   4833     DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
   4834         : LDeferredCode(codegen), instr_(instr), object_(object) {
   4835       SetExit(check_maps());
   4836     }
   4837     void Generate() override {
   4838       codegen()->DoDeferredInstanceMigration(instr_, object_);
   4839     }
   4840     Label* check_maps() { return &check_maps_; }
   4841     LInstruction* instr() override { return instr_; }
   4842 
   4843    private:
   4844     LCheckMaps* instr_;
   4845     Label check_maps_;
   4846     Register object_;
   4847   };
   4848 
   4849   if (instr->hydrogen()->IsStabilityCheck()) {
   4850     const UniqueSet<Map>* maps = instr->hydrogen()->maps();
   4851     for (int i = 0; i < maps->size(); ++i) {
   4852       AddStabilityDependency(maps->at(i).handle());
   4853     }
   4854     return;
   4855   }
   4856 
   4857   LOperand* input = instr->value();
   4858   DCHECK(input->IsRegister());
   4859   Register reg = ToRegister(input);
   4860 
   4861   DeferredCheckMaps* deferred = NULL;
   4862   if (instr->hydrogen()->HasMigrationTarget()) {
   4863     deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
   4864     __ bind(deferred->check_maps());
   4865   }
   4866 
   4867   const UniqueSet<Map>* maps = instr->hydrogen()->maps();
   4868   Label success;
   4869   for (int i = 0; i < maps->size() - 1; i++) {
   4870     Handle<Map> map = maps->at(i).handle();
   4871     __ CompareMap(reg, map);
   4872     __ j(equal, &success, Label::kNear);
   4873   }
   4874 
   4875   Handle<Map> map = maps->at(maps->size() - 1).handle();
   4876   __ CompareMap(reg, map);
   4877   if (instr->hydrogen()->HasMigrationTarget()) {
   4878     __ j(not_equal, deferred->entry());
   4879   } else {
   4880     DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap);
   4881   }
   4882 
   4883   __ bind(&success);
   4884 }
   4885 
   4886 
   4887 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
   4888   XMMRegister value_reg = ToDoubleRegister(instr->unclamped());
   4889   XMMRegister xmm_scratch = double_scratch0();
   4890   Register result_reg = ToRegister(instr->result());
   4891   __ ClampDoubleToUint8(value_reg, xmm_scratch, result_reg);
   4892 }
   4893 
   4894 
   4895 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
   4896   DCHECK(instr->unclamped()->Equals(instr->result()));
   4897   Register value_reg = ToRegister(instr->result());
   4898   __ ClampUint8(value_reg);
   4899 }
   4900 
   4901 
   4902 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
   4903   DCHECK(instr->unclamped()->Equals(instr->result()));
   4904   Register input_reg = ToRegister(instr->unclamped());
   4905   XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
   4906   XMMRegister xmm_scratch = double_scratch0();
   4907   Label is_smi, done, heap_number;
   4908   Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
   4909   __ JumpIfSmi(input_reg, &is_smi, dist);
   4910 
    4911   // Check for a heap number.
   4912   __ Cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
   4913          factory()->heap_number_map());
   4914   __ j(equal, &heap_number, Label::kNear);
   4915 
   4916   // Check for undefined. Undefined is converted to zero for clamping
   4917   // conversions.
   4918   __ Cmp(input_reg, factory()->undefined_value());
   4919   DeoptimizeIf(not_equal, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
   4920   __ xorl(input_reg, input_reg);
   4921   __ jmp(&done, Label::kNear);
   4922 
   4923   // Heap number
   4924   __ bind(&heap_number);
   4925   __ Movsd(xmm_scratch, FieldOperand(input_reg, HeapNumber::kValueOffset));
   4926   __ ClampDoubleToUint8(xmm_scratch, temp_xmm_reg, input_reg);
   4927   __ jmp(&done, Label::kNear);
   4928 
    4929   // Smi
   4930   __ bind(&is_smi);
   4931   __ SmiToInteger32(input_reg, input_reg);
   4932   __ ClampUint8(input_reg);
   4933 
   4934   __ bind(&done);
   4935 }
   4936 
   4937 
   4938 void LCodeGen::DoAllocate(LAllocate* instr) {
   4939   class DeferredAllocate final : public LDeferredCode {
   4940    public:
   4941     DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
   4942         : LDeferredCode(codegen), instr_(instr) { }
   4943     void Generate() override { codegen()->DoDeferredAllocate(instr_); }
   4944     LInstruction* instr() override { return instr_; }
   4945 
   4946    private:
   4947     LAllocate* instr_;
   4948   };
   4949 
   4950   DeferredAllocate* deferred =
   4951       new(zone()) DeferredAllocate(this, instr);
   4952 
   4953   Register result = ToRegister(instr->result());
   4954   Register temp = ToRegister(instr->temp());
   4955 
   4956   // Allocate memory for the object.
   4957   AllocationFlags flags = NO_ALLOCATION_FLAGS;
   4958   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
   4959     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
   4960   }
   4961   if (instr->hydrogen()->IsOldSpaceAllocation()) {
   4962     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
   4963     flags = static_cast<AllocationFlags>(flags | PRETENURE);
   4964   }
   4965 
   4966   if (instr->hydrogen()->IsAllocationFoldingDominator()) {
   4967     flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
   4968   }
   4969   DCHECK(!instr->hydrogen()->IsAllocationFolded());
   4970 
   4971   if (instr->size()->IsConstantOperand()) {
   4972     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   4973     CHECK(size <= kMaxRegularHeapObjectSize);
   4974     __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
   4975   } else {
   4976     Register size = ToRegister(instr->size());
   4977     __ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
   4978   }
   4979 
   4980   __ bind(deferred->exit());
   4981 
   4982   if (instr->hydrogen()->MustPrefillWithFiller()) {
   4983     if (instr->size()->IsConstantOperand()) {
   4984       int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   4985       __ movl(temp, Immediate((size / kPointerSize) - 1));
   4986     } else {
   4987       temp = ToRegister(instr->size());
   4988       __ sarp(temp, Immediate(kPointerSizeLog2));
   4989       __ decl(temp);
   4990     }
   4991     Label loop;
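             // Fill the allocated object with one-pointer filler maps, writing from
             // the last word downwards.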
   4992     __ bind(&loop);
   4993     __ Move(FieldOperand(result, temp, times_pointer_size, 0),
   4994         isolate()->factory()->one_pointer_filler_map());
   4995     __ decl(temp);
   4996     __ j(not_zero, &loop);
   4997   }
   4998 }
   4999 
   5000 void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
   5001   DCHECK(instr->hydrogen()->IsAllocationFolded());
   5002   DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
   5003   Register result = ToRegister(instr->result());
   5004   Register temp = ToRegister(instr->temp());
   5005 
   5006   AllocationFlags flags = ALLOCATION_FOLDED;
   5007   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
   5008     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
   5009   }
   5010   if (instr->hydrogen()->IsOldSpaceAllocation()) {
   5011     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
   5012     flags = static_cast<AllocationFlags>(flags | PRETENURE);
   5013   }
   5014   if (instr->size()->IsConstantOperand()) {
   5015     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   5016     CHECK(size <= kMaxRegularHeapObjectSize);
   5017     __ FastAllocate(size, result, temp, flags);
   5018   } else {
   5019     Register size = ToRegister(instr->size());
   5020     __ FastAllocate(size, result, temp, flags);
   5021   }
   5022 }
   5023 
   5024 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
   5025   Register result = ToRegister(instr->result());
   5026 
   5027   // TODO(3095996): Get rid of this. For now, we need to make the
   5028   // result register contain a valid pointer because it is already
   5029   // contained in the register pointer map.
   5030   __ Move(result, Smi::kZero);
   5031 
   5032   PushSafepointRegistersScope scope(this);
   5033   if (instr->size()->IsRegister()) {
   5034     Register size = ToRegister(instr->size());
   5035     DCHECK(!size.is(result));
   5036     __ Integer32ToSmi(size, size);
   5037     __ Push(size);
   5038   } else {
   5039     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   5040     __ Push(Smi::FromInt(size));
   5041   }
   5042 
   5043   int flags = 0;
   5044   if (instr->hydrogen()->IsOldSpaceAllocation()) {
   5045     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
   5046     flags = AllocateTargetSpace::update(flags, OLD_SPACE);
   5047   } else {
   5048     flags = AllocateTargetSpace::update(flags, NEW_SPACE);
   5049   }
   5050   __ Push(Smi::FromInt(flags));
   5051 
   5052   CallRuntimeFromDeferred(
   5053       Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
   5054   __ StoreToSafepointRegisterSlot(result, rax);
   5055 
   5056   if (instr->hydrogen()->IsAllocationFoldingDominator()) {
   5057     AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
   5058     if (instr->hydrogen()->IsOldSpaceAllocation()) {
   5059       DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
   5060       allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
   5061     }
    5062     // If the allocation performed by the allocation folding dominator
    5063     // triggered a GC, the allocation happened in the runtime. We have to reset
    5064     // the top pointer to virtually undo the allocation.
   5065     ExternalReference allocation_top =
   5066         AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
   5067     __ subp(rax, Immediate(kHeapObjectTag));
   5068     __ Store(allocation_top, rax);
   5069     __ addp(rax, Immediate(kHeapObjectTag));
   5070   }
   5071 }
   5072 
   5073 
   5074 void LCodeGen::DoTypeof(LTypeof* instr) {
   5075   DCHECK(ToRegister(instr->context()).is(rsi));
   5076   DCHECK(ToRegister(instr->value()).is(rbx));
   5077   Label end, do_call;
   5078   Register value_register = ToRegister(instr->value());
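           // Smis are always typeof "number", so avoid the stub call for them.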
   5079   __ JumpIfNotSmi(value_register, &do_call);
   5080   __ Move(rax, isolate()->factory()->number_string());
   5081   __ jmp(&end);
   5082   __ bind(&do_call);
   5083   Callable callable = CodeFactory::Typeof(isolate());
   5084   CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
   5085   __ bind(&end);
   5086 }
   5087 
   5088 
   5089 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
   5090   DCHECK(!operand->IsDoubleRegister());
   5091   if (operand->IsConstantOperand()) {
   5092     __ Push(ToHandle(LConstantOperand::cast(operand)));
   5093   } else if (operand->IsRegister()) {
   5094     __ Push(ToRegister(operand));
   5095   } else {
   5096     __ Push(ToOperand(operand));
   5097   }
   5098 }
   5099 
   5100 
   5101 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
   5102   Register input = ToRegister(instr->value());
   5103   Condition final_branch_condition = EmitTypeofIs(instr, input);
   5104   if (final_branch_condition != no_condition) {
   5105     EmitBranch(instr, final_branch_condition);
   5106   }
   5107 }
   5108 
   5109 
   5110 Condition LCodeGen::EmitTypeofIs(LTypeofIsAndBranch* instr, Register input) {
   5111   Label* true_label = instr->TrueLabel(chunk_);
   5112   Label* false_label = instr->FalseLabel(chunk_);
   5113   Handle<String> type_name = instr->type_literal();
   5114   int left_block = instr->TrueDestination(chunk_);
   5115   int right_block = instr->FalseDestination(chunk_);
   5116   int next_block = GetNextEmittedBlock();
   5117 
   5118   Label::Distance true_distance = left_block == next_block ? Label::kNear
   5119                                                            : Label::kFar;
   5120   Label::Distance false_distance = right_block == next_block ? Label::kNear
   5121                                                              : Label::kFar;
   5122   Condition final_branch_condition = no_condition;
   5123   Factory* factory = isolate()->factory();
   5124   if (String::Equals(type_name, factory->number_string())) {
   5125     __ JumpIfSmi(input, true_label, true_distance);
   5126     __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),
   5127                    Heap::kHeapNumberMapRootIndex);
   5128 
   5129     final_branch_condition = equal;
   5130 
   5131   } else if (String::Equals(type_name, factory->string_string())) {
   5132     __ JumpIfSmi(input, false_label, false_distance);
   5133     __ CmpObjectType(input, FIRST_NONSTRING_TYPE, input);
   5134     final_branch_condition = below;
   5135 
   5136   } else if (String::Equals(type_name, factory->symbol_string())) {
   5137     __ JumpIfSmi(input, false_label, false_distance);
   5138     __ CmpObjectType(input, SYMBOL_TYPE, input);
   5139     final_branch_condition = equal;
   5140 
   5141   } else if (String::Equals(type_name, factory->boolean_string())) {
   5142     __ CompareRoot(input, Heap::kTrueValueRootIndex);
   5143     __ j(equal, true_label, true_distance);
   5144     __ CompareRoot(input, Heap::kFalseValueRootIndex);
   5145     final_branch_condition = equal;
   5146 
   5147   } else if (String::Equals(type_name, factory->undefined_string())) {
   5148     __ CompareRoot(input, Heap::kNullValueRootIndex);
   5149     __ j(equal, false_label, false_distance);
   5150     __ JumpIfSmi(input, false_label, false_distance);
   5151     // Check for undetectable objects => true.
   5152     __ movp(input, FieldOperand(input, HeapObject::kMapOffset));
   5153     __ testb(FieldOperand(input, Map::kBitFieldOffset),
   5154              Immediate(1 << Map::kIsUndetectable));
   5155     final_branch_condition = not_zero;
   5156 
   5157   } else if (String::Equals(type_name, factory->function_string())) {
   5158     __ JumpIfSmi(input, false_label, false_distance);
   5159     // Check for callable and not undetectable objects => true.
   5160     __ movp(input, FieldOperand(input, HeapObject::kMapOffset));
   5161     __ movzxbl(input, FieldOperand(input, Map::kBitFieldOffset));
   5162     __ andb(input,
   5163             Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
   5164     __ cmpb(input, Immediate(1 << Map::kIsCallable));
   5165     final_branch_condition = equal;
   5166 
   5167   } else if (String::Equals(type_name, factory->object_string())) {
   5168     __ JumpIfSmi(input, false_label, false_distance);
   5169     __ CompareRoot(input, Heap::kNullValueRootIndex);
   5170     __ j(equal, true_label, true_distance);
   5171     STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
   5172     __ CmpObjectType(input, FIRST_JS_RECEIVER_TYPE, input);
   5173     __ j(below, false_label, false_distance);
   5174     // Check for callable or undetectable objects => false.
   5175     __ testb(FieldOperand(input, Map::kBitFieldOffset),
   5176              Immediate((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
   5177     final_branch_condition = zero;
   5178 
   5179 // clang-format off
   5180 #define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)       \
   5181   } else if (String::Equals(type_name, factory->type##_string())) { \
   5182     __ JumpIfSmi(input, false_label, false_distance);               \
   5183     __ CompareRoot(FieldOperand(input, HeapObject::kMapOffset),     \
   5184                    Heap::k##Type##MapRootIndex);                    \
   5185     final_branch_condition = equal;
   5186   SIMD128_TYPES(SIMD128_TYPE)
   5187 #undef SIMD128_TYPE
   5188     // clang-format on
   5189 
   5190   } else {
   5191     __ jmp(false_label, false_distance);
   5192   }
   5193 
   5194   return final_branch_condition;
   5195 }
   5196 
   5197 
   5198 void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
   5199   if (info()->ShouldEnsureSpaceForLazyDeopt()) {
   5200     // Ensure that we have enough space after the previous lazy-bailout
   5201     // instruction for patching the code here.
   5202     int current_pc = masm()->pc_offset();
   5203     if (current_pc < last_lazy_deopt_pc_ + space_needed) {
   5204       int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
   5205       __ Nop(padding_size);
   5206     }
   5207   }
   5208   last_lazy_deopt_pc_ = masm()->pc_offset();
   5209 }
   5210 
   5211 
   5212 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
   5213   last_lazy_deopt_pc_ = masm()->pc_offset();
   5214   DCHECK(instr->HasEnvironment());
   5215   LEnvironment* env = instr->environment();
   5216   RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
   5217   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
   5218 }
   5219 
   5220 
   5221 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
   5222   Deoptimizer::BailoutType type = instr->hydrogen()->type();
    5223   // TODO(danno): Stubs expect all deopts to be lazy for historical reasons
    5224   // (they need the return address), even though the implementation of LAZY and
    5225   // EAGER is now identical. When LAZY is eventually completely folded into
    5226   // EAGER, remove the special case below.
   5227   if (info()->IsStub() && type == Deoptimizer::EAGER) {
   5228     type = Deoptimizer::LAZY;
   5229   }
   5230   DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
   5231 }
   5232 
   5233 
   5234 void LCodeGen::DoDummy(LDummy* instr) {
   5235   // Nothing to see here, move on!
   5236 }
   5237 
   5238 
   5239 void LCodeGen::DoDummyUse(LDummyUse* instr) {
   5240   // Nothing to see here, move on!
   5241 }
   5242 
   5243 
   5244 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
   5245   PushSafepointRegistersScope scope(this);
   5246   __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
   5247   __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
   5248   RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
   5249   DCHECK(instr->HasEnvironment());
   5250   LEnvironment* env = instr->environment();
   5251   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
   5252 }
   5253 
   5254 
   5255 void LCodeGen::DoStackCheck(LStackCheck* instr) {
   5256   class DeferredStackCheck final : public LDeferredCode {
   5257    public:
   5258     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
   5259         : LDeferredCode(codegen), instr_(instr) { }
   5260     void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
   5261     LInstruction* instr() override { return instr_; }
   5262 
   5263    private:
   5264     LStackCheck* instr_;
   5265   };
   5266 
   5267   DCHECK(instr->HasEnvironment());
   5268   LEnvironment* env = instr->environment();
   5269   // There is no LLazyBailout instruction for stack-checks. We have to
   5270   // prepare for lazy deoptimization explicitly here.
   5271   if (instr->hydrogen()->is_function_entry()) {
   5272     // Perform stack overflow check.
   5273     Label done;
   5274     __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
   5275     __ j(above_equal, &done, Label::kNear);
   5276 
   5277     DCHECK(instr->context()->IsRegister());
   5278     DCHECK(ToRegister(instr->context()).is(rsi));
   5279     CallCode(isolate()->builtins()->StackCheck(),
   5280              RelocInfo::CODE_TARGET,
   5281              instr);
   5282     __ bind(&done);
   5283   } else {
   5284     DCHECK(instr->hydrogen()->is_backwards_branch());
   5285     // Perform stack overflow check if this goto needs it before jumping.
   5286     DeferredStackCheck* deferred_stack_check =
   5287         new(zone()) DeferredStackCheck(this, instr);
   5288     __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
   5289     __ j(below, deferred_stack_check->entry());
   5290     EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
   5291     __ bind(instr->done_label());
   5292     deferred_stack_check->SetExit(instr->done_label());
   5293     RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
   5294     // Don't record a deoptimization index for the safepoint here.
   5295     // This will be done explicitly when emitting call and the safepoint in
   5296     // the deferred code.
   5297   }
   5298 }
   5299 
   5300 
   5301 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
   5302   // This is a pseudo-instruction that ensures that the environment here is
   5303   // properly registered for deoptimization and records the assembler's PC
   5304   // offset.
   5305   LEnvironment* environment = instr->environment();
   5306 
   5307   // If the environment were already registered, we would have no way of
   5308   // backpatching it with the spill slot operands.
   5309   DCHECK(!environment->HasBeenRegistered());
   5310   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
   5311 
   5312   GenerateOsrPrologue();
   5313 }
   5314 
   5315 
   5316 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
   5317   DCHECK(ToRegister(instr->context()).is(rsi));
   5318 
   5319   Label use_cache, call_runtime;
   5320   __ CheckEnumCache(&call_runtime);
   5321 
   5322   __ movp(rax, FieldOperand(rax, HeapObject::kMapOffset));
   5323   __ jmp(&use_cache, Label::kNear);
   5324 
   5325   // Get the set of properties to enumerate.
   5326   __ bind(&call_runtime);
   5327   __ Push(rax);
   5328   CallRuntime(Runtime::kForInEnumerate, instr);
   5329   __ bind(&use_cache);
   5330 }
   5331 
   5332 
   5333 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
   5334   Register map = ToRegister(instr->map());
   5335   Register result = ToRegister(instr->result());
   5336   Label load_cache, done;
   5337   __ EnumLength(result, map);
   5338   __ Cmp(result, Smi::kZero);
   5339   __ j(not_equal, &load_cache, Label::kNear);
   5340   __ LoadRoot(result, Heap::kEmptyFixedArrayRootIndex);
   5341   __ jmp(&done, Label::kNear);
   5342   __ bind(&load_cache);
   5343   __ LoadInstanceDescriptors(map, result);
   5344   __ movp(result,
   5345           FieldOperand(result, DescriptorArray::kEnumCacheOffset));
   5346   __ movp(result,
   5347           FieldOperand(result, FixedArray::SizeFor(instr->idx())));
   5348   __ bind(&done);
   5349   Condition cc = masm()->CheckSmi(result);
   5350   DeoptimizeIf(cc, instr, DeoptimizeReason::kNoCache);
   5351 }
   5352 
   5353 
   5354 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
   5355   Register object = ToRegister(instr->value());
   5356   __ cmpp(ToRegister(instr->map()),
   5357           FieldOperand(object, HeapObject::kMapOffset));
   5358   DeoptimizeIf(not_equal, instr, DeoptimizeReason::kWrongMap);
   5359 }
   5360 
   5361 
   5362 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
   5363                                            Register object,
   5364                                            Register index) {
   5365   PushSafepointRegistersScope scope(this);
   5366   __ Push(object);
   5367   __ Push(index);
   5368   __ xorp(rsi, rsi);
   5369   __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
   5370   RecordSafepointWithRegisters(
   5371       instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
   5372   __ StoreToSafepointRegisterSlot(object, rax);
   5373 }
   5374 
   5375 
   5376 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
   5377   class DeferredLoadMutableDouble final : public LDeferredCode {
   5378    public:
   5379     DeferredLoadMutableDouble(LCodeGen* codegen,
   5380                               LLoadFieldByIndex* instr,
   5381                               Register object,
   5382                               Register index)
   5383         : LDeferredCode(codegen),
   5384           instr_(instr),
   5385           object_(object),
   5386           index_(index) {
   5387     }
   5388     void Generate() override {
   5389       codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
   5390     }
   5391     LInstruction* instr() override { return instr_; }
   5392 
   5393    private:
   5394     LLoadFieldByIndex* instr_;
   5395     Register object_;
   5396     Register index_;
   5397   };
   5398 
   5399   Register object = ToRegister(instr->object());
   5400   Register index = ToRegister(instr->index());
   5401 
   5402   DeferredLoadMutableDouble* deferred;
   5403   deferred = new(zone()) DeferredLoadMutableDouble(this, instr, object, index);
   5404 
   5405   Label out_of_object, done;
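           // The field index is encoded as a Smi: bit 0 set means the field holds a
           // mutable heap number and must be loaded through the deferred runtime
           // call; the remaining bits give the property index, negative values
           // denoting out-of-object properties.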
   5406   __ Move(kScratchRegister, Smi::FromInt(1));
   5407   __ testp(index, kScratchRegister);
   5408   __ j(not_zero, deferred->entry());
   5409 
   5410   __ sarp(index, Immediate(1));
   5411 
   5412   __ SmiToInteger32(index, index);
   5413   __ cmpl(index, Immediate(0));
   5414   __ j(less, &out_of_object, Label::kNear);
   5415   __ movp(object, FieldOperand(object,
   5416                                index,
   5417                                times_pointer_size,
   5418                                JSObject::kHeaderSize));
   5419   __ jmp(&done, Label::kNear);
   5420 
   5421   __ bind(&out_of_object);
   5422   __ movp(object, FieldOperand(object, JSObject::kPropertiesOffset));
   5423   __ negl(index);
    5424   // Index is now equal to the out-of-object property index plus 1.
   5425   __ movp(object, FieldOperand(object,
   5426                                index,
   5427                                times_pointer_size,
   5428                                FixedArray::kHeaderSize - kPointerSize));
   5429   __ bind(deferred->exit());
   5430   __ bind(&done);
   5431 }
   5432 
   5433 #undef __
   5434 
   5435 }  // namespace internal
   5436 }  // namespace v8
   5437 
   5438 #endif  // V8_TARGET_ARCH_X64
   5439