// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/crankshaft/arm64/lithium-codegen-arm64.h"

#include "src/arm64/frames-arm64.h"
#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/crankshaft/arm64/lithium-gap-resolver-arm64.h"
#include "src/crankshaft/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/profiler/cpu-profiler.h"

namespace v8 {
namespace internal {


class SafepointGenerator final : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() { }

  virtual void BeforeCall(int call_size) const { }

  virtual void AfterCall() const {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};

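// A sketch of how SafepointGenerator is typically used (hypothetical call
// site; the names below are illustrative, not from this file):
//
//   SafepointGenerator generator(this, instr->pointer_map(),
//                                Safepoint::kLazyDeopt);
//   __ InvokeFunction(function_reg, actual_count, CALL_FUNCTION, generator);
//
// AfterCall() fires once the call instruction has been emitted, recording the
// pointer map at the return address so the GC can walk this frame.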

#define __ masm()->

// Emit code to branch if the given condition holds.
// The code generated here doesn't modify the flags and they must have
// been set by some prior instructions.
//
// The EmitInverted function simply inverts the condition.
class BranchOnCondition : public BranchGenerator {
 public:
  BranchOnCondition(LCodeGen* codegen, Condition cond)
    : BranchGenerator(codegen),
      cond_(cond) { }

  virtual void Emit(Label* label) const {
    __ B(cond_, label);
  }

  virtual void EmitInverted(Label* label) const {
    if (cond_ != al) {
      __ B(NegateCondition(cond_), label);
    }
  }

 private:
  Condition cond_;
};


// Emit code to compare lhs and rhs and branch if the condition holds.
// This uses MacroAssembler's CompareAndBranch function so it will handle
// converting the comparison to Cbz/Cbnz if the right-hand side is 0.
//
// EmitInverted still compares the two operands but inverts the condition.
class CompareAndBranch : public BranchGenerator {
 public:
  CompareAndBranch(LCodeGen* codegen,
                   Condition cond,
                   const Register& lhs,
                   const Operand& rhs)
    : BranchGenerator(codegen),
      cond_(cond),
      lhs_(lhs),
      rhs_(rhs) { }

  virtual void Emit(Label* label) const {
    __ CompareAndBranch(lhs_, rhs_, cond_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ CompareAndBranch(lhs_, rhs_, NegateCondition(cond_), label);
  }

 private:
  Condition cond_;
  const Register& lhs_;
  const Operand& rhs_;
};

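// Illustrative sketch (not in the original source): because emission is
// delegated to the MacroAssembler, a zero right-hand side such as
//
//   CompareAndBranch branch(this, eq, value, 0);
//   branch.Emit(label);            // may emit a single: Cbz value, label
//
// can collapse to one Cbz/Cbnz instead of a Cmp followed by B.cond.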

// Test the input with the given mask and branch if the condition holds.
// If the condition is 'eq' or 'ne' this will use MacroAssembler's
// TestAndBranchIfAllClear and TestAndBranchIfAnySet so it will handle the
// conversion to Tbz/Tbnz when possible.
class TestAndBranch : public BranchGenerator {
 public:
  TestAndBranch(LCodeGen* codegen,
                Condition cond,
                const Register& value,
                uint64_t mask)
    : BranchGenerator(codegen),
      cond_(cond),
      value_(value),
      mask_(mask) { }

  virtual void Emit(Label* label) const {
    switch (cond_) {
      case eq:
        __ TestAndBranchIfAllClear(value_, mask_, label);
        break;
      case ne:
        __ TestAndBranchIfAnySet(value_, mask_, label);
        break;
      default:
        __ Tst(value_, mask_);
        __ B(cond_, label);
    }
  }

  virtual void EmitInverted(Label* label) const {
    // The inverse of "all clear" is "any set" and vice versa.
    switch (cond_) {
      case eq:
        __ TestAndBranchIfAnySet(value_, mask_, label);
        break;
      case ne:
        __ TestAndBranchIfAllClear(value_, mask_, label);
        break;
      default:
        __ Tst(value_, mask_);
        __ B(NegateCondition(cond_), label);
    }
  }

 private:
  Condition cond_;
  const Register& value_;
  uint64_t mask_;
};

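// Illustrative sketch (not in the original source): a single-bit mask, e.g.
//
//   TestAndBranch branch(this, ne, value, 1 << 3);
//   branch.Emit(label);            // may emit: Tbnz value, #3, label
//
// can be emitted as one Tbz/Tbnz, while other conditions fall back to the
// explicit Tst + B.cond pair handled by the default cases above.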

// Test the input and branch if it is non-zero and not a NaN.
class BranchIfNonZeroNumber : public BranchGenerator {
 public:
  BranchIfNonZeroNumber(LCodeGen* codegen, const FPRegister& value,
                        const FPRegister& scratch)
    : BranchGenerator(codegen), value_(value), scratch_(scratch) { }

  virtual void Emit(Label* label) const {
    __ Fabs(scratch_, value_);
    // Compare with 0.0. Because scratch_ is positive, the result can be one of
    // nZCv (equal), nzCv (greater) or nzCV (unordered).
    __ Fcmp(scratch_, 0.0);
    __ B(gt, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ Fabs(scratch_, value_);
    __ Fcmp(scratch_, 0.0);
    __ B(le, label);
  }

 private:
  const FPRegister& value_;
  const FPRegister& scratch_;
};


// Test the input and branch if it is a heap number.
class BranchIfHeapNumber : public BranchGenerator {
 public:
  BranchIfHeapNumber(LCodeGen* codegen, const Register& value)
      : BranchGenerator(codegen), value_(value) { }

  virtual void Emit(Label* label) const {
    __ JumpIfHeapNumber(value_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ JumpIfNotHeapNumber(value_, label);
  }

 private:
  const Register& value_;
};


// Test the input and branch if it is the specified root value.
class BranchIfRoot : public BranchGenerator {
 public:
  BranchIfRoot(LCodeGen* codegen, const Register& value,
               Heap::RootListIndex index)
      : BranchGenerator(codegen), value_(value), index_(index) { }

  virtual void Emit(Label* label) const {
    __ JumpIfRoot(value_, index_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ JumpIfNotRoot(value_, index_, label);
  }

 private:
  const Register& value_;
  const Heap::RootListIndex index_;
};


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();

  WriteTranslation(environment->outer(), translation);
  WriteTranslationFrame(environment, translation);

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(
        environment, translation, value, environment->HasTaggedValueAt(i),
        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
  }
}

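// Illustrative note (a rough picture, not from the source): because the
// outer environment is written first, a two-frame environment produces a
// command stream shaped roughly like
//
//   <outer frame begin> <outer value commands...>
//   <inner frame begin> <inner value commands...>
//
// which lets the deoptimizer rebuild the frames from the outside in.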

void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    int index = op->index();
    if (index >= 0) {
      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
    }
    if (is_tagged) {
      translation->StoreStackSlot(index);
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(index);
    } else {
      translation->StoreInt32StackSlot(index);
    }
  } else if (op->IsDoubleStackSlot()) {
    int index = op->index();
    if (index >= 0) {
      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
    }
    translation->StoreDoubleStackSlot(index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}

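// Illustrative example (hypothetical counts): for an environment chain
// inner -> middle -> outer where only inner and outer are JS_FUNCTION
// frames, the loop above yields frame_count == 3 and jsframe_count == 2;
// both counts are needed to size the Translation.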

void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);

  Assembler::BlockPoolsScope scope(masm_);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  if ((code->kind() == Code::BINARY_OP_IC) ||
      (code->kind() == Code::COMPARE_IC)) {
    // Signal that we don't inline smi code before these stubs in the
    // optimizing code generator.
    InlineSmiCheckInfo::EmitNotInlined(masm());
  }
}


void LCodeGen::DoCallFunction(LCallFunction* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->function()).Is(x1));
  DCHECK(ToRegister(instr->result()).Is(x0));

  int arity = instr->arity();
  ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
  if (instr->hydrogen()->HasVectorAndSlot()) {
    Register slot_register = ToRegister(instr->temp_slot());
    Register vector_register = ToRegister(instr->temp_vector());
    DCHECK(slot_register.is(x3));
    DCHECK(vector_register.is(x2));

    AllowDeferredHandleDereference vector_structure_check;
    Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
    int index = vector->GetIndex(instr->hydrogen()->slot());

    __ Mov(vector_register, vector);
    __ Mov(slot_register, Operand(Smi::FromInt(index)));

    Handle<Code> ic =
        CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
    CallCode(ic, RelocInfo::CODE_TARGET, instr);
  } else {
    __ Mov(x0, arity);
    CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
  }
  RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
}


void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  DCHECK(instr->IsMarkedAsCall());
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->constructor()).is(x1));

  __ Mov(x0, Operand(instr->arity()));
  if (instr->arity() == 1) {
    // We only need the allocation site for the case where we have a length
    // argument. That case may bail out to the runtime, which will determine
    // the correct elements kind with the site.
    __ Mov(x2, instr->hydrogen()->site());
  } else {
    __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
  }

  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;

      // We might need to create a holey array; look at the first argument.
      __ Peek(x10, 0);
      __ Cbz(x10, &packed_case);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(),
                                              holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      __ B(&done);
      __ Bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
    __ Bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
  RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());

  DCHECK(ToRegister(instr->result()).is(x0));
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Mov(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ Ldr(cp, ToMemOperand(context, kMustUseFramePointer));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ LoadHeapObject(cp,
                      Handle<HeapObject>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
                                            SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::Kind kind,
                               int arguments,
                               Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(
      masm(), kind, arguments, deopt_mode);

  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}

void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
         GenerateJumpTable() && GenerateSafepointTable();
}


void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator iterator(doubles);
  int count = 0;
  while (!iterator.Done()) {
    // TODO(all): Is this supposed to save just the callee-saved doubles? It
    // looks like it's saving all of them.
    FPRegister value = FPRegister::from_code(iterator.Current());
    __ Poke(value, count * kDoubleSize);
    iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator iterator(doubles);
  int count = 0;
  while (!iterator.Done()) {
    // TODO(all): Is this supposed to restore just the callee-saved doubles? It
    // looks like it's restoring all of them.
    FPRegister value = FPRegister::from_code(iterator.Current());
    __ Peek(value, count * kDoubleSize);
    iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info()->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ Debug("stop-at", __LINE__, BREAK);
    }
#endif
  }

  DCHECK(__ StackPointer().Is(jssp));
  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue();
    } else {
      __ Prologue(info()->GeneratePreagedPrologue());
    }
    frame_is_built_ = true;
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    __ Claim(slots, kPointerSize);
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }
  return !is_aborted();
}


void LCodeGen::DoPrologue(LPrologue* instr) {
  Comment(";;; Prologue begin");

  // Allocate a local context if needed.
  if (info()->num_heap_slots() > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in x1.
    int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
    if (info()->scope()->is_script_scope()) {
      __ Mov(x10, Operand(info()->scope()->GetScopeInfo(info()->isolate())));
      __ Push(x1, x10);
      __ CallRuntime(Runtime::kNewScriptContext);
      deopt_mode = Safepoint::kLazyDeopt;
    } else if (slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ Push(x1);
      __ CallRuntime(Runtime::kNewFunctionContext);
    }
    RecordSafepoint(deopt_mode);
    // Context is returned in x0. It replaces the context passed to us. It's
    // saved in the stack and kept live in cp.
    __ Mov(cp, x0);
    __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
      if (var->IsContextSlot()) {
        Register value = x0;
        Register scratch = x3;

        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ Ldr(value, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextMemOperand(cp, var->index());
        __ Str(value, target);
        // Update the write barrier. This clobbers value and scratch.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(cp, static_cast<int>(target.offset()),
                                    value, scratch, GetLinkRegisterState(),
                                    kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  Comment(";;; Prologue end");
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if there
  // are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ Claim(slots);
}

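// Illustrative example (hypothetical numbers): if the optimized code needs
// 40 stack slots and the unoptimized frame already covers 12 of them, the
// Claim above reserves only the remaining 28 slots, so the unoptimized
// frame is subsumed rather than duplicated.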

void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());

      __ Bind(code->entry());

      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ Push(lr, fp, cp);
        __ Mov(fp, Smi::FromInt(StackFrame::STUB));
        __ Push(fp);
        __ Add(fp, __ StackPointer(),
               StandardFrameConstants::kFixedFrameSizeFromFp);
        Comment(";;; Deferred code");
      }

      code->Generate();

      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ Pop(xzr, cp, fp, lr);
        frame_is_built_ = false;
      }

      __ B(code->exit());
    }
  }

  // Force constant pool emission at the end of the deferred code to make
  // sure that no constant pools are emitted after deferred code, because
  // deferred code generation is the last step that generates code. The two
  // following steps will only output data used by Crankshaft.
  masm()->CheckConstPool(true, false);

  return !is_aborted();
}


bool LCodeGen::GenerateJumpTable() {
  Label needs_frame, call_deopt_entry;

  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
    Address base = jump_table_[0]->address;

    UseScratchRegisterScope temps(masm());
    Register entry_offset = temps.AcquireX();

    int length = jump_table_.length();
    for (int i = 0; i < length; i++) {
      Deoptimizer::JumpTableEntry* table_entry = jump_table_[i];
      __ Bind(&table_entry->label);

      Address entry = table_entry->address;
      DeoptComment(table_entry->deopt_info);

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load the base
      // address and add an immediate offset.
      __ Mov(entry_offset, entry - base);

      if (table_entry->needs_frame) {
        DCHECK(!info()->saves_caller_doubles());
        Comment(";;; call deopt with frame");
        // Save lr before Bl; fp will be adjusted in the needs_frame code.
        __ Push(lr, fp);
        // Reuse the existing needs_frame code.
        __ Bl(&needs_frame);
      } else {
        // There is nothing special to do, so just continue to the second-level
        // table.
        __ Bl(&call_deopt_entry);
      }
      info()->LogDeoptCallPosition(masm()->pc_offset(),
                                   table_entry->deopt_info.inlining_id);

      masm()->CheckConstPool(false, false);
    }

    if (needs_frame.is_linked()) {
      // This variant of deopt can only be used with stubs. Since we don't
      // have a function pointer to install in the stack frame that we're
      // building, install a special marker there instead.
      DCHECK(info()->IsStub());

      Comment(";;; needs_frame common code");
      UseScratchRegisterScope temps(masm());
      Register stub_marker = temps.AcquireX();
      __ Bind(&needs_frame);
      __ Mov(stub_marker, Smi::FromInt(StackFrame::STUB));
      __ Push(cp, stub_marker);
      __ Add(fp, __ StackPointer(), 2 * kPointerSize);
    }

    // Generate common code for calling the second-level deopt table.
    __ Bind(&call_deopt_entry);

    if (info()->saves_caller_doubles()) {
      DCHECK(info()->IsStub());
      RestoreCallerDoubles();
    }

    Register deopt_entry = temps.AcquireX();
    __ Mov(deopt_entry, Operand(reinterpret_cast<uint64_t>(base),
                                RelocInfo::RUNTIME_ENTRY));
    __ Add(deopt_entry, deopt_entry, entry_offset);
    __ Br(deopt_entry);
  }

  // Force constant pool emission at the end of the deopt jump table to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}

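// Illustrative note on the base + offset scheme above (hypothetical
// addresses): with base == 0x1000 and an entry at 0x1040, the per-entry code
// materializes only the immediate 0x40 into entry_offset; the shared tail
// then computes 0x1000 + 0x40 and branches there, saving a full 64-bit
// literal load per table entry.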

bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  // We do not know how much data will be emitted for the safepoint table, so
  // force emission of the veneer pool.
  masm()->CheckVeneerPool(true, true);
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}


void LCodeGen::DeoptimizeBranch(
    LInstruction* instr, Deoptimizer::DeoptReason deopt_reason,
    BranchType branch_type, Register reg, int bit,
    Deoptimizer::BailoutType* override_bailout_type) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  Deoptimizer::BailoutType bailout_type =
    info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;

  if (override_bailout_type != NULL) {
    bailout_type = *override_bailout_type;
  }

  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);

  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Label not_zero;
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());

    __ Push(x0, x1, x2);
    __ Mrs(x2, NZCV);
    __ Mov(x0, count);
    __ Ldr(w1, MemOperand(x0));
    __ Subs(x1, x1, 1);
    __ B(gt, &not_zero);
    __ Mov(w1, FLAG_deopt_every_n_times);
    __ Str(w1, MemOperand(x0));
    __ Pop(x2, x1, x0);
    DCHECK(frame_is_built_);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ Unreachable();

    __ Bind(&not_zero);
    __ Str(w1, MemOperand(x0));
    __ Msr(NZCV, x2);
    __ Pop(x2, x1, x0);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label dont_trap;
    __ B(&dont_trap, InvertBranchType(branch_type), reg, bit);
    __ Debug("trap_on_deopt", __LINE__, BREAK);
    __ Bind(&dont_trap);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);

  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through the jump table if we need to build a frame, or restore caller
  // doubles.
  if (branch_type == always &&
      frame_is_built_ && !info()->saves_caller_doubles()) {
    DeoptComment(deopt_info);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
  } else {
    Deoptimizer::JumpTableEntry* table_entry =
        new (zone()) Deoptimizer::JumpTableEntry(
            entry, deopt_info, bailout_type, !frame_is_built_);
    // We often have several deopts to the same entry; reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry->IsEquivalentTo(*jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    __ B(&jump_table_.last()->label, branch_type, reg, bit);
  }
}


void LCodeGen::Deoptimize(LInstruction* instr,
                          Deoptimizer::DeoptReason deopt_reason,
                          Deoptimizer::BailoutType* override_bailout_type) {
  DeoptimizeBranch(instr, deopt_reason, always, NoReg, -1,
                   override_bailout_type);
}


void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, static_cast<BranchType>(cond));
}


void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr,
                                Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, reg_zero, rt);
}


void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr,
                                   Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, reg_not_zero, rt);
}


void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr,
                                    Deoptimizer::DeoptReason deopt_reason) {
  int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
  DeoptimizeIfBitSet(rt, sign_bit, instr, deopt_reason);
}


void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr,
                               Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
}


void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
                                  Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
}


void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
                                LInstruction* instr,
                                Deoptimizer::DeoptReason deopt_reason) {
  __ CompareRoot(rt, index);
  DeoptimizeIf(eq, instr, deopt_reason);
}


void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
                                   LInstruction* instr,
                                   Deoptimizer::DeoptReason deopt_reason) {
  __ CompareRoot(rt, index);
  DeoptimizeIf(ne, instr, deopt_reason);
}


void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
                                     Deoptimizer::DeoptReason deopt_reason) {
  __ TestForMinusZero(input);
  DeoptimizeIf(vs, instr, deopt_reason);
}


void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) {
  __ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex);
  DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
}


void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
                                  Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, reg_bit_set, rt, bit);
}


void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
                                    Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, reg_bit_clear, rt, bit);
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    intptr_t current_pc = masm()->pc_offset();

    if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
      ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK((padding_size % kInstructionSize) == 0);
      InstructionAccurateScope instruction_accurate(
          masm(), padding_size / kInstructionSize);

      while (padding_size > 0) {
        __ nop();
        padding_size -= kInstructionSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}

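// Illustrative example (hypothetical numbers): with last_lazy_deopt_pc_ ==
// 64, space_needed == 16 and a current pc offset of 72, padding_size is 8,
// so two 4-byte nops are emitted and the patcher is guaranteed room to
// overwrite a full call sequence.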

Register LCodeGen::ToRegister(LOperand* op) const {
  // TODO(all): support zero register results, as ToRegister32.
  DCHECK((op != NULL) && op->IsRegister());
  return Register::from_code(op->index());
}


Register LCodeGen::ToRegister32(LOperand* op) const {
  DCHECK(op != NULL);
  if (op->IsConstantOperand()) {
    // If this is a constant operand, the result must be the zero register.
    DCHECK(ToInteger32(LConstantOperand::cast(op)) == 0);
    return wzr;
  } else {
    return ToRegister(op).W();
  }
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK((op != NULL) && op->IsDoubleRegister());
  return DoubleRegister::from_code(op->index());
}


Operand LCodeGen::ToOperand(LOperand* op) {
  DCHECK(op != NULL);
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand(0);
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand(0);
}


Operand LCodeGen::ToOperand32(LOperand* op) {
  DCHECK(op != NULL);
  if (op->IsRegister()) {
    return Operand(ToRegister32(op));
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      return Operand(constant->Integer32Value());
    } else {
      // Other constants not implemented.
      Abort(kToOperand32UnsupportedImmediate);
    }
  }
  // Other cases are not implemented.
  UNREACHABLE();
  return Operand(0);
}


static int64_t ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize;
}

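// Illustrative example: without an eager frame the parameters sit directly
// above the stack pointer, so index -1 maps to offset 0, index -2 to
// kPointerSize, and so on, per -(index + 1) * kPointerSize above.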

MemOperand LCodeGen::ToMemOperand(LOperand* op, StackMode stack_mode) const {
  DCHECK(op != NULL);
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    int fp_offset = StackSlotOffset(op->index());
    // Loads and stores have a bigger reach with positive offsets than with
    // negative ones. We try to access using jssp (positive offset) first,
    // then fall back to fp (negative offset) if that fails.
    //
    // We can reference a stack slot from jssp only if we know how much we've
    // put on the stack. We don't know this in the following cases:
    // - stack_mode != kCanUseStackPointer: this is the case when deferred
    //   code has saved the registers.
    // - saves_caller_doubles(): some double registers have been pushed, jssp
    //   references the end of the double registers and not the end of the stack
    //   slots.
    // In both of the cases above, we _could_ add the tracking information
    // required so that we can use jssp here, but in practice it isn't worth it.
    if ((stack_mode == kCanUseStackPointer) &&
        !info()->saves_caller_doubles()) {
      int jssp_offset_to_fp =
          StandardFrameConstants::kFixedFrameSizeFromFp +
          (pushed_arguments_ + GetStackSlotCount()) * kPointerSize;
      int jssp_offset = fp_offset + jssp_offset_to_fp;
      if (masm()->IsImmLSScaled(jssp_offset, LSDoubleWord)) {
        return MemOperand(masm()->StackPointer(), jssp_offset);
      }
    }
    return MemOperand(fp, fp_offset);
  } else {
    // Retrieve parameter without eager stack-frame relative to the
    // stack-pointer.
    return MemOperand(masm()->StackPointer(),
                      ArgumentsOffsetWithoutFrame(op->index()));
  }
}

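// Illustrative example (hypothetical numbers): with 4 stack slots, no pushed
// arguments and fp_offset == -24, the same slot is reachable at
// jssp + (-24 + kFixedFrameSizeFromFp + 4 * kPointerSize); the jssp form is
// used only when that offset fits a scaled load/store immediate.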

Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


template <class LI>
Operand LCodeGen::ToShiftedRightOperand32(LOperand* right, LI* shift_info) {
  if (shift_info->shift() == NO_SHIFT) {
    return ToOperand32(right);
  } else {
    return Operand(
        ToRegister32(right),
        shift_info->shift(),
        JSShiftAmountFromLConstant(shift_info->shift_amount()));
  }
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return constant->Integer32Value();
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = nv;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = eq;
      break;
    case Token::NE:
    case Token::NE_STRICT:
      cond = ne;
      break;
    case Token::LT:
      cond = is_unsigned ? lo : lt;
      break;
    case Token::GT:
      cond = is_unsigned ? hi : gt;
      break;
    case Token::LTE:
      cond = is_unsigned ? ls : le;
      break;
    case Token::GTE:
      cond = is_unsigned ? hs : ge;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}


template<class InstrType>
void LCodeGen::EmitBranchGeneric(InstrType instr,
                                 const BranchGenerator& branch) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();

  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    branch.EmitInverted(chunk_->GetAssemblyLabel(right_block));
  } else {
    branch.Emit(chunk_->GetAssemblyLabel(left_block));
    if (right_block != next_block) {
      __ B(chunk_->GetAssemblyLabel(right_block));
    }
  }
}

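// Illustrative note: the three cases above exist to avoid redundant jumps.
// If both destinations are the same block this degenerates to a goto; if the
// true block is next in emission order only the inverted branch to the false
// block is emitted; otherwise the branch targets the true block and a jump
// to the false block is emitted only when it does not fall through.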

template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
  DCHECK((condition != al) && (condition != nv));
  BranchOnCondition branch(this, condition);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitCompareAndBranch(InstrType instr,
                                    Condition condition,
                                    const Register& lhs,
                                    const Operand& rhs) {
  DCHECK((condition != al) && (condition != nv));
  CompareAndBranch branch(this, condition, lhs, rhs);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitTestAndBranch(InstrType instr,
                                 Condition condition,
                                 const Register& value,
                                 uint64_t mask) {
  DCHECK((condition != al) && (condition != nv));
  TestAndBranch branch(this, condition, value, mask);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitBranchIfNonZeroNumber(InstrType instr,
                                         const FPRegister& value,
                                         const FPRegister& scratch) {
  BranchIfNonZeroNumber branch(this, value, scratch);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitBranchIfHeapNumber(InstrType instr,
                                      const Register& value) {
  BranchIfHeapNumber branch(this, value);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitBranchIfRoot(InstrType instr,
                                const Register& value,
                                Heap::RootListIndex index) {
  BranchIfRoot branch(this, value, index);
  EmitBranchGeneric(instr, branch);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) {
      resolver_.Resolve(move);
    }
  }
}


void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());

  // The pointer to the arguments array comes from DoArgumentsElements.
  // It does not point directly to the arguments and there is an offset of
  // two words that we must take into account when accessing an argument.
  // Subtracting the index from length accounts for one, so we add one more.

  if (instr->length()->IsConstantOperand() &&
      instr->index()->IsConstantOperand()) {
    int index = ToInteger32(LConstantOperand::cast(instr->index()));
    int length = ToInteger32(LConstantOperand::cast(instr->length()));
    int offset = ((length - index) + 1) * kPointerSize;
    __ Ldr(result, MemOperand(arguments, offset));
  } else if (instr->index()->IsConstantOperand()) {
    Register length = ToRegister32(instr->length());
    int index = ToInteger32(LConstantOperand::cast(instr->index()));
    int loc = index - 1;
    if (loc != 0) {
      __ Sub(result.W(), length, loc);
      __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
    } else {
      __ Ldr(result, MemOperand(arguments, length, UXTW, kPointerSizeLog2));
    }
  } else {
    Register length = ToRegister32(instr->length());
    Operand index = ToOperand32(instr->index());
    __ Sub(result.W(), length, index);
    __ Add(result.W(), result.W(), 1);
    __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
  }
}

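// Illustrative example (hypothetical values): with length == 3 and
// index == 0, the element is loaded from
// (3 - 0 + 1) * kPointerSize == 4 * kPointerSize above `arguments`, matching
// the two-word offset described in the comment above.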
   1397 
   1398 void LCodeGen::DoAddE(LAddE* instr) {
   1399   Register result = ToRegister(instr->result());
   1400   Register left = ToRegister(instr->left());
   1401   Operand right = Operand(x0);  // Dummy initialization.
   1402   if (instr->hydrogen()->external_add_type() == AddOfExternalAndTagged) {
   1403     right = Operand(ToRegister(instr->right()));
   1404   } else if (instr->right()->IsConstantOperand()) {
   1405     right = ToInteger32(LConstantOperand::cast(instr->right()));
   1406   } else {
   1407     right = Operand(ToRegister32(instr->right()), SXTW);
   1408   }
   1409 
   1410   DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
   1411   __ Add(result, left, right);
   1412 }
   1413 
   1414 
   1415 void LCodeGen::DoAddI(LAddI* instr) {
   1416   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   1417   Register result = ToRegister32(instr->result());
   1418   Register left = ToRegister32(instr->left());
   1419   Operand right = ToShiftedRightOperand32(instr->right(), instr);
   1420 
   1421   if (can_overflow) {
   1422     __ Adds(result, left, right);
   1423     DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
   1424   } else {
   1425     __ Add(result, left, right);
   1426   }
   1427 }
   1428 
   1429 
   1430 void LCodeGen::DoAddS(LAddS* instr) {
   1431   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   1432   Register result = ToRegister(instr->result());
   1433   Register left = ToRegister(instr->left());
   1434   Operand right = ToOperand(instr->right());
   1435   if (can_overflow) {
   1436     __ Adds(result, left, right);
   1437     DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
   1438   } else {
   1439     __ Add(result, left, right);
   1440   }
   1441 }
   1442 
   1443 
   1444 void LCodeGen::DoAllocate(LAllocate* instr) {
   1445   class DeferredAllocate: public LDeferredCode {
   1446    public:
   1447     DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
   1448         : LDeferredCode(codegen), instr_(instr) { }
   1449     virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
   1450     virtual LInstruction* instr() { return instr_; }
   1451    private:
   1452     LAllocate* instr_;
   1453   };
   1454 
   1455   DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);
   1456 
   1457   Register result = ToRegister(instr->result());
   1458   Register temp1 = ToRegister(instr->temp1());
   1459   Register temp2 = ToRegister(instr->temp2());
   1460 
   1461   // Allocate memory for the object.
   1462   AllocationFlags flags = TAG_OBJECT;
   1463   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
   1464     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
   1465   }
   1466 
   1467   if (instr->hydrogen()->IsOldSpaceAllocation()) {
   1468     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
   1469     flags = static_cast<AllocationFlags>(flags | PRETENURE);
   1470   }
   1471 
   1472   if (instr->size()->IsConstantOperand()) {
   1473     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   1474     CHECK(size <= Page::kMaxRegularHeapObjectSize);
   1475     __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
   1476   } else {
   1477     Register size = ToRegister32(instr->size());
   1478     __ Sxtw(size.X(), size);
   1479     __ Allocate(size.X(), result, temp1, temp2, deferred->entry(), flags);
   1480   }
   1481 
   1482   __ Bind(deferred->exit());
   1483 
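           // Prefilling with the one-pointer filler map keeps the heap iterable
           // if a GC inspects the object before its fields are initialized.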
   1484   if (instr->hydrogen()->MustPrefillWithFiller()) {
   1485     Register start = temp1;
   1486     Register end = temp2;
   1487     Register filler = ToRegister(instr->temp3());
   1488 
   1489     __ Sub(start, result, kHeapObjectTag);
   1490 
   1491     if (instr->size()->IsConstantOperand()) {
   1492       int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   1493       __ Add(end, start, size);
   1494     } else {
   1495       __ Add(end, start, ToRegister(instr->size()));
   1496     }
   1497     __ LoadRoot(filler, Heap::kOnePointerFillerMapRootIndex);
   1498     __ InitializeFieldsWithFiller(start, end, filler);
   1499   } else {
   1500     DCHECK(instr->temp3() == NULL);
   1501   }
   1502 }
   1503 
   1504 
   1505 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
   1506   // TODO(3095996): Get rid of this. For now, we need to make the
   1507   // result register contain a valid pointer because it is already
   1508   // contained in the register pointer map.
   1509   __ Mov(ToRegister(instr->result()), Smi::FromInt(0));
   1510 
   1511   PushSafepointRegistersScope scope(this);
   1512   // We're in a SafepointRegistersScope so we can use any scratch registers.
   1513   Register size = x0;
   1514   if (instr->size()->IsConstantOperand()) {
   1515     __ Mov(size, ToSmi(LConstantOperand::cast(instr->size())));
   1516   } else {
   1517     __ SmiTag(size, ToRegister32(instr->size()).X());
   1518   }
   1519   int flags = AllocateDoubleAlignFlag::encode(
   1520       instr->hydrogen()->MustAllocateDoubleAligned());
   1521   if (instr->hydrogen()->IsOldSpaceAllocation()) {
   1522     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
   1523     flags = AllocateTargetSpace::update(flags, OLD_SPACE);
   1524   } else {
   1525     flags = AllocateTargetSpace::update(flags, NEW_SPACE);
   1526   }
   1527   __ Mov(x10, Smi::FromInt(flags));
   1528   __ Push(size, x10);
   1529 
   1530   CallRuntimeFromDeferred(
   1531       Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
   1532   __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));
   1533 }
   1534 
   1535 
   1536 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
   1537   Register receiver = ToRegister(instr->receiver());
   1538   Register function = ToRegister(instr->function());
   1539   Register length = ToRegister32(instr->length());
   1540 
   1541   Register elements = ToRegister(instr->elements());
   1542   Register scratch = x5;
   1543   DCHECK(receiver.Is(x0));  // Used for parameter count.
   1544   DCHECK(function.Is(x1));  // Required by InvokeFunction.
   1545   DCHECK(ToRegister(instr->result()).Is(x0));
   1546   DCHECK(instr->IsMarkedAsCall());
   1547 
   1548   // Copy the arguments to this function possibly from the
   1549   // adaptor frame below it.
   1550   const uint32_t kArgumentsLimit = 1 * KB;
   1551   __ Cmp(length, kArgumentsLimit);
   1552   DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments);
   1553 
   1554   // Push the receiver and use the register to keep the original
   1555   // number of arguments.
   1556   __ Push(receiver);
   1557   Register argc = receiver;
   1558   receiver = NoReg;
   1559   __ Sxtw(argc, length);
    1560   // The arguments are at an offset of one pointer size from elements.
   1561   __ Add(elements, elements, 1 * kPointerSize);
   1562 
   1563   // Loop through the arguments pushing them onto the execution
   1564   // stack.
   1565   Label invoke, loop;
   1566   // length is a small non-negative integer, due to the test above.
   1567   __ Cbz(length, &invoke);
   1568   __ Bind(&loop);
   1569   __ Ldr(scratch, MemOperand(elements, length, SXTW, kPointerSizeLog2));
   1570   __ Push(scratch);
   1571   __ Subs(length, length, 1);
   1572   __ B(ne, &loop);
   1573 
   1574   __ Bind(&invoke);
   1575   DCHECK(instr->HasPointerMap());
   1576   LPointerMap* pointers = instr->pointer_map();
   1577   SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
   1578   // The number of arguments is stored in argc (receiver) which is x0, as
   1579   // expected by InvokeFunction.
   1580   ParameterCount actual(argc);
   1581   __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
   1582                     safepoint_generator);
   1583 }
   1584 
   1585 
   1586 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
   1587   Register result = ToRegister(instr->result());
   1588 
   1589   if (instr->hydrogen()->from_inlined()) {
   1590     // When we are inside an inlined function, the arguments are the last things
   1591     // that have been pushed on the stack. Therefore the arguments array can be
   1592     // accessed directly from jssp.
    1593     // However, in the normal case, it is accessed via fp, but there are two
    1594     // words on the stack between fp and the arguments (the saved lr and fp),
    1595     // and the LAccessArgumentsAt implementation takes that into account.
    1596     // In the inlined case we need to subtract the size of two words from
    1597     // jssp to get a pointer which will work well with LAccessArgumentsAt.
   1598     DCHECK(masm()->StackPointer().Is(jssp));
   1599     __ Sub(result, jssp, 2 * kPointerSize);
   1600   } else {
   1601     DCHECK(instr->temp() != NULL);
   1602     Register previous_fp = ToRegister(instr->temp());
   1603 
   1604     __ Ldr(previous_fp,
   1605            MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   1606     __ Ldr(result,
   1607            MemOperand(previous_fp, StandardFrameConstants::kContextOffset));
   1608     __ Cmp(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   1609     __ Csel(result, fp, previous_fp, ne);
   1610   }
   1611 }
   1612 
   1613 
   1614 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
   1615   Register elements = ToRegister(instr->elements());
   1616   Register result = ToRegister32(instr->result());
   1617   Label done;
   1618 
    1619   // If there is no arguments adaptor frame, the number of arguments is fixed.
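           // DoArgumentsElements selects fp itself (via Csel) when there is no
           // adaptor frame, so comparing elements with fp detects that case.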
   1620   __ Cmp(fp, elements);
   1621   __ Mov(result, scope()->num_parameters());
   1622   __ B(eq, &done);
   1623 
   1624   // Arguments adaptor frame present. Get argument length from there.
   1625   __ Ldr(result.X(), MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   1626   __ Ldr(result,
   1627          UntagSmiMemOperand(result.X(),
   1628                             ArgumentsAdaptorFrameConstants::kLengthOffset));
   1629 
   1630   // Argument length is in result register.
   1631   __ Bind(&done);
   1632 }
   1633 
   1634 
   1635 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
   1636   DoubleRegister left = ToDoubleRegister(instr->left());
   1637   DoubleRegister right = ToDoubleRegister(instr->right());
   1638   DoubleRegister result = ToDoubleRegister(instr->result());
   1639 
   1640   switch (instr->op()) {
   1641     case Token::ADD: __ Fadd(result, left, right); break;
   1642     case Token::SUB: __ Fsub(result, left, right); break;
   1643     case Token::MUL: __ Fmul(result, left, right); break;
   1644     case Token::DIV: __ Fdiv(result, left, right); break;
   1645     case Token::MOD: {
   1646       // The ECMA-262 remainder operator is the remainder from a truncating
   1647       // (round-towards-zero) division. Note that this differs from IEEE-754.
   1648       //
   1649       // TODO(jbramley): See if it's possible to do this inline, rather than by
   1650       // calling a helper function. With frintz (to produce the intermediate
   1651       // quotient) and fmsub (to calculate the remainder without loss of
   1652       // precision), it should be possible. However, we would need support for
   1653       // fdiv in round-towards-zero mode, and the ARM64 simulator doesn't
   1654       // support that yet.
   1655       DCHECK(left.Is(d0));
   1656       DCHECK(right.Is(d1));
   1657       __ CallCFunction(
   1658           ExternalReference::mod_two_doubles_operation(isolate()),
   1659           0, 2);
   1660       DCHECK(result.Is(d0));
   1661       break;
   1662     }
   1663     default:
   1664       UNREACHABLE();
   1665       break;
   1666   }
   1667 }
   1668 
   1669 
   1670 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
   1671   DCHECK(ToRegister(instr->context()).is(cp));
   1672   DCHECK(ToRegister(instr->left()).is(x1));
   1673   DCHECK(ToRegister(instr->right()).is(x0));
   1674   DCHECK(ToRegister(instr->result()).is(x0));
   1675 
   1676   Handle<Code> code =
   1677       CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
   1678   CallCode(code, RelocInfo::CODE_TARGET, instr);
   1679 }
   1680 
   1681 
   1682 void LCodeGen::DoBitI(LBitI* instr) {
   1683   Register result = ToRegister32(instr->result());
   1684   Register left = ToRegister32(instr->left());
   1685   Operand right = ToShiftedRightOperand32(instr->right(), instr);
   1686 
   1687   switch (instr->op()) {
   1688     case Token::BIT_AND: __ And(result, left, right); break;
   1689     case Token::BIT_OR:  __ Orr(result, left, right); break;
   1690     case Token::BIT_XOR: __ Eor(result, left, right); break;
   1691     default:
   1692       UNREACHABLE();
   1693       break;
   1694   }
   1695 }
   1696 
   1697 
   1698 void LCodeGen::DoBitS(LBitS* instr) {
   1699   Register result = ToRegister(instr->result());
   1700   Register left = ToRegister(instr->left());
   1701   Operand right = ToOperand(instr->right());
   1702 
   1703   switch (instr->op()) {
   1704     case Token::BIT_AND: __ And(result, left, right); break;
   1705     case Token::BIT_OR:  __ Orr(result, left, right); break;
   1706     case Token::BIT_XOR: __ Eor(result, left, right); break;
   1707     default:
   1708       UNREACHABLE();
   1709       break;
   1710   }
   1711 }
   1712 
   1713 
   1714 void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) {
   1715   Condition cond = instr->hydrogen()->allow_equality() ? hi : hs;
   1716   DCHECK(instr->hydrogen()->index()->representation().IsInteger32());
   1717   DCHECK(instr->hydrogen()->length()->representation().IsInteger32());
   1718   if (instr->index()->IsConstantOperand()) {
   1719     Operand index = ToOperand32(instr->index());
   1720     Register length = ToRegister32(instr->length());
   1721     __ Cmp(length, index);
   1722     cond = CommuteCondition(cond);
   1723   } else {
   1724     Register index = ToRegister32(instr->index());
   1725     Operand length = ToOperand32(instr->length());
   1726     __ Cmp(index, length);
   1727   }
   1728   if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
   1729     __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
   1730   } else {
   1731     DeoptimizeIf(cond, instr, Deoptimizer::kOutOfBounds);
   1732   }
   1733 }
   1734 
   1735 
   1736 void LCodeGen::DoBranch(LBranch* instr) {
   1737   Representation r = instr->hydrogen()->value()->representation();
   1738   Label* true_label = instr->TrueLabel(chunk_);
   1739   Label* false_label = instr->FalseLabel(chunk_);
   1740 
   1741   if (r.IsInteger32()) {
   1742     DCHECK(!info()->IsStub());
   1743     EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0);
   1744   } else if (r.IsSmi()) {
   1745     DCHECK(!info()->IsStub());
   1746     STATIC_ASSERT(kSmiTag == 0);
   1747     EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0);
   1748   } else if (r.IsDouble()) {
   1749     DoubleRegister value = ToDoubleRegister(instr->value());
   1750     // Test the double value. Zero and NaN are false.
   1751     EmitBranchIfNonZeroNumber(instr, value, double_scratch());
   1752   } else {
   1753     DCHECK(r.IsTagged());
   1754     Register value = ToRegister(instr->value());
   1755     HType type = instr->hydrogen()->value()->type();
   1756 
   1757     if (type.IsBoolean()) {
   1758       DCHECK(!info()->IsStub());
   1759       __ CompareRoot(value, Heap::kTrueValueRootIndex);
   1760       EmitBranch(instr, eq);
   1761     } else if (type.IsSmi()) {
   1762       DCHECK(!info()->IsStub());
   1763       EmitCompareAndBranch(instr, ne, value, Smi::FromInt(0));
   1764     } else if (type.IsJSArray()) {
   1765       DCHECK(!info()->IsStub());
   1766       EmitGoto(instr->TrueDestination(chunk()));
   1767     } else if (type.IsHeapNumber()) {
   1768       DCHECK(!info()->IsStub());
   1769       __ Ldr(double_scratch(), FieldMemOperand(value,
   1770                                                HeapNumber::kValueOffset));
   1771       // Test the double value. Zero and NaN are false.
   1772       EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch());
   1773     } else if (type.IsString()) {
   1774       DCHECK(!info()->IsStub());
   1775       Register temp = ToRegister(instr->temp1());
   1776       __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset));
   1777       EmitCompareAndBranch(instr, ne, temp, 0);
   1778     } else {
   1779       ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
   1780       // Avoid deopts in the case where we've never executed this path before.
   1781       if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
   1782 
   1783       if (expected.Contains(ToBooleanStub::UNDEFINED)) {
   1784         // undefined -> false.
   1785         __ JumpIfRoot(
   1786             value, Heap::kUndefinedValueRootIndex, false_label);
   1787       }
   1788 
   1789       if (expected.Contains(ToBooleanStub::BOOLEAN)) {
   1790         // Boolean -> its value.
   1791         __ JumpIfRoot(
   1792             value, Heap::kTrueValueRootIndex, true_label);
   1793         __ JumpIfRoot(
   1794             value, Heap::kFalseValueRootIndex, false_label);
   1795       }
   1796 
   1797       if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
   1798         // 'null' -> false.
   1799         __ JumpIfRoot(
   1800             value, Heap::kNullValueRootIndex, false_label);
   1801       }
   1802 
   1803       if (expected.Contains(ToBooleanStub::SMI)) {
   1804         // Smis: 0 -> false, all other -> true.
   1805         DCHECK(Smi::FromInt(0) == 0);
   1806         __ Cbz(value, false_label);
   1807         __ JumpIfSmi(value, true_label);
   1808       } else if (expected.NeedsMap()) {
   1809         // If we need a map later and have a smi, deopt.
   1810         DeoptimizeIfSmi(value, instr, Deoptimizer::kSmi);
   1811       }
   1812 
   1813       Register map = NoReg;
   1814       Register scratch = NoReg;
   1815 
   1816       if (expected.NeedsMap()) {
   1817         DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
   1818         map = ToRegister(instr->temp1());
   1819         scratch = ToRegister(instr->temp2());
   1820 
   1821         __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
   1822 
   1823         if (expected.CanBeUndetectable()) {
   1824           // Undetectable -> false.
   1825           __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
   1826           __ TestAndBranchIfAnySet(
   1827               scratch, 1 << Map::kIsUndetectable, false_label);
   1828         }
   1829       }
   1830 
   1831       if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
   1832         // spec object -> true.
   1833         __ CompareInstanceType(map, scratch, FIRST_JS_RECEIVER_TYPE);
   1834         __ B(ge, true_label);
   1835       }
   1836 
   1837       if (expected.Contains(ToBooleanStub::STRING)) {
   1838         // String value -> false iff empty.
   1839         Label not_string;
   1840         __ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE);
   1841         __ B(ge, &not_string);
   1842         __ Ldr(scratch, FieldMemOperand(value, String::kLengthOffset));
   1843         __ Cbz(scratch, false_label);
   1844         __ B(true_label);
   1845         __ Bind(&not_string);
   1846       }
   1847 
   1848       if (expected.Contains(ToBooleanStub::SYMBOL)) {
   1849         // Symbol value -> true.
   1850         __ CompareInstanceType(map, scratch, SYMBOL_TYPE);
   1851         __ B(eq, true_label);
   1852       }
   1853 
   1854       if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
   1855         // SIMD value -> true.
   1856         __ CompareInstanceType(map, scratch, SIMD128_VALUE_TYPE);
   1857         __ B(eq, true_label);
   1858       }
   1859 
   1860       if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
   1861         Label not_heap_number;
   1862         __ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);
   1863 
   1864         __ Ldr(double_scratch(),
   1865                FieldMemOperand(value, HeapNumber::kValueOffset));
   1866         __ Fcmp(double_scratch(), 0.0);
   1867         // If we got a NaN (overflow bit is set), jump to the false branch.
   1868         __ B(vs, false_label);
   1869         __ B(eq, false_label);
   1870         __ B(true_label);
   1871         __ Bind(&not_heap_number);
   1872       }
   1873 
   1874       if (!expected.IsGeneric()) {
   1875         // We've seen something for the first time -> deopt.
   1876         // This can only happen if we are not generic already.
   1877         Deoptimize(instr, Deoptimizer::kUnexpectedObject);
   1878       }
   1879     }
   1880   }
   1881 }
   1882 
   1883 
   1884 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
   1885                                  int formal_parameter_count, int arity,
   1886                                  LInstruction* instr) {
   1887   bool dont_adapt_arguments =
   1888       formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
   1889   bool can_invoke_directly =
   1890       dont_adapt_arguments || formal_parameter_count == arity;
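           // Direct invocation skips the arguments adaptor: it is only safe if
           // the callee opts out of argument adaptation or the actual argument
           // count matches the formal parameter count.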
   1891 
   1892   // The function interface relies on the following register assignments.
   1893   Register function_reg = x1;
   1894   Register arity_reg = x0;
   1895 
   1896   LPointerMap* pointers = instr->pointer_map();
   1897 
   1898   if (FLAG_debug_code) {
   1899     Label is_not_smi;
   1900     // Try to confirm that function_reg (x1) is a tagged pointer.
   1901     __ JumpIfNotSmi(function_reg, &is_not_smi);
   1902     __ Abort(kExpectedFunctionObject);
   1903     __ Bind(&is_not_smi);
   1904   }
   1905 
   1906   if (can_invoke_directly) {
   1907     // Change context.
   1908     __ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
   1909 
   1910     // Always initialize new target and number of actual arguments.
   1911     __ LoadRoot(x3, Heap::kUndefinedValueRootIndex);
   1912     __ Mov(arity_reg, arity);
   1913 
   1914     // Invoke function.
   1915     __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
   1916     __ Call(x10);
   1917 
   1918     // Set up deoptimization.
   1919     RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
   1920   } else {
   1921     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   1922     ParameterCount count(arity);
   1923     ParameterCount expected(formal_parameter_count);
   1924     __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
   1925   }
   1926 }
   1927 
   1928 
   1929 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
   1930   DCHECK(instr->IsMarkedAsCall());
   1931   DCHECK(ToRegister(instr->result()).Is(x0));
   1932 
   1933   if (instr->hydrogen()->IsTailCall()) {
   1934     if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
   1935 
   1936     if (instr->target()->IsConstantOperand()) {
   1937       LConstantOperand* target = LConstantOperand::cast(instr->target());
   1938       Handle<Code> code = Handle<Code>::cast(ToHandle(target));
   1939       // TODO(all): on ARM we use a call descriptor to specify a storage mode
   1940       // but on ARM64 we only have one storage mode so it isn't necessary. Check
   1941       // this understanding is correct.
   1942       __ Jump(code, RelocInfo::CODE_TARGET);
   1943     } else {
   1944       DCHECK(instr->target()->IsRegister());
   1945       Register target = ToRegister(instr->target());
   1946       __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
   1947       __ Br(target);
   1948     }
   1949   } else {
   1950     LPointerMap* pointers = instr->pointer_map();
   1951     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   1952 
   1953     if (instr->target()->IsConstantOperand()) {
   1954       LConstantOperand* target = LConstantOperand::cast(instr->target());
   1955       Handle<Code> code = Handle<Code>::cast(ToHandle(target));
   1956       generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
   1957       // TODO(all): on ARM we use a call descriptor to specify a storage mode
   1958       // but on ARM64 we only have one storage mode so it isn't necessary. Check
   1959       // this understanding is correct.
   1960       __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
   1961     } else {
   1962       DCHECK(instr->target()->IsRegister());
   1963       Register target = ToRegister(instr->target());
   1964       generator.BeforeCall(__ CallSize(target));
   1965       __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
   1966       __ Call(target);
   1967     }
   1968     generator.AfterCall();
   1969   }
   1970 
   1971   RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
   1972 }
   1973 
   1974 
   1975 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
   1976   DCHECK(instr->IsMarkedAsCall());
   1977   DCHECK(ToRegister(instr->function()).is(x1));
   1978 
   1979   // Change context.
   1980   __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
   1981 
   1982   // Always initialize new target and number of actual arguments.
   1983   __ LoadRoot(x3, Heap::kUndefinedValueRootIndex);
   1984   __ Mov(x0, instr->arity());
   1985 
   1986   // Load the code entry address
   1987   __ Ldr(x10, FieldMemOperand(x1, JSFunction::kCodeEntryOffset));
   1988   __ Call(x10);
   1989 
   1990   RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
   1991   RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
   1992 }
   1993 
   1994 
   1995 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
   1996   CallRuntime(instr->function(), instr->arity(), instr);
   1997   RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
   1998 }
   1999 
   2000 
   2001 void LCodeGen::DoCallStub(LCallStub* instr) {
   2002   DCHECK(ToRegister(instr->context()).is(cp));
   2003   DCHECK(ToRegister(instr->result()).is(x0));
   2004   switch (instr->hydrogen()->major_key()) {
   2005     case CodeStub::RegExpExec: {
   2006       RegExpExecStub stub(isolate());
   2007       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   2008       break;
   2009     }
   2010     case CodeStub::SubString: {
   2011       SubStringStub stub(isolate());
   2012       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   2013       break;
   2014     }
   2015     default:
   2016       UNREACHABLE();
   2017   }
   2018   RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
   2019 }
   2020 
   2021 
   2022 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
   2023   GenerateOsrPrologue();
   2024 }
   2025 
   2026 
   2027 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
   2028   Register temp = ToRegister(instr->temp());
   2029   {
   2030     PushSafepointRegistersScope scope(this);
   2031     __ Push(object);
   2032     __ Mov(cp, 0);
   2033     __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
   2034     RecordSafepointWithRegisters(
   2035         instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
   2036     __ StoreToSafepointRegisterSlot(x0, temp);
   2037   }
   2038   DeoptimizeIfSmi(temp, instr, Deoptimizer::kInstanceMigrationFailed);
   2039 }
   2040 
   2041 
   2042 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
   2043   class DeferredCheckMaps: public LDeferredCode {
   2044    public:
   2045     DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
   2046         : LDeferredCode(codegen), instr_(instr), object_(object) {
   2047       SetExit(check_maps());
   2048     }
   2049     virtual void Generate() {
   2050       codegen()->DoDeferredInstanceMigration(instr_, object_);
   2051     }
   2052     Label* check_maps() { return &check_maps_; }
   2053     virtual LInstruction* instr() { return instr_; }
   2054    private:
   2055     LCheckMaps* instr_;
   2056     Label check_maps_;
   2057     Register object_;
   2058   };
   2059 
   2060   if (instr->hydrogen()->IsStabilityCheck()) {
   2061     const UniqueSet<Map>* maps = instr->hydrogen()->maps();
   2062     for (int i = 0; i < maps->size(); ++i) {
   2063       AddStabilityDependency(maps->at(i).handle());
   2064     }
   2065     return;
   2066   }
   2067 
   2068   Register object = ToRegister(instr->value());
   2069   Register map_reg = ToRegister(instr->temp());
   2070 
   2071   __ Ldr(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));
   2072 
   2073   DeferredCheckMaps* deferred = NULL;
   2074   if (instr->hydrogen()->HasMigrationTarget()) {
   2075     deferred = new(zone()) DeferredCheckMaps(this, instr, object);
   2076     __ Bind(deferred->check_maps());
   2077   }
   2078 
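           // Compare the object's map against each candidate in turn, branching
           // to success on a match. The final comparison falls through so that a
           // mismatch can jump to the migration stub or deoptimize directly.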
   2079   const UniqueSet<Map>* maps = instr->hydrogen()->maps();
   2080   Label success;
   2081   for (int i = 0; i < maps->size() - 1; i++) {
   2082     Handle<Map> map = maps->at(i).handle();
   2083     __ CompareMap(map_reg, map);
   2084     __ B(eq, &success);
   2085   }
   2086   Handle<Map> map = maps->at(maps->size() - 1).handle();
   2087   __ CompareMap(map_reg, map);
   2088 
   2089   // We didn't match a map.
   2090   if (instr->hydrogen()->HasMigrationTarget()) {
   2091     __ B(ne, deferred->entry());
   2092   } else {
   2093     DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
   2094   }
   2095 
   2096   __ Bind(&success);
   2097 }
   2098 
   2099 
   2100 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
   2101   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   2102     DeoptimizeIfSmi(ToRegister(instr->value()), instr, Deoptimizer::kSmi);
   2103   }
   2104 }
   2105 
   2106 
   2107 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
   2108   Register value = ToRegister(instr->value());
   2109   DCHECK(!instr->result() || ToRegister(instr->result()).Is(value));
   2110   DeoptimizeIfNotSmi(value, instr, Deoptimizer::kNotASmi);
   2111 }
   2112 
   2113 
   2114 void LCodeGen::DoCheckArrayBufferNotNeutered(
   2115     LCheckArrayBufferNotNeutered* instr) {
   2116   UseScratchRegisterScope temps(masm());
   2117   Register view = ToRegister(instr->view());
   2118   Register scratch = temps.AcquireX();
   2119 
   2120   __ Ldr(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
   2121   __ Ldr(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
   2122   __ Tst(scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
   2123   DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds);
   2124 }
   2125 
   2126 
   2127 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
   2128   Register input = ToRegister(instr->value());
   2129   Register scratch = ToRegister(instr->temp());
   2130 
   2131   __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
   2132   __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
   2133 
   2134   if (instr->hydrogen()->is_interval_check()) {
   2135     InstanceType first, last;
   2136     instr->hydrogen()->GetCheckInterval(&first, &last);
   2137 
   2138     __ Cmp(scratch, first);
   2139     if (first == last) {
    2140       // If there is only one type in the interval, check for equality.
   2141       DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
   2142     } else if (last == LAST_TYPE) {
   2143       // We don't need to compare with the higher bound of the interval.
   2144       DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType);
   2145     } else {
   2146       // If we are below the lower bound, set the C flag and clear the Z flag
   2147       // to force a deopt.
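               // Ccmp only compares scratch with 'last' when the previous Cmp
               // set hs (scratch >= first); otherwise it writes CFlag (C set,
               // Z clear) directly, which satisfies the hi condition below.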
   2148       __ Ccmp(scratch, last, CFlag, hs);
   2149       DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType);
   2150     }
   2151   } else {
   2152     uint8_t mask;
   2153     uint8_t tag;
   2154     instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
   2155 
   2156     if (base::bits::IsPowerOfTwo32(mask)) {
   2157       DCHECK((tag == 0) || (tag == mask));
   2158       if (tag == 0) {
   2159         DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr,
   2160                            Deoptimizer::kWrongInstanceType);
   2161       } else {
   2162         DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr,
   2163                              Deoptimizer::kWrongInstanceType);
   2164       }
   2165     } else {
   2166       if (tag == 0) {
   2167         __ Tst(scratch, mask);
   2168       } else {
   2169         __ And(scratch, scratch, mask);
   2170         __ Cmp(scratch, tag);
   2171       }
   2172       DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
   2173     }
   2174   }
   2175 }
   2176 
   2177 
   2178 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
   2179   DoubleRegister input = ToDoubleRegister(instr->unclamped());
   2180   Register result = ToRegister32(instr->result());
   2181   __ ClampDoubleToUint8(result, input, double_scratch());
   2182 }
   2183 
   2184 
   2185 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
   2186   Register input = ToRegister32(instr->unclamped());
   2187   Register result = ToRegister32(instr->result());
   2188   __ ClampInt32ToUint8(result, input);
   2189 }
   2190 
   2191 
   2192 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
   2193   Register input = ToRegister(instr->unclamped());
   2194   Register result = ToRegister32(instr->result());
   2195   Label done;
   2196 
   2197   // Both smi and heap number cases are handled.
   2198   Label is_not_smi;
   2199   __ JumpIfNotSmi(input, &is_not_smi);
   2200   __ SmiUntag(result.X(), input);
   2201   __ ClampInt32ToUint8(result);
   2202   __ B(&done);
   2203 
   2204   __ Bind(&is_not_smi);
   2205 
   2206   // Check for heap number.
   2207   Label is_heap_number;
   2208   __ JumpIfHeapNumber(input, &is_heap_number);
   2209 
    2210   // Check for undefined. Undefined is converted to zero when clamping.
   2211   DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
   2212                       Deoptimizer::kNotAHeapNumberUndefined);
   2213   __ Mov(result, 0);
   2214   __ B(&done);
   2215 
   2216   // Heap number case.
   2217   __ Bind(&is_heap_number);
   2218   DoubleRegister dbl_scratch = double_scratch();
   2219   DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp1());
   2220   __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
   2221   __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2);
   2222 
   2223   __ Bind(&done);
   2224 }
   2225 
   2226 
   2227 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
   2228   DoubleRegister value_reg = ToDoubleRegister(instr->value());
   2229   Register result_reg = ToRegister(instr->result());
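           // HIGH copies all 64 bits to a core register and shifts the upper
           // word down; LOW moves only the S (low 32-bit) view of the register.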
   2230   if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
   2231     __ Fmov(result_reg, value_reg);
   2232     __ Lsr(result_reg, result_reg, 32);
   2233   } else {
   2234     __ Fmov(result_reg.W(), value_reg.S());
   2235   }
   2236 }
   2237 
   2238 
   2239 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
   2240   Register hi_reg = ToRegister(instr->hi());
   2241   Register lo_reg = ToRegister(instr->lo());
   2242   DoubleRegister result_reg = ToDoubleRegister(instr->result());
   2243 
   2244   // Insert the least significant 32 bits of hi_reg into the most significant
   2245   // 32 bits of lo_reg, and move to a floating point register.
   2246   __ Bfi(lo_reg, hi_reg, 32, 32);
   2247   __ Fmov(result_reg, lo_reg);
   2248 }
   2249 
   2250 
   2251 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
   2252   Handle<String> class_name = instr->hydrogen()->class_name();
   2253   Label* true_label = instr->TrueLabel(chunk_);
   2254   Label* false_label = instr->FalseLabel(chunk_);
   2255   Register input = ToRegister(instr->value());
   2256   Register scratch1 = ToRegister(instr->temp1());
   2257   Register scratch2 = ToRegister(instr->temp2());
   2258 
   2259   __ JumpIfSmi(input, false_label);
   2260 
   2261   Register map = scratch2;
   2262   __ CompareObjectType(input, map, scratch1, JS_FUNCTION_TYPE);
   2263   if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
   2264     __ B(eq, true_label);
   2265   } else {
   2266     __ B(eq, false_label);
   2267   }
   2268 
   2269   // Check if the constructor in the map is a function.
   2270   {
   2271     UseScratchRegisterScope temps(masm());
   2272     Register instance_type = temps.AcquireX();
   2273     __ GetMapConstructor(scratch1, map, scratch2, instance_type);
   2274     __ Cmp(instance_type, JS_FUNCTION_TYPE);
   2275   }
   2276   // Objects with a non-function constructor have class 'Object'.
   2277   if (String::Equals(class_name, isolate()->factory()->Object_string())) {
   2278     __ B(ne, true_label);
   2279   } else {
   2280     __ B(ne, false_label);
   2281   }
   2282 
   2283   // The constructor function is in scratch1. Get its instance class name.
   2284   __ Ldr(scratch1,
   2285          FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
   2286   __ Ldr(scratch1,
   2287          FieldMemOperand(scratch1,
   2288                          SharedFunctionInfo::kInstanceClassNameOffset));
   2289 
   2290   // The class name we are testing against is internalized since it's a literal.
   2291   // The name in the constructor is internalized because of the way the context
   2292   // is booted. This routine isn't expected to work for random API-created
   2293   // classes and it doesn't have to because you can't access it with natives
   2294   // syntax. Since both sides are internalized it is sufficient to use an
   2295   // identity comparison.
   2296   EmitCompareAndBranch(instr, eq, scratch1, Operand(class_name));
   2297 }
   2298 
   2299 
   2300 void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) {
   2301   DCHECK(instr->hydrogen()->representation().IsDouble());
   2302   FPRegister object = ToDoubleRegister(instr->object());
   2303   Register temp = ToRegister(instr->temp());
   2304 
   2305   // If we don't have a NaN, we don't have the hole, so branch now to avoid the
   2306   // (relatively expensive) hole-NaN check.
   2307   __ Fcmp(object, object);
   2308   __ B(vc, instr->FalseLabel(chunk_));
   2309 
   2310   // We have a NaN, but is it the hole?
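           // kHoleNanInt64 is the exact NaN bit pattern used to encode the hole
           // in a FixedDoubleArray, so an integer identity check on the raw
           // bits distinguishes it from ordinary NaNs.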
   2311   __ Fmov(temp, object);
   2312   EmitCompareAndBranch(instr, eq, temp, kHoleNanInt64);
   2313 }
   2314 
   2315 
   2316 void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) {
   2317   DCHECK(instr->hydrogen()->representation().IsTagged());
   2318   Register object = ToRegister(instr->object());
   2319 
   2320   EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex);
   2321 }
   2322 
   2323 
   2324 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
   2325   Register value = ToRegister(instr->value());
   2326   Register map = ToRegister(instr->temp());
   2327 
   2328   __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
   2329   EmitCompareAndBranch(instr, eq, map, Operand(instr->map()));
   2330 }
   2331 
   2332 
   2333 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
   2334   Representation rep = instr->hydrogen()->value()->representation();
   2335   DCHECK(!rep.IsInteger32());
   2336   Register scratch = ToRegister(instr->temp());
   2337 
   2338   if (rep.IsDouble()) {
   2339     __ JumpIfMinusZero(ToDoubleRegister(instr->value()),
   2340                        instr->TrueLabel(chunk()));
   2341   } else {
   2342     Register value = ToRegister(instr->value());
   2343     __ JumpIfNotHeapNumber(value, instr->FalseLabel(chunk()), DO_SMI_CHECK);
   2344     __ Ldr(scratch, FieldMemOperand(value, HeapNumber::kValueOffset));
   2345     __ JumpIfMinusZero(scratch, instr->TrueLabel(chunk()));
   2346   }
   2347   EmitGoto(instr->FalseDestination(chunk()));
   2348 }
   2349 
   2350 
   2351 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
   2352   LOperand* left = instr->left();
   2353   LOperand* right = instr->right();
   2354   bool is_unsigned =
   2355       instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
   2356       instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
   2357   Condition cond = TokenToCondition(instr->op(), is_unsigned);
   2358 
   2359   if (left->IsConstantOperand() && right->IsConstantOperand()) {
   2360     // We can statically evaluate the comparison.
   2361     double left_val = ToDouble(LConstantOperand::cast(left));
   2362     double right_val = ToDouble(LConstantOperand::cast(right));
   2363     int next_block = EvalComparison(instr->op(), left_val, right_val) ?
   2364         instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
   2365     EmitGoto(next_block);
   2366   } else {
   2367     if (instr->is_double()) {
   2368       __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right));
   2369 
   2370       // If a NaN is involved, i.e. the result is unordered (V set),
   2371       // jump to false block label.
   2372       __ B(vs, instr->FalseLabel(chunk_));
   2373       EmitBranch(instr, cond);
   2374     } else {
   2375       if (instr->hydrogen_value()->representation().IsInteger32()) {
   2376         if (right->IsConstantOperand()) {
   2377           EmitCompareAndBranch(instr, cond, ToRegister32(left),
   2378                                ToOperand32(right));
   2379         } else {
   2380           // Commute the operands and the condition.
   2381           EmitCompareAndBranch(instr, CommuteCondition(cond),
   2382                                ToRegister32(right), ToOperand32(left));
   2383         }
   2384       } else {
   2385         DCHECK(instr->hydrogen_value()->representation().IsSmi());
   2386         if (right->IsConstantOperand()) {
   2387           int32_t value = ToInteger32(LConstantOperand::cast(right));
   2388           EmitCompareAndBranch(instr,
   2389                                cond,
   2390                                ToRegister(left),
   2391                                Operand(Smi::FromInt(value)));
   2392         } else if (left->IsConstantOperand()) {
   2393           // Commute the operands and the condition.
   2394           int32_t value = ToInteger32(LConstantOperand::cast(left));
   2395           EmitCompareAndBranch(instr,
   2396                                CommuteCondition(cond),
   2397                                ToRegister(right),
   2398                                Operand(Smi::FromInt(value)));
   2399         } else {
   2400           EmitCompareAndBranch(instr,
   2401                                cond,
   2402                                ToRegister(left),
   2403                                ToRegister(right));
   2404         }
   2405       }
   2406     }
   2407   }
   2408 }
   2409 
   2410 
   2411 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
   2412   Register left = ToRegister(instr->left());
   2413   Register right = ToRegister(instr->right());
   2414   EmitCompareAndBranch(instr, eq, left, right);
   2415 }
   2416 
   2417 
   2418 void LCodeGen::DoCmpT(LCmpT* instr) {
   2419   DCHECK(ToRegister(instr->context()).is(cp));
   2420   Token::Value op = instr->op();
   2421   Condition cond = TokenToCondition(op, false);
   2422 
   2423   DCHECK(ToRegister(instr->left()).Is(x1));
   2424   DCHECK(ToRegister(instr->right()).Is(x0));
   2425   Handle<Code> ic =
   2426       CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
   2427   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   2428   // Signal that we don't inline smi code before this stub.
   2429   InlineSmiCheckInfo::EmitNotInlined(masm());
   2430 
   2431   // Return true or false depending on CompareIC result.
   2432   // This instruction is marked as call. We can clobber any register.
   2433   DCHECK(instr->IsMarkedAsCall());
   2434   __ LoadTrueFalseRoots(x1, x2);
   2435   __ Cmp(x0, 0);
   2436   __ Csel(ToRegister(instr->result()), x1, x2, cond);
   2437 }
   2438 
   2439 
   2440 void LCodeGen::DoConstantD(LConstantD* instr) {
   2441   DCHECK(instr->result()->IsDoubleRegister());
   2442   DoubleRegister result = ToDoubleRegister(instr->result());
   2443   if (instr->value() == 0) {
   2444     if (copysign(1.0, instr->value()) == 1.0) {
   2445       __ Fmov(result, fp_zero);
   2446     } else {
   2447       __ Fneg(result, fp_zero);
   2448     }
   2449   } else {
   2450     __ Fmov(result, instr->value());
   2451   }
   2452 }
   2453 
   2454 
   2455 void LCodeGen::DoConstantE(LConstantE* instr) {
   2456   __ Mov(ToRegister(instr->result()), Operand(instr->value()));
   2457 }
   2458 
   2459 
   2460 void LCodeGen::DoConstantI(LConstantI* instr) {
   2461   DCHECK(is_int32(instr->value()));
   2462   // Cast the value here to ensure that the value isn't sign extended by the
   2463   // implicit Operand constructor.
   2464   __ Mov(ToRegister32(instr->result()), static_cast<uint32_t>(instr->value()));
   2465 }
   2466 
   2467 
   2468 void LCodeGen::DoConstantS(LConstantS* instr) {
   2469   __ Mov(ToRegister(instr->result()), Operand(instr->value()));
   2470 }
   2471 
   2472 
   2473 void LCodeGen::DoConstantT(LConstantT* instr) {
   2474   Handle<Object> object = instr->value(isolate());
   2475   AllowDeferredHandleDereference smi_check;
   2476   __ LoadObject(ToRegister(instr->result()), object);
   2477 }
   2478 
   2479 
   2480 void LCodeGen::DoContext(LContext* instr) {
   2481   // If there is a non-return use, the context must be moved to a register.
   2482   Register result = ToRegister(instr->result());
   2483   if (info()->IsOptimizing()) {
   2484     __ Ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
   2485   } else {
   2486     // If there is no frame, the context must be in cp.
   2487     DCHECK(result.is(cp));
   2488   }
   2489 }
   2490 
   2491 
   2492 void LCodeGen::DoCheckValue(LCheckValue* instr) {
   2493   Register reg = ToRegister(instr->value());
   2494   Handle<HeapObject> object = instr->hydrogen()->object().handle();
   2495   AllowDeferredHandleDereference smi_check;
   2496   if (isolate()->heap()->InNewSpace(*object)) {
   2497     UseScratchRegisterScope temps(masm());
   2498     Register temp = temps.AcquireX();
   2499     Handle<Cell> cell = isolate()->factory()->NewCell(object);
   2500     __ Mov(temp, Operand(cell));
   2501     __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset));
   2502     __ Cmp(reg, temp);
   2503   } else {
   2504     __ Cmp(reg, Operand(object));
   2505   }
   2506   DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
   2507 }
   2508 
   2509 
   2510 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
   2511   last_lazy_deopt_pc_ = masm()->pc_offset();
   2512   DCHECK(instr->HasEnvironment());
   2513   LEnvironment* env = instr->environment();
   2514   RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
   2515   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
   2516 }
   2517 
   2518 
   2519 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
   2520   Deoptimizer::BailoutType type = instr->hydrogen()->type();
   2521   // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
   2522   // needed return address), even though the implementation of LAZY and EAGER is
   2523   // now identical. When LAZY is eventually completely folded into EAGER, remove
   2524   // the special case below.
   2525   if (info()->IsStub() && (type == Deoptimizer::EAGER)) {
   2526     type = Deoptimizer::LAZY;
   2527   }
   2528 
   2529   Deoptimize(instr, instr->hydrogen()->reason(), &type);
   2530 }
   2531 
   2532 
   2533 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
   2534   Register dividend = ToRegister32(instr->dividend());
   2535   int32_t divisor = instr->divisor();
   2536   Register result = ToRegister32(instr->result());
   2537   DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
   2538   DCHECK(!result.is(dividend));
   2539 
   2540   // Check for (0 / -x) that will produce negative zero.
   2541   HDiv* hdiv = instr->hydrogen();
   2542   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    2543     DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
   2544   }
   2545   // Check for (kMinInt / -1).
   2546   if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
   2547     // Test dividend for kMinInt by subtracting one (cmp) and checking for
   2548     // overflow.
   2549     __ Cmp(dividend, 1);
   2550     DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
   2551   }
   2552   // Deoptimize if remainder will not be 0.
   2553   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
   2554       divisor != 1 && divisor != -1) {
   2555     int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
   2556     __ Tst(dividend, mask);
   2557     DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
   2558   }
   2559 
   2560   if (divisor == -1) {  // Nice shortcut, not needed for correctness.
   2561     __ Neg(result, dividend);
   2562     return;
   2563   }
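           // For a negative dividend, an arithmetic shift alone would round
           // towards negative infinity, so a bias of (2^shift - 1) is added
           // first (just the sign bit when shift == 1) to get the truncating
           // division JavaScript requires. For example, with divisor 4
           // (shift 2): -7 + 3 = -4, and -4 >> 2 = -1, the truncated -7 / 4.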
   2564   int32_t shift = WhichPowerOf2Abs(divisor);
   2565   if (shift == 0) {
   2566     __ Mov(result, dividend);
   2567   } else if (shift == 1) {
   2568     __ Add(result, dividend, Operand(dividend, LSR, 31));
   2569   } else {
   2570     __ Mov(result, Operand(dividend, ASR, 31));
   2571     __ Add(result, dividend, Operand(result, LSR, 32 - shift));
   2572   }
   2573   if (shift > 0) __ Mov(result, Operand(result, ASR, shift));
   2574   if (divisor < 0) __ Neg(result, result);
   2575 }
   2576 
   2577 
   2578 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
   2579   Register dividend = ToRegister32(instr->dividend());
   2580   int32_t divisor = instr->divisor();
   2581   Register result = ToRegister32(instr->result());
   2582   DCHECK(!AreAliased(dividend, result));
   2583 
   2584   if (divisor == 0) {
   2585     Deoptimize(instr, Deoptimizer::kDivisionByZero);
   2586     return;
   2587   }
   2588 
   2589   // Check for (0 / -x) that will produce negative zero.
   2590   HDiv* hdiv = instr->hydrogen();
   2591   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
   2592     DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
   2593   }
   2594 
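           // TruncatingDiv implements the division as a multiplication by a
           // precomputed fixed-point reciprocal followed by shifts, so no Sdiv
           // instruction is needed for a constant divisor.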
   2595   __ TruncatingDiv(result, dividend, Abs(divisor));
   2596   if (divisor < 0) __ Neg(result, result);
   2597 
   2598   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
   2599     Register temp = ToRegister32(instr->temp());
   2600     DCHECK(!AreAliased(dividend, result, temp));
   2601     __ Sxtw(dividend.X(), dividend);
   2602     __ Mov(temp, divisor);
   2603     __ Smsubl(temp.X(), result, temp, dividend.X());
   2604     DeoptimizeIfNotZero(temp, instr, Deoptimizer::kLostPrecision);
   2605   }
   2606 }
   2607 
   2608 
   2609 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
   2610 void LCodeGen::DoDivI(LDivI* instr) {
   2611   HBinaryOperation* hdiv = instr->hydrogen();
   2612   Register dividend = ToRegister32(instr->dividend());
   2613   Register divisor = ToRegister32(instr->divisor());
   2614   Register result = ToRegister32(instr->result());
   2615 
   2616   // Issue the division first, and then check for any deopt cases whilst the
   2617   // result is computed.
   2618   __ Sdiv(result, dividend, divisor);
   2619 
   2620   if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
   2621     DCHECK(!instr->temp());
   2622     return;
   2623   }
   2624 
   2625   // Check for x / 0.
   2626   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
   2627     DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
   2628   }
   2629 
   2630   // Check for (0 / -x) as that will produce negative zero.
   2631   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
   2632     __ Cmp(divisor, 0);
   2633 
   2634     // If the divisor < 0 (mi), compare the dividend, and deopt if it is
    2635     // zero, i.e. a zero dividend with a negative divisor deopts.
    2636     // If the divisor >= 0 (pl, the opposite of mi), set the flags to
    2637     // condition ne, so we don't deopt, i.e. a positive divisor doesn't deopt.
   2638     __ Ccmp(dividend, 0, NoFlag, mi);
   2639     DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
   2640   }
   2641 
   2642   // Check for (kMinInt / -1).
   2643   if (hdiv->CheckFlag(HValue::kCanOverflow)) {
   2644     // Test dividend for kMinInt by subtracting one (cmp) and checking for
   2645     // overflow.
   2646     __ Cmp(dividend, 1);
    2647     // If overflow is set, i.e. dividend = kMinInt, compare the divisor with
    2648     // -1. If overflow is clear, set the flags for condition ne, as the
    2649     // dividend isn't kMinInt, and thus we shouldn't deopt.
   2650     __ Ccmp(divisor, -1, NoFlag, vs);
   2651     DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
   2652   }
   2653 
   2654   // Compute remainder and deopt if it's not zero.
   2655   Register remainder = ToRegister32(instr->temp());
   2656   __ Msub(remainder, result, divisor, dividend);
   2657   DeoptimizeIfNotZero(remainder, instr, Deoptimizer::kLostPrecision);
   2658 }
   2659 
   2660 
   2661 void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
   2662   DoubleRegister input = ToDoubleRegister(instr->value());
   2663   Register result = ToRegister32(instr->result());
   2664 
   2665   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   2666     DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero);
   2667   }
   2668 
   2669   __ TryRepresentDoubleAsInt32(result, input, double_scratch());
   2670   DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
   2671 
   2672   if (instr->tag_result()) {
   2673     __ SmiTag(result.X());
   2674   }
   2675 }
   2676 
   2677 
   2678 void LCodeGen::DoDrop(LDrop* instr) {
   2679   __ Drop(instr->count());
   2680 
   2681   RecordPushedArgumentsDelta(instr->hydrogen_value()->argument_delta());
   2682 }
   2683 
   2684 
   2685 void LCodeGen::DoDummy(LDummy* instr) {
   2686   // Nothing to see here, move on!
   2687 }
   2688 
   2689 
   2690 void LCodeGen::DoDummyUse(LDummyUse* instr) {
   2691   // Nothing to see here, move on!
   2692 }
   2693 
   2694 
   2695 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
   2696   Register map = ToRegister(instr->map());
   2697   Register result = ToRegister(instr->result());
   2698   Label load_cache, done;
   2699 
   2700   __ EnumLengthUntagged(result, map);
   2701   __ Cbnz(result, &load_cache);
   2702 
   2703   __ Mov(result, Operand(isolate()->factory()->empty_fixed_array()));
   2704   __ B(&done);
   2705 
   2706   __ Bind(&load_cache);
   2707   __ LoadInstanceDescriptors(map, result);
   2708   __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
   2709   __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
   2710   DeoptimizeIfZero(result, instr, Deoptimizer::kNoCache);
   2711 
   2712   __ Bind(&done);
   2713 }
   2714 
   2715 
   2716 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
   2717   Register object = ToRegister(instr->object());
   2718   Register null_value = x5;
   2719 
   2720   DCHECK(instr->IsMarkedAsCall());
   2721   DCHECK(object.Is(x0));
   2722 
   2723   DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi);
   2724 
   2725   STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
   2726   __ CompareObjectType(object, x1, x1, JS_PROXY_TYPE);
   2727   DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject);
   2728 
   2729   Label use_cache, call_runtime;
   2730   __ LoadRoot(null_value, Heap::kNullValueRootIndex);
   2731   __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);
   2732 
   2733   __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
   2734   __ B(&use_cache);
   2735 
   2736   // Get the set of properties to enumerate.
   2737   __ Bind(&call_runtime);
   2738   __ Push(object);
   2739   CallRuntime(Runtime::kGetPropertyNamesFast, instr);
   2740 
   2741   __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
   2742   DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr,
   2743                       Deoptimizer::kWrongMap);
   2744 
   2745   __ Bind(&use_cache);
   2746 }
   2747 
   2748 
   2749 void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
   2750   Register input = ToRegister(instr->value());
   2751   Register result = ToRegister(instr->result());
   2752 
   2753   __ AssertString(input);
   2754 
   2755   // Assert that we can use a W register load to get the hash.
   2756   DCHECK((String::kHashShift + String::kArrayIndexValueBits) < kWRegSizeInBits);
   2757   __ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset));
   2758   __ IndexFromHash(result, result);
   2759 }
   2760 
   2761 
   2762 void LCodeGen::EmitGoto(int block) {
   2763   // Do not emit a jump if we are emitting a goto to the next block.
   2764   if (!IsNextEmittedBlock(block)) {
   2765     __ B(chunk_->GetAssemblyLabel(LookupDestination(block)));
   2766   }
   2767 }
   2768 
   2769 
   2770 void LCodeGen::DoGoto(LGoto* instr) {
   2771   EmitGoto(instr->block_id());
   2772 }
   2773 
   2774 
   2775 void LCodeGen::DoHasCachedArrayIndexAndBranch(
   2776     LHasCachedArrayIndexAndBranch* instr) {
   2777   Register input = ToRegister(instr->value());
   2778   Register temp = ToRegister32(instr->temp());
   2779 
   2780   // Assert that the cache status bits fit in a W register.
   2781   DCHECK(is_uint32(String::kContainsCachedArrayIndexMask));
   2782   __ Ldr(temp, FieldMemOperand(input, String::kHashFieldOffset));
   2783   __ Tst(temp, String::kContainsCachedArrayIndexMask);
   2784   EmitBranch(instr, eq);
   2785 }
   2786 
   2787 
   2788 // The HHasInstanceTypeAndBranch instruction is built with an interval of
   2789 // types to test, but it is only used in very restricted ways. The only
   2790 // possible kinds of intervals are:
   2791 //  - [ FIRST_TYPE, instr->to() ]
   2792 //  - [ instr->from(), LAST_TYPE ]
   2793 //  - instr->from() == instr->to()
   2794 //
   2795 // These kinds of intervals can be checked with only one compare instruction,
   2796 // provided the correct value and test condition are used.
   2797 //
   2798 // TestType() will return the value to use in the compare instruction and
   2799 // BranchCondition() will return the condition to use depending on the kind
   2800 // of interval actually specified in the instruction.
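        // For example, to test for the interval [ FIRST_TYPE, JS_OBJECT_TYPE ],
        // it is enough to compare the instance type with JS_OBJECT_TYPE and
        // branch on ls (unsigned lower-or-same).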
   2801 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
   2802   InstanceType from = instr->from();
   2803   InstanceType to = instr->to();
   2804   if (from == FIRST_TYPE) return to;
   2805   DCHECK((from == to) || (to == LAST_TYPE));
   2806   return from;
   2807 }
   2808 
   2809 
   2810 // See comment above TestType function for what this function does.
   2811 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
   2812   InstanceType from = instr->from();
   2813   InstanceType to = instr->to();
   2814   if (from == to) return eq;
   2815   if (to == LAST_TYPE) return hs;
   2816   if (from == FIRST_TYPE) return ls;
   2817   UNREACHABLE();
   2818   return eq;
   2819 }
   2820 
   2821 
   2822 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
   2823   Register input = ToRegister(instr->value());
   2824   Register scratch = ToRegister(instr->temp());
   2825 
   2826   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   2827     __ JumpIfSmi(input, instr->FalseLabel(chunk_));
   2828   }
   2829   __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
   2830   EmitBranch(instr, BranchCondition(instr->hydrogen()));
   2831 }
   2832 
   2833 
   2834 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
   2835   Register result = ToRegister(instr->result());
   2836   Register base = ToRegister(instr->base_object());
   2837   if (instr->offset()->IsConstantOperand()) {
   2838     __ Add(result, base, ToOperand32(instr->offset()));
   2839   } else {
   2840     __ Add(result, base, Operand(ToRegister32(instr->offset()), SXTW));
   2841   }
   2842 }
   2843 
   2844 
   2845 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
   2846   DCHECK(ToRegister(instr->context()).is(cp));
   2847   DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
   2848   DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
   2849   DCHECK(ToRegister(instr->result()).is(x0));
   2850   InstanceOfStub stub(isolate());
   2851   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   2852 }
   2853 
   2854 
   2855 void LCodeGen::DoHasInPrototypeChainAndBranch(
   2856     LHasInPrototypeChainAndBranch* instr) {
   2857   Register const object = ToRegister(instr->object());
   2858   Register const object_map = ToRegister(instr->scratch1());
   2859   Register const object_instance_type = ToRegister(instr->scratch2());
   2860   Register const object_prototype = object_map;
   2861   Register const prototype = ToRegister(instr->prototype());
   2862 
   2863   // The {object} must be a spec object.  It's sufficient to know that {object}
   2864   // is not a smi, since all other non-spec objects have {null} prototypes and
   2865   // will be ruled out below.
   2866   if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
   2867     __ JumpIfSmi(object, instr->FalseLabel(chunk_));
   2868   }
   2869 
   2870   // Loop through the {object}'s prototype chain looking for the {prototype}.
   2871   __ Ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
   2872   Label loop;
   2873   __ Bind(&loop);
   2874 
   2875   // Deoptimize if the object needs to be access checked.
   2876   __ Ldrb(object_instance_type,
   2877           FieldMemOperand(object_map, Map::kBitFieldOffset));
   2878   __ Tst(object_instance_type, Operand(1 << Map::kIsAccessCheckNeeded));
   2879   DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck);
   2880   // Deoptimize for proxies.
   2881   __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
   2882   DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
   2883 
   2884   __ Ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
   2885   __ Cmp(object_prototype, prototype);
   2886   __ B(eq, instr->TrueLabel(chunk_));
   2887   __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
   2888   __ B(eq, instr->FalseLabel(chunk_));
   2889   __ Ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
   2890   __ B(&loop);
   2891 }
   2892 
   2893 
   2894 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
   2895   DoGap(instr);
   2896 }
   2897 
   2898 
   2899 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
   2900   Register value = ToRegister32(instr->value());
   2901   DoubleRegister result = ToDoubleRegister(instr->result());
   2902   __ Scvtf(result, value);
   2903 }
   2904 
   2905 
   2906 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
   2907   DCHECK(ToRegister(instr->context()).is(cp));
   2908   // The function is required to be in x1.
   2909   DCHECK(ToRegister(instr->function()).is(x1));
   2910   DCHECK(instr->HasPointerMap());
   2911 
   2912   Handle<JSFunction> known_function = instr->hydrogen()->known_function();
   2913   if (known_function.is_null()) {
   2914     LPointerMap* pointers = instr->pointer_map();
   2915     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   2916     ParameterCount count(instr->arity());
   2917     __ InvokeFunction(x1, no_reg, count, CALL_FUNCTION, generator);
   2918   } else {
   2919     CallKnownFunction(known_function,
   2920                       instr->hydrogen()->formal_parameter_count(),
   2921                       instr->arity(), instr);
   2922   }
   2923   RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
   2924 }
   2925 
   2926 
   2927 Condition LCodeGen::EmitIsString(Register input,
   2928                                  Register temp1,
   2929                                  Label* is_not_string,
   2930                                  SmiCheck check_needed = INLINE_SMI_CHECK) {
   2931   if (check_needed == INLINE_SMI_CHECK) {
   2932     __ JumpIfSmi(input, is_not_string);
   2933   }
   2934   __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
   2935 
   2936   return lt;
   2937 }
   2938 
   2939 
   2940 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
   2941   Register val = ToRegister(instr->value());
   2942   Register scratch = ToRegister(instr->temp());
   2943 
   2944   SmiCheck check_needed =
   2945       instr->hydrogen()->value()->type().IsHeapObject()
   2946           ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   2947   Condition true_cond =
   2948       EmitIsString(val, scratch, instr->FalseLabel(chunk_), check_needed);
   2949 
   2950   EmitBranch(instr, true_cond);
   2951 }
   2952 
   2953 
   2954 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
   2955   Register value = ToRegister(instr->value());
   2956   STATIC_ASSERT(kSmiTag == 0);
   2957   EmitTestAndBranch(instr, eq, value, kSmiTagMask);
   2958 }
   2959 
   2960 
   2961 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
   2962   Register input = ToRegister(instr->value());
   2963   Register temp = ToRegister(instr->temp());
   2964 
   2965   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   2966     __ JumpIfSmi(input, instr->FalseLabel(chunk_));
   2967   }
   2968   __ Ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
   2969   __ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
   2970 
   2971   EmitTestAndBranch(instr, ne, temp, 1 << Map::kIsUndetectable);
   2972 }
   2973 
   2974 
   2975 static const char* LabelType(LLabel* label) {
   2976   if (label->is_loop_header()) return " (loop header)";
   2977   if (label->is_osr_entry()) return " (OSR entry)";
   2978   return "";
   2979 }
   2980 
   2981 
   2982 void LCodeGen::DoLabel(LLabel* label) {
   2983   Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
   2984           current_instruction_,
   2985           label->hydrogen_value()->id(),
   2986           label->block_id(),
   2987           LabelType(label));
   2988 
   2989   // Inherit pushed_arguments_ from the predecessor's argument count.
   2990   if (label->block()->HasPredecessor()) {
   2991     pushed_arguments_ = label->block()->predecessors()->at(0)->argument_count();
   2992 #ifdef DEBUG
   2993     for (auto p : *label->block()->predecessors()) {
   2994       DCHECK_EQ(p->argument_count(), pushed_arguments_);
   2995     }
   2996 #endif
   2997   }
   2998 
   2999   __ Bind(label->label());
   3000   current_block_ = label->block_id();
   3001   DoGap(label);
   3002 }
   3003 
   3004 
   3005 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
   3006   Register context = ToRegister(instr->context());
   3007   Register result = ToRegister(instr->result());
   3008   __ Ldr(result, ContextMemOperand(context, instr->slot_index()));
   3009   if (instr->hydrogen()->RequiresHoleCheck()) {
   3010     if (instr->hydrogen()->DeoptimizesOnHole()) {
   3011       DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
   3012                        Deoptimizer::kHole);
   3013     } else {
   3014       Label not_the_hole;
   3015       __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
   3016       __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
   3017       __ Bind(&not_the_hole);
   3018     }
   3019   }
   3020 }
   3021 
   3022 
   3023 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
   3024   Register function = ToRegister(instr->function());
   3025   Register result = ToRegister(instr->result());
   3026   Register temp = ToRegister(instr->temp());
   3027 
   3028   // Get the prototype or initial map from the function.
   3029   __ Ldr(result, FieldMemOperand(function,
   3030                                  JSFunction::kPrototypeOrInitialMapOffset));
   3031 
   3032   // Check that the function has a prototype or an initial map.
   3033   DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
   3034                    Deoptimizer::kHole);
   3035 
   3036   // If the function does not have an initial map, we're done.
   3037   Label done;
   3038   __ CompareObjectType(result, temp, temp, MAP_TYPE);
   3039   __ B(ne, &done);
   3040 
   3041   // Get the prototype from the initial map.
   3042   __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
   3043 
   3044   // All done.
   3045   __ Bind(&done);
   3046 }
   3047 
   3048 
   3049 template <class T>
   3050 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
   3051   Register vector_register = ToRegister(instr->temp_vector());
   3052   Register slot_register = LoadWithVectorDescriptor::SlotRegister();
   3053   DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
   3054   DCHECK(slot_register.is(x0));
   3055 
   3056   AllowDeferredHandleDereference vector_structure_check;
   3057   Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
   3058   __ Mov(vector_register, vector);
   3059   // No need to allocate this register.
   3060   FeedbackVectorSlot slot = instr->hydrogen()->slot();
   3061   int index = vector->GetIndex(slot);
   3062   __ Mov(slot_register, Smi::FromInt(index));
   3063 }
   3064 
   3065 
   3066 template <class T>
   3067 void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
   3068   Register vector_register = ToRegister(instr->temp_vector());
   3069   Register slot_register = ToRegister(instr->temp_slot());
   3070 
   3071   AllowDeferredHandleDereference vector_structure_check;
   3072   Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
   3073   __ Mov(vector_register, vector);
   3074   FeedbackVectorSlot slot = instr->hydrogen()->slot();
   3075   int index = vector->GetIndex(slot);
   3076   __ Mov(slot_register, Smi::FromInt(index));
   3077 }
   3078 
   3079 
   3080 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
   3081   DCHECK(ToRegister(instr->context()).is(cp));
   3082   DCHECK(ToRegister(instr->global_object())
   3083              .is(LoadDescriptor::ReceiverRegister()));
   3084   DCHECK(ToRegister(instr->result()).Is(x0));
   3085   __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
   3086   EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
   3087   Handle<Code> ic =
   3088       CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
   3089                                          SLOPPY, PREMONOMORPHIC).code();
   3090   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   3091 }
   3092 
   3093 
   3094 MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(
   3095     Register key,
   3096     Register base,
   3097     Register scratch,
   3098     bool key_is_smi,
   3099     bool key_is_constant,
   3100     int constant_key,
   3101     ElementsKind elements_kind,
   3102     int base_offset) {
   3103   int element_size_shift = ElementsKindToShiftSize(elements_kind);
   3104 
   3105   if (key_is_constant) {
   3106     int key_offset = constant_key << element_size_shift;
   3107     return MemOperand(base, key_offset + base_offset);
   3108   }
   3109 
   3110   if (key_is_smi) {
   3111     __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift));
   3112     return MemOperand(scratch, base_offset);
   3113   }
   3114 
   3115   if (base_offset == 0) {
   3116     return MemOperand(base, key, SXTW, element_size_shift);
   3117   }
   3118 
   3119   DCHECK(!AreAliased(scratch, key));
   3120   __ Add(scratch, base, base_offset);
   3121   return MemOperand(scratch, key, SXTW, element_size_shift);
   3122 }
   3123 
   3124 
   3125 void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
   3126   Register ext_ptr = ToRegister(instr->elements());
   3127   Register scratch;
   3128   ElementsKind elements_kind = instr->elements_kind();
   3129 
   3130   bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
   3131   bool key_is_constant = instr->key()->IsConstantOperand();
   3132   Register key = no_reg;
   3133   int constant_key = 0;
   3134   if (key_is_constant) {
   3135     DCHECK(instr->temp() == NULL);
   3136     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   3137     if (constant_key & 0xf0000000) {
   3138       Abort(kArrayIndexConstantValueTooBig);
   3139     }
   3140   } else {
   3141     scratch = ToRegister(instr->temp());
   3142     key = ToRegister(instr->key());
   3143   }
   3144 
   3145   MemOperand mem_op =
   3146       PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
   3147                                        key_is_constant, constant_key,
   3148                                        elements_kind,
   3149                                        instr->base_offset());
   3150 
   3151   if (elements_kind == FLOAT32_ELEMENTS) {
   3152     DoubleRegister result = ToDoubleRegister(instr->result());
   3153     __ Ldr(result.S(), mem_op);
   3154     __ Fcvt(result, result.S());
   3155   } else if (elements_kind == FLOAT64_ELEMENTS) {
   3156     DoubleRegister result = ToDoubleRegister(instr->result());
   3157     __ Ldr(result, mem_op);
   3158   } else {
   3159     Register result = ToRegister(instr->result());
   3160 
   3161     switch (elements_kind) {
   3162       case INT8_ELEMENTS:
   3163         __ Ldrsb(result, mem_op);
   3164         break;
   3165       case UINT8_ELEMENTS:
   3166       case UINT8_CLAMPED_ELEMENTS:
   3167         __ Ldrb(result, mem_op);
   3168         break;
   3169       case INT16_ELEMENTS:
   3170         __ Ldrsh(result, mem_op);
   3171         break;
   3172       case UINT16_ELEMENTS:
   3173         __ Ldrh(result, mem_op);
   3174         break;
   3175       case INT32_ELEMENTS:
   3176         __ Ldrsw(result, mem_op);
   3177         break;
   3178       case UINT32_ELEMENTS:
   3179         __ Ldr(result.W(), mem_op);
   3180         if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
   3181           // Deopt if value >= 0x80000000.
   3182           __ Tst(result, 0xFFFFFFFF80000000);
   3183           DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue);
   3184         }
   3185         break;
   3186       case FLOAT32_ELEMENTS:
   3187       case FLOAT64_ELEMENTS:
   3188       case FAST_HOLEY_DOUBLE_ELEMENTS:
   3189       case FAST_HOLEY_ELEMENTS:
   3190       case FAST_HOLEY_SMI_ELEMENTS:
   3191       case FAST_DOUBLE_ELEMENTS:
   3192       case FAST_ELEMENTS:
   3193       case FAST_SMI_ELEMENTS:
   3194       case DICTIONARY_ELEMENTS:
   3195       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
   3196       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
   3197         UNREACHABLE();
   3198         break;
   3199     }
   3200   }
   3201 }
   3202 
   3203 
   3204 MemOperand LCodeGen::PrepareKeyedArrayOperand(Register base,
   3205                                               Register elements,
   3206                                               Register key,
   3207                                               bool key_is_tagged,
   3208                                               ElementsKind elements_kind,
   3209                                               Representation representation,
   3210                                               int base_offset) {
   3211   STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
   3212   STATIC_ASSERT(kSmiTag == 0);
   3213   int element_size_shift = ElementsKindToShiftSize(elements_kind);
   3214 
   3215   // Even though the HLoad/StoreKeyed instructions force the input
   3216   // representation for the key to be an integer, the input gets replaced during
   3217   // bounds check elimination with the index argument to the bounds check, which
   3218   // can be tagged, so that case must be handled here, too.
   3219   if (key_is_tagged) {
   3220     __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
   3221     if (representation.IsInteger32()) {
   3222       DCHECK(elements_kind == FAST_SMI_ELEMENTS);
   3223       // Read or write only the smi payload in the case of fast smi arrays.
   3224       return UntagSmiMemOperand(base, base_offset);
   3225     } else {
   3226       return MemOperand(base, base_offset);
   3227     }
   3228   } else {
   3229     // Sign extend key because it could be a 32-bit negative value or contain
   3230     // garbage in the top 32-bits. The address computation happens in 64-bit.
   3231     DCHECK((element_size_shift >= 0) && (element_size_shift <= 4));
   3232     if (representation.IsInteger32()) {
   3233       DCHECK(elements_kind == FAST_SMI_ELEMENTS);
   3234       // Read or write only the smi payload in the case of fast smi arrays.
   3235       __ Add(base, elements, Operand(key, SXTW, element_size_shift));
   3236       return UntagSmiMemOperand(base, base_offset);
   3237     } else {
   3238       __ Add(base, elements, base_offset);
   3239       return MemOperand(base, key, SXTW, element_size_shift);
   3240     }
   3241   }
   3242 }
   3243 
   3244 
   3245 void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
   3246   Register elements = ToRegister(instr->elements());
   3247   DoubleRegister result = ToDoubleRegister(instr->result());
   3248   MemOperand mem_op;
   3249 
   3250   if (instr->key()->IsConstantOperand()) {
   3251     DCHECK(instr->hydrogen()->RequiresHoleCheck() ||
   3252            (instr->temp() == NULL));
   3253 
   3254     int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   3255     if (constant_key & 0xf0000000) {
   3256       Abort(kArrayIndexConstantValueTooBig);
   3257     }
   3258     int offset = instr->base_offset() + constant_key * kDoubleSize;
   3259     mem_op = MemOperand(elements, offset);
   3260   } else {
   3261     Register load_base = ToRegister(instr->temp());
   3262     Register key = ToRegister(instr->key());
   3263     bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
   3264     mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
   3265                                       instr->hydrogen()->elements_kind(),
   3266                                       instr->hydrogen()->representation(),
   3267                                       instr->base_offset());
   3268   }
   3269 
   3270   __ Ldr(result, mem_op);
   3271 
   3272   if (instr->hydrogen()->RequiresHoleCheck()) {
   3273     Register scratch = ToRegister(instr->temp());
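            // The hole is a specific NaN bit pattern, so it cannot be detected
            // with an FP comparison: Eor'ing the raw bits with kHoleNanInt64
            // yields zero exactly when the loaded value is the hole.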
   3274     __ Fmov(scratch, result);
   3275     __ Eor(scratch, scratch, kHoleNanInt64);
   3276     DeoptimizeIfZero(scratch, instr, Deoptimizer::kHole);
   3277   }
   3278 }
   3279 
   3280 
   3281 void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
   3282   Register elements = ToRegister(instr->elements());
   3283   Register result = ToRegister(instr->result());
   3284   MemOperand mem_op;
   3285 
   3286   Representation representation = instr->hydrogen()->representation();
   3287   if (instr->key()->IsConstantOperand()) {
   3288     DCHECK(instr->temp() == NULL);
   3289     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
   3290     int offset = instr->base_offset() +
   3291         ToInteger32(const_operand) * kPointerSize;
   3292     if (representation.IsInteger32()) {
   3293       DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
   3294       STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
   3295       STATIC_ASSERT(kSmiTag == 0);
   3296       mem_op = UntagSmiMemOperand(elements, offset);
   3297     } else {
   3298       mem_op = MemOperand(elements, offset);
   3299     }
   3300   } else {
   3301     Register load_base = ToRegister(instr->temp());
   3302     Register key = ToRegister(instr->key());
   3303     bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
   3304 
   3305     mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
   3306                                       instr->hydrogen()->elements_kind(),
   3307                                       representation, instr->base_offset());
   3308   }
   3309 
   3310   __ Load(result, mem_op, representation);
   3311 
   3312   if (instr->hydrogen()->RequiresHoleCheck()) {
   3313     if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
   3314       DeoptimizeIfNotSmi(result, instr, Deoptimizer::kNotASmi);
   3315     } else {
   3316       DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
   3317                        Deoptimizer::kHole);
   3318     }
   3319   } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
   3320     DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
   3321     Label done;
   3322     __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
   3323     __ B(ne, &done);
   3324     if (info()->IsStub()) {
   3325       // A stub can safely convert the hole to undefined only if the array
   3326       // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
   3327       // it needs to bail out.
   3328       __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
   3329       __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
   3330       __ Cmp(result, Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
   3331       DeoptimizeIf(ne, instr, Deoptimizer::kHole);
   3332     }
   3333     __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
   3334     __ Bind(&done);
   3335   }
   3336 }
   3337 
   3338 
   3339 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
   3340   DCHECK(ToRegister(instr->context()).is(cp));
   3341   DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
   3342   DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
   3343 
   3344   if (instr->hydrogen()->HasVectorAndSlot()) {
   3345     EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
   3346   }
   3347 
   3348   Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
   3349                         isolate(), instr->hydrogen()->language_mode(),
   3350                         instr->hydrogen()->initialization_state()).code();
   3351   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   3352 
   3353   DCHECK(ToRegister(instr->result()).Is(x0));
   3354 }
   3355 
   3356 
   3357 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
   3358   HObjectAccess access = instr->hydrogen()->access();
   3359   int offset = access.offset();
   3360   Register object = ToRegister(instr->object());
   3361 
   3362   if (access.IsExternalMemory()) {
   3363     Register result = ToRegister(instr->result());
   3364     __ Load(result, MemOperand(object, offset), access.representation());
   3365     return;
   3366   }
   3367 
   3368   if (instr->hydrogen()->representation().IsDouble()) {
   3369     DCHECK(access.IsInobject());
   3370     FPRegister result = ToDoubleRegister(instr->result());
   3371     __ Ldr(result, FieldMemOperand(object, offset));
   3372     return;
   3373   }
   3374 
   3375   Register result = ToRegister(instr->result());
   3376   Register source;
   3377   if (access.IsInobject()) {
   3378     source = object;
   3379   } else {
   3380     // Load the properties array, using result as a scratch register.
   3381     __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
   3382     source = result;
   3383   }
   3384 
   3385   if (access.representation().IsSmi() &&
   3386       instr->hydrogen()->representation().IsInteger32()) {
   3387     // Read int value directly from upper half of the smi.
   3388     STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
   3389     STATIC_ASSERT(kSmiTag == 0);
   3390     __ Load(result, UntagSmiFieldMemOperand(source, offset),
   3391             Representation::Integer32());
   3392   } else {
   3393     __ Load(result, FieldMemOperand(source, offset), access.representation());
   3394   }
   3395 }
   3396 
   3397 
   3398 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
   3399   DCHECK(ToRegister(instr->context()).is(cp));
   3400   // LoadIC expects name and receiver in registers.
   3401   DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
   3402   __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
   3403   EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
   3404   Handle<Code> ic =
   3405       CodeFactory::LoadICInOptimizedCode(
   3406           isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
   3407           instr->hydrogen()->initialization_state()).code();
   3408   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   3409 
   3410   DCHECK(ToRegister(instr->result()).is(x0));
   3411 }
   3412 
   3413 
   3414 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
   3415   Register result = ToRegister(instr->result());
   3416   __ LoadRoot(result, instr->index());
   3417 }
   3418 
   3419 
   3420 void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
   3421   Register result = ToRegister(instr->result());
   3422   Register map = ToRegister(instr->value());
   3423   __ EnumLengthSmi(result, map);
   3424 }
   3425 
   3426 
   3427 void LCodeGen::DoMathAbs(LMathAbs* instr) {
   3428   Representation r = instr->hydrogen()->value()->representation();
   3429   if (r.IsDouble()) {
   3430     DoubleRegister input = ToDoubleRegister(instr->value());
   3431     DoubleRegister result = ToDoubleRegister(instr->result());
   3432     __ Fabs(result, input);
   3433   } else if (r.IsSmi() || r.IsInteger32()) {
   3434     Register input = r.IsSmi() ? ToRegister(instr->value())
   3435                                : ToRegister32(instr->value());
   3436     Register result = r.IsSmi() ? ToRegister(instr->result())
   3437                                 : ToRegister32(instr->result());
   3438     __ Abs(result, input);
   3439     DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
   3440   }
   3441 }
   3442 
   3443 
   3444 void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr,
   3445                                        Label* exit,
   3446                                        Label* allocation_entry) {
   3447   // Handle the tricky cases of MathAbsTagged:
   3448   //  - HeapNumber inputs.
   3449   //    - Negative inputs produce a positive result, so a new HeapNumber is
   3450   //      allocated to hold it.
   3451   //    - Positive inputs are returned as-is, since there is no need to allocate
   3452   //      a new HeapNumber for the result.
   3453   //  - The (smi) input -0x80000000 produces +0x80000000, which does not
   3454   //    fit in a smi. In this case, the inline code sets the result and
   3455   //    jumps directly to the allocation_entry label.
   3456   DCHECK(instr->context() != NULL);
   3457   DCHECK(ToRegister(instr->context()).is(cp));
   3458   Register input = ToRegister(instr->value());
   3459   Register temp1 = ToRegister(instr->temp1());
   3460   Register temp2 = ToRegister(instr->temp2());
   3461   Register result_bits = ToRegister(instr->temp3());
   3462   Register result = ToRegister(instr->result());
   3463 
   3464   Label runtime_allocation;
   3465 
   3466   // Deoptimize if the input is not a HeapNumber.
   3467   DeoptimizeIfNotHeapNumber(input, instr);
   3468 
   3469   // If the argument is positive, we can return it as-is, without any need to
   3470   // allocate a new HeapNumber for the result. We have to do this in integer
   3471   // registers (rather than with fabs) because we need to be able to distinguish
   3472   // the two zeroes.
   3473   __ Ldr(result_bits, FieldMemOperand(input, HeapNumber::kValueOffset));
   3474   __ Mov(result, input);
   3475   __ Tbz(result_bits, kXSignBit, exit);
   3476 
   3477   // Calculate abs(input) by clearing the sign bit.
   3478   __ Bic(result_bits, result_bits, kXSignMask);
   3479 
   3480   // Allocate a new HeapNumber to hold the result.
   3481   //  result_bits   The bit representation of the (double) result.
   3482   __ Bind(allocation_entry);
   3483   __ AllocateHeapNumber(result, &runtime_allocation, temp1, temp2);
   3484   // The inline (non-deferred) code will store result_bits into result.
   3485   __ B(exit);
   3486 
   3487   __ Bind(&runtime_allocation);
   3488   if (FLAG_debug_code) {
   3489     // Because result is in the pointer map, we need to make sure it has a valid
   3490     // tagged value before we call the runtime. We speculatively set it to the
   3491     // input (for abs(+x)) or to a smi (for abs(-SMI_MIN)), so it should already
   3492     // be valid.
   3493     Label result_ok;
   3494     Register input = ToRegister(instr->value());
   3495     __ JumpIfSmi(result, &result_ok);
   3496     __ Cmp(input, result);
   3497     __ Assert(eq, kUnexpectedValue);
   3498     __ Bind(&result_ok);
   3499   }
   3500 
   3501   { PushSafepointRegistersScope scope(this);
   3502     CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
   3503                             instr->context());
   3504     __ StoreToSafepointRegisterSlot(x0, result);
   3505   }
   3506   // The inline (non-deferred) code will store result_bits into result.
   3507 }
   3508 
   3509 
   3510 void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) {
   3511   // Class for deferred case.
   3512   class DeferredMathAbsTagged: public LDeferredCode {
   3513    public:
   3514     DeferredMathAbsTagged(LCodeGen* codegen, LMathAbsTagged* instr)
   3515         : LDeferredCode(codegen), instr_(instr) { }
   3516     virtual void Generate() {
   3517       codegen()->DoDeferredMathAbsTagged(instr_, exit(),
   3518                                          allocation_entry());
   3519     }
   3520     virtual LInstruction* instr() { return instr_; }
   3521     Label* allocation_entry() { return &allocation; }
   3522    private:
   3523     LMathAbsTagged* instr_;
   3524     Label allocation;
   3525   };
   3526 
   3527   // TODO(jbramley): The early-exit mechanism would skip the new frame handling
   3528   // in GenerateDeferredCode. Tidy this up.
   3529   DCHECK(!NeedsDeferredFrame());
   3530 
   3531   DeferredMathAbsTagged* deferred =
   3532       new(zone()) DeferredMathAbsTagged(this, instr);
   3533 
   3534   DCHECK(instr->hydrogen()->value()->representation().IsTagged() ||
   3535          instr->hydrogen()->value()->representation().IsSmi());
   3536   Register input = ToRegister(instr->value());
   3537   Register result_bits = ToRegister(instr->temp3());
   3538   Register result = ToRegister(instr->result());
   3539   Label done;
   3540 
   3541   // Handle smis inline.
   3542   // We can treat smis as 64-bit integers, since the (low-order) tag bits will
   3543   // never get set by the negation. This is therefore the same as the Integer32
   3544   // case in DoMathAbs, except that it operates on 64-bit values.
   3545   STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
   3546 
   3547   __ JumpIfNotSmi(input, deferred->entry());
   3548 
   3549   __ Abs(result, input, NULL, &done);
   3550 
   3551   // The result is the magnitude (abs) of the smallest value a smi can
   3552   // represent, encoded as a double.
   3553   __ Mov(result_bits, double_to_rawbits(0x80000000));
   3554   __ B(deferred->allocation_entry());
   3555 
   3556   __ Bind(deferred->exit());
   3557   __ Str(result_bits, FieldMemOperand(result, HeapNumber::kValueOffset));
   3558 
   3559   __ Bind(&done);
   3560 }
   3561 
   3562 
   3563 void LCodeGen::DoMathExp(LMathExp* instr) {
   3564   DoubleRegister input = ToDoubleRegister(instr->value());
   3565   DoubleRegister result = ToDoubleRegister(instr->result());
   3566   DoubleRegister double_temp1 = ToDoubleRegister(instr->double_temp1());
   3567   DoubleRegister double_temp2 = double_scratch();
   3568   Register temp1 = ToRegister(instr->temp1());
   3569   Register temp2 = ToRegister(instr->temp2());
   3570   Register temp3 = ToRegister(instr->temp3());
   3571 
   3572   MathExpGenerator::EmitMathExp(masm(), input, result,
   3573                                 double_temp1, double_temp2,
   3574                                 temp1, temp2, temp3);
   3575 }
   3576 
   3577 
   3578 void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
   3579   DoubleRegister input = ToDoubleRegister(instr->value());
   3580   DoubleRegister result = ToDoubleRegister(instr->result());
   3581 
   3582   __ Frintm(result, input);
   3583 }
   3584 
   3585 
   3586 void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
   3587   DoubleRegister input = ToDoubleRegister(instr->value());
   3588   Register result = ToRegister(instr->result());
   3589 
   3590   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3591     DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero);
   3592   }
   3593 
   3594   __ Fcvtms(result, input);
   3595 
   3596   // Check that the result fits into a 32-bit integer.
   3597   //  - The result did not overflow.
   3598   __ Cmp(result, Operand(result, SXTW));
   3599   //  - The input was not NaN.
   3600   __ Fccmp(input, input, NoFlag, eq);
   3601   DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
   3602 }
   3603 
   3604 
   3605 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
   3606   Register dividend = ToRegister32(instr->dividend());
   3607   Register result = ToRegister32(instr->result());
   3608   int32_t divisor = instr->divisor();
   3609 
   3610   // If the divisor is 1, return the dividend.
   3611   if (divisor == 1) {
   3612     __ Mov(result, dividend, kDiscardForSameWReg);
   3613     return;
   3614   }
   3615 
   3616   // If the divisor is positive, things are easy: There can be no deopts and we
   3617   // can simply do an arithmetic right shift.
   3618   int32_t shift = WhichPowerOf2Abs(divisor);
   3619   if (divisor > 1) {
   3620     __ Mov(result, Operand(dividend, ASR, shift));
   3621     return;
   3622   }
   3623 
   3624   // If the divisor is negative, we have to negate and handle edge cases.
   3625   __ Negs(result, dividend);
   3626   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3627     DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
   3628   }
   3629 
   3630   // Dividing by -1 is basically negation, unless we overflow.
   3631   if (divisor == -1) {
   3632     if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
   3633       DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
   3634     }
   3635     return;
   3636   }
   3637 
   3638   // If the negation could not overflow, simply shifting is OK.
   3639   if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
   3640     __ Mov(result, Operand(result, ASR, shift));
   3641     return;
   3642   }
   3643 
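          // If the dividend was kMinInt, the Negs above overflowed and left the
          // V flag set; the shifted result is then bogus, and the Csel below
          // selects the precomputed constant kMinInt / divisor instead.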
   3644   __ Asr(result, result, shift);
   3645   __ Csel(result, result, kMinInt / divisor, vc);
   3646 }
   3647 
   3648 
   3649 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
   3650   Register dividend = ToRegister32(instr->dividend());
   3651   int32_t divisor = instr->divisor();
   3652   Register result = ToRegister32(instr->result());
   3653   DCHECK(!AreAliased(dividend, result));
   3654 
   3655   if (divisor == 0) {
   3656     Deoptimize(instr, Deoptimizer::kDivisionByZero);
   3657     return;
   3658   }
   3659 
   3660   // Check for (0 / -x) that will produce negative zero.
   3661   HMathFloorOfDiv* hdiv = instr->hydrogen();
   3662   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
   3663     DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
   3664   }
   3665 
   3666   // Easy case: We need no dynamic check for the dividend and the flooring
   3667   // division is the same as the truncating division.
   3668   if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
   3669       (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
   3670     __ TruncatingDiv(result, dividend, Abs(divisor));
   3671     if (divisor < 0) __ Neg(result, result);
   3672     return;
   3673   }
   3674 
   3675   // In the general case we may need to adjust before and after the truncating
   3676   // division to get a flooring division.
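          // For example, for dividend == -7 and divisor == 2, the truncating
          // division yields -3 while the flooring result is -4: we compute
          // (-7 + 1) / 2 == -3 and then subtract 1 to get -4.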
   3677   Register temp = ToRegister32(instr->temp());
   3678   DCHECK(!AreAliased(temp, dividend, result));
   3679   Label needs_adjustment, done;
   3680   __ Cmp(dividend, 0);
   3681   __ B(divisor > 0 ? lt : gt, &needs_adjustment);
   3682   __ TruncatingDiv(result, dividend, Abs(divisor));
   3683   if (divisor < 0) __ Neg(result, result);
   3684   __ B(&done);
   3685   __ Bind(&needs_adjustment);
   3686   __ Add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
   3687   __ TruncatingDiv(result, temp, Abs(divisor));
   3688   if (divisor < 0) __ Neg(result, result);
   3689   __ Sub(result, result, Operand(1));
   3690   __ Bind(&done);
   3691 }
   3692 
   3693 
   3694 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
   3695 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
   3696   Register dividend = ToRegister32(instr->dividend());
   3697   Register divisor = ToRegister32(instr->divisor());
   3698   Register remainder = ToRegister32(instr->temp());
   3699   Register result = ToRegister32(instr->result());
   3700 
   3701   // This can't cause an exception on ARM, so we can execute it
   3702   // speculatively, before the checks below.
   3703   __ Sdiv(result, dividend, divisor);
   3704 
   3705   // Check for x / 0.
   3706   DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
   3707 
   3708   // Check for (kMinInt / -1).
   3709   if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
   3710     // The V flag will be set iff dividend == kMinInt.
   3711     __ Cmp(dividend, 1);
   3712     __ Ccmp(divisor, -1, NoFlag, vs);
   3713     DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
   3714   }
   3715 
   3716   // Check for (0 / -x) that will produce negative zero.
   3717   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3718     __ Cmp(divisor, 0);
   3719     __ Ccmp(dividend, 0, ZFlag, mi);
   3720     // "divisor" can't be null because the code would have already been
   3721     // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
   3722     // In this case we need to deoptimize to produce a -0.
   3723     DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
   3724   }
   3725 
   3726   Label done;
   3727   // If both operands have the same sign then we are done.
   3728   __ Eor(remainder, dividend, divisor);
   3729   __ Tbz(remainder, kWSignBit, &done);
   3730 
   3731   // Check if the result needs to be corrected.
   3732   __ Msub(remainder, result, divisor, dividend);
   3733   __ Cbz(remainder, &done);
   3734   __ Sub(result, result, 1);
   3735 
   3736   __ Bind(&done);
   3737 }
   3738 
   3739 
   3740 void LCodeGen::DoMathLog(LMathLog* instr) {
   3741   DCHECK(instr->IsMarkedAsCall());
   3742   DCHECK(ToDoubleRegister(instr->value()).is(d0));
   3743   __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
   3744                    0, 1);
   3745   DCHECK(ToDoubleRegister(instr->result()).Is(d0));
   3746 }
   3747 
   3748 
   3749 void LCodeGen::DoMathClz32(LMathClz32* instr) {
   3750   Register input = ToRegister32(instr->value());
   3751   Register result = ToRegister32(instr->result());
   3752   __ Clz(result, input);
   3753 }
   3754 
   3755 
   3756 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
   3757   DoubleRegister input = ToDoubleRegister(instr->value());
   3758   DoubleRegister result = ToDoubleRegister(instr->result());
   3759   Label done;
   3760 
   3761   // Math.pow(x, 0.5) differs from fsqrt(x) in the following cases:
   3762   //  Math.pow(-Infinity, 0.5) == +Infinity
   3763   //  Math.pow(-0.0, 0.5) == +0.0
   3764 
   3765   // Catch -infinity inputs first.
   3766   // TODO(jbramley): A constant infinity register would be helpful here.
   3767   __ Fmov(double_scratch(), kFP64NegativeInfinity);
   3768   __ Fcmp(double_scratch(), input);
   3769   __ Fabs(result, input);
   3770   __ B(&done, eq);
   3771 
   3772   // Add +0.0 to convert -0.0 to +0.0.
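          // (Under the default rounding mode, -0.0 + +0.0 == +0.0, and the
          // addition leaves every other input value unchanged.)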
   3773   __ Fadd(double_scratch(), input, fp_zero);
   3774   __ Fsqrt(result, double_scratch());
   3775 
   3776   __ Bind(&done);
   3777 }
   3778 
   3779 
   3780 void LCodeGen::DoPower(LPower* instr) {
   3781   Representation exponent_type = instr->hydrogen()->right()->representation();
   3782   // Having marked this as a call, we can use any registers.
   3783   // Just make sure that the input/output registers are the expected ones.
   3784   Register tagged_exponent = MathPowTaggedDescriptor::exponent();
   3785   Register integer_exponent = MathPowIntegerDescriptor::exponent();
   3786   DCHECK(!instr->right()->IsDoubleRegister() ||
   3787          ToDoubleRegister(instr->right()).is(d1));
   3788   DCHECK(exponent_type.IsInteger32() || !instr->right()->IsRegister() ||
   3789          ToRegister(instr->right()).is(tagged_exponent));
   3790   DCHECK(!exponent_type.IsInteger32() ||
   3791          ToRegister(instr->right()).is(integer_exponent));
   3792   DCHECK(ToDoubleRegister(instr->left()).is(d0));
   3793   DCHECK(ToDoubleRegister(instr->result()).is(d0));
   3794 
   3795   if (exponent_type.IsSmi()) {
   3796     MathPowStub stub(isolate(), MathPowStub::TAGGED);
   3797     __ CallStub(&stub);
   3798   } else if (exponent_type.IsTagged()) {
   3799     Label no_deopt;
   3800     __ JumpIfSmi(tagged_exponent, &no_deopt);
   3801     DeoptimizeIfNotHeapNumber(tagged_exponent, instr);
   3802     __ Bind(&no_deopt);
   3803     MathPowStub stub(isolate(), MathPowStub::TAGGED);
   3804     __ CallStub(&stub);
   3805   } else if (exponent_type.IsInteger32()) {
   3806     // Ensure integer exponent has no garbage in top 32-bits, as MathPowStub
   3807     // supports large integer exponents.
   3808     __ Sxtw(integer_exponent, integer_exponent);
   3809     MathPowStub stub(isolate(), MathPowStub::INTEGER);
   3810     __ CallStub(&stub);
   3811   } else {
   3812     DCHECK(exponent_type.IsDouble());
   3813     MathPowStub stub(isolate(), MathPowStub::DOUBLE);
   3814     __ CallStub(&stub);
   3815   }
   3816 }
   3817 
   3818 
   3819 void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
   3820   DoubleRegister input = ToDoubleRegister(instr->value());
   3821   DoubleRegister result = ToDoubleRegister(instr->result());
   3822   DoubleRegister scratch_d = double_scratch();
   3823 
   3824   DCHECK(!AreAliased(input, result, scratch_d));
   3825 
   3826   Label done;
   3827 
   3828   __ Frinta(result, input);
   3829   __ Fcmp(input, 0.0);
   3830   __ Fccmp(result, input, ZFlag, lt);
   3831   // The result is correct if the input was in [-0, +infinity], or was a
   3832   // negative integral value.
   3833   __ B(eq, &done);
   3834 
   3835   // Here the input is negative and non-integral, with an exponent below 52.
   3836   // We do not have to worry about the 0.49999999999999994 (0x3fdfffffffffffff)
   3837   // case. So we can safely add 0.5.
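          // For example, frintm(-2.3 + 0.5) == frintm(-1.8) == -2.0, which is
          // the required Math.round result for -2.3.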
   3838   __ Fmov(scratch_d, 0.5);
   3839   __ Fadd(result, input, scratch_d);
   3840   __ Frintm(result, result);
   3841   // The range [-0.5, -0.0[ yielded +0.0. Force the sign to negative.
   3842   __ Fabs(result, result);
   3843   __ Fneg(result, result);
   3844 
   3845   __ Bind(&done);
   3846 }
   3847 
   3848 
   3849 void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
   3850   DoubleRegister input = ToDoubleRegister(instr->value());
   3851   DoubleRegister temp = ToDoubleRegister(instr->temp1());
   3852   DoubleRegister dot_five = double_scratch();
   3853   Register result = ToRegister(instr->result());
   3854   Label done;
   3855 
   3856   // Math.round() rounds to the nearest integer, with ties going towards
   3857   // +infinity. This does not match any IEEE-754 rounding mode.
   3858   //  - Infinities and NaNs are propagated unchanged, but cause deopts because
   3859   //    they can't be represented as integers.
   3860   //  - The sign of the result is the same as the sign of the input. This means
   3861   //    that -0.0 rounds to itself, and values -0.5 <= input < 0 also produce a
   3862   //    result of -0.0.
   3863 
   3864   // Add 0.5 and round towards -infinity.
   3865   __ Fmov(dot_five, 0.5);
   3866   __ Fadd(temp, input, dot_five);
   3867   __ Fcvtms(result, temp);
   3868 
   3869   // The result is correct if:
   3870   //  - result is not 0, as the input could be NaN or [-0.5, -0.0].
   3871   //  - result is not 1, as 0.499...94 will wrongly map to 1.
   3872   //  - result fits in 32 bits.
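          // (0.499...94 is the largest double below 0.5; adding 0.5 to it rounds
          // up to exactly 1.0, so Fcvtms would yield 1 even though the correct
          // Math.round result is 0.)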
   3873   __ Cmp(result, Operand(result.W(), SXTW));
   3874   __ Ccmp(result, 1, ZFlag, eq);
   3875   __ B(hi, &done);
   3876 
   3877   // At this point, we have to handle possible inputs of NaN or numbers in the
   3878   // range [-0.5, 1.5[, or numbers larger than 32 bits.
   3879 
   3880   // Deoptimize if the result > 1, as it must be larger than 32 bits.
   3881   __ Cmp(result, 1);
   3882   DeoptimizeIf(hi, instr, Deoptimizer::kOverflow);
   3883 
   3884   // Deoptimize for negative inputs, which at this point are only numbers in
   3885   // the range [-0.5, -0.0].
   3886   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3887     __ Fmov(result, input);
   3888     DeoptimizeIfNegative(result, instr, Deoptimizer::kMinusZero);
   3889   }
   3890 
   3891   // Deoptimize if the input was NaN.
   3892   __ Fcmp(input, dot_five);
   3893   DeoptimizeIf(vs, instr, Deoptimizer::kNaN);
   3894 
   3895   // Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[
   3896   // if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1,
   3897   // else 0; we avoid dealing with 0.499...94 directly.
   3898   __ Cset(result, ge);
   3899   __ Bind(&done);
   3900 }
   3901 
   3902 
   3903 void LCodeGen::DoMathFround(LMathFround* instr) {
   3904   DoubleRegister input = ToDoubleRegister(instr->value());
   3905   DoubleRegister result = ToDoubleRegister(instr->result());
   3906   __ Fcvt(result.S(), input);
   3907   __ Fcvt(result, result.S());
   3908 }
   3909 
   3910 
   3911 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
   3912   DoubleRegister input = ToDoubleRegister(instr->value());
   3913   DoubleRegister result = ToDoubleRegister(instr->result());
   3914   __ Fsqrt(result, input);
   3915 }
   3916 
   3917 
   3918 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
   3919   HMathMinMax::Operation op = instr->hydrogen()->operation();
   3920   if (instr->hydrogen()->representation().IsInteger32()) {
   3921     Register result = ToRegister32(instr->result());
   3922     Register left = ToRegister32(instr->left());
   3923     Operand right = ToOperand32(instr->right());
   3924 
   3925     __ Cmp(left, right);
   3926     __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
   3927   } else if (instr->hydrogen()->representation().IsSmi()) {
   3928     Register result = ToRegister(instr->result());
   3929     Register left = ToRegister(instr->left());
   3930     Operand right = ToOperand(instr->right());
   3931 
   3932     __ Cmp(left, right);
   3933     __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
   3934   } else {
   3935     DCHECK(instr->hydrogen()->representation().IsDouble());
   3936     DoubleRegister result = ToDoubleRegister(instr->result());
   3937     DoubleRegister left = ToDoubleRegister(instr->left());
   3938     DoubleRegister right = ToDoubleRegister(instr->right());
   3939 
   3940     if (op == HMathMinMax::kMathMax) {
   3941       __ Fmax(result, left, right);
   3942     } else {
   3943       DCHECK(op == HMathMinMax::kMathMin);
   3944       __ Fmin(result, left, right);
   3945     }
   3946   }
   3947 }
   3948 
   3949 
   3950 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
   3951   Register dividend = ToRegister32(instr->dividend());
   3952   int32_t divisor = instr->divisor();
   3953   DCHECK(dividend.is(ToRegister32(instr->result())));
   3954 
   3955   // Theoretically, a variation of the branch-free code for integer division
   3956   // by a power of 2 (computing the remainder via an additional multiplication,
   3957   // which simplifies to an 'and', followed by a subtraction) should be faster,
   3958   // and this is exactly what GCC and clang emit. Nevertheless, benchmarks
   3959   // seem to indicate that positive dividends are heavily favored, so the
   3960   // branching version performs better.
   3961   HMod* hmod = instr->hydrogen();
   3962   int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
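          // For example, divisor == -8 yields mask == 7: the magnitude of x % -8
          // equals that of x % 8, and the result takes the sign of the dividend.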
   3963   Label dividend_is_not_negative, done;
   3964   if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
   3965     __ Tbz(dividend, kWSignBit, &dividend_is_not_negative);
   3966     // Note that this is correct even for kMinInt operands.
   3967     __ Neg(dividend, dividend);
   3968     __ And(dividend, dividend, mask);
   3969     __ Negs(dividend, dividend);
   3970     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3971       DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
   3972     }
   3973     __ B(&done);
   3974   }
   3975 
   3976   __ bind(&dividend_is_not_negative);
   3977   __ And(dividend, dividend, mask);
   3978   __ bind(&done);
   3979 }
   3980 
   3981 
   3982 void LCodeGen::DoModByConstI(LModByConstI* instr) {
   3983   Register dividend = ToRegister32(instr->dividend());
   3984   int32_t divisor = instr->divisor();
   3985   Register result = ToRegister32(instr->result());
   3986   Register temp = ToRegister32(instr->temp());
   3987   DCHECK(!AreAliased(dividend, result, temp));
   3988 
   3989   if (divisor == 0) {
   3990     Deoptimize(instr, Deoptimizer::kDivisionByZero);
   3991     return;
   3992   }
   3993 
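          // Compute dividend - (dividend / |divisor|) * |divisor|. For example,
          // for dividend == -5 and divisor == 3, the truncating division yields
          // -1 and the remainder is -5 - (-1 * 3) == -2 (sign of the dividend).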
   3994   __ TruncatingDiv(result, dividend, Abs(divisor));
   3995   __ Sxtw(dividend.X(), dividend);
   3996   __ Mov(temp, Abs(divisor));
   3997   __ Smsubl(result.X(), result, temp, dividend.X());
   3998 
   3999   // Check for negative zero.
   4000   HMod* hmod = instr->hydrogen();
   4001   if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
   4002     Label remainder_not_zero;
   4003     __ Cbnz(result, &remainder_not_zero);
   4004     DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
   4005     __ bind(&remainder_not_zero);
   4006   }
   4007 }
   4008 
   4009 
   4010 void LCodeGen::DoModI(LModI* instr) {
   4011   Register dividend = ToRegister32(instr->left());
   4012   Register divisor = ToRegister32(instr->right());
   4013   Register result = ToRegister32(instr->result());
   4014 
   4015   Label done;
   4016   // modulo = dividend - quotient * divisor
   4017   __ Sdiv(result, dividend, divisor);
   4018   if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
   4019     DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
   4020   }
   4021   __ Msub(result, result, divisor, dividend);
   4022   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   4023     __ Cbnz(result, &done);
   4024     DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
   4025   }
   4026   __ Bind(&done);
   4027 }
   4028 
   4029 
   4030 void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
   4031   DCHECK(instr->hydrogen()->representation().IsSmiOrInteger32());
   4032   bool is_smi = instr->hydrogen()->representation().IsSmi();
   4033   Register result =
   4034       is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result());
   4035   Register left =
   4036       is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left());
   4037   int32_t right = ToInteger32(instr->right());
   4038   DCHECK((right > -kMaxInt) && (right < kMaxInt));
   4039 
   4040   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   4041   bool bailout_on_minus_zero =
   4042     instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
   4043 
   4044   if (bailout_on_minus_zero) {
   4045     if (right < 0) {
   4046       // The result is -0 if right is negative and left is zero.
   4047       DeoptimizeIfZero(left, instr, Deoptimizer::kMinusZero);
   4048     } else if (right == 0) {
   4049       // The result is -0 if right is zero and left is negative.
   4050       DeoptimizeIfNegative(left, instr, Deoptimizer::kMinusZero);
   4051     }
   4052   }
   4053 
   4054   switch (right) {
   4055     // Cases which can detect overflow.
   4056     case -1:
   4057       if (can_overflow) {
   4058         // Only 0x80000000 can overflow here.
   4059         __ Negs(result, left);
   4060         DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
   4061       } else {
   4062         __ Neg(result, left);
   4063       }
   4064       break;
   4065     case 0:
   4066       // This case can never overflow.
   4067       __ Mov(result, 0);
   4068       break;
   4069     case 1:
   4070       // This case can never overflow.
   4071       __ Mov(result, left, kDiscardForSameWReg);
   4072       break;
   4073     case 2:
   4074       if (can_overflow) {
   4075         __ Adds(result, left, left);
   4076         DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
   4077       } else {
   4078         __ Add(result, left, left);
   4079       }
   4080       break;
   4081 
   4082     default:
   4083       // Multiplication by constant powers of two (and some related values)
   4084       // can be done efficiently with shifted operands.
   4085       int32_t right_abs = Abs(right);
   4086 
   4087       if (base::bits::IsPowerOfTwo32(right_abs)) {
   4088         int right_log2 = WhichPowerOf2(right_abs);
   4089 
   4090         if (can_overflow) {
   4091           Register scratch = result;
   4092           DCHECK(!AreAliased(scratch, left));
   4093           __ Cls(scratch, left);
   4094           __ Cmp(scratch, right_log2);
   4095           DeoptimizeIf(lt, instr, Deoptimizer::kOverflow);
   4096         }
   4097 
   4098         if (right >= 0) {
   4099           // result = left << log2(right)
   4100           __ Lsl(result, left, right_log2);
   4101         } else {
   4102           // result = -left << log2(-right)
   4103           if (can_overflow) {
   4104             __ Negs(result, Operand(left, LSL, right_log2));
   4105             DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
   4106           } else {
   4107             __ Neg(result, Operand(left, LSL, right_log2));
   4108           }
   4109         }
   4110         return;
   4111       }
   4112 
   4113 
   4114       // For the following cases, we could perform a conservative overflow check
   4115       // with CLS as above. However, the few cycles saved are likely not worth
   4116       // the risk of deoptimizing more often than required.
   4117       DCHECK(!can_overflow);
   4118 
   4119       if (right >= 0) {
   4120         if (base::bits::IsPowerOfTwo32(right - 1)) {
   4121           // result = left + left << log2(right - 1)
   4122           __ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1)));
   4123         } else if (base::bits::IsPowerOfTwo32(right + 1)) {
   4124           // result = -left + left << log2(right + 1)
   4125           __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1)));
   4126           __ Neg(result, result);
   4127         } else {
   4128           UNREACHABLE();
   4129         }
   4130       } else {
   4131         if (base::bits::IsPowerOfTwo32(-right + 1)) {
   4132           // result = left - left << log2(-right + 1)
   4133           __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(-right + 1)));
   4134         } else if (base::bits::IsPowerOfTwo32(-right - 1)) {
   4135           // result = -left - left << log2(-right - 1)
   4136           __ Add(result, left, Operand(left, LSL, WhichPowerOf2(-right - 1)));
   4137           __ Neg(result, result);
   4138         } else {
   4139           UNREACHABLE();
   4140         }
   4141       }
   4142   }
   4143 }
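        // Illustrative examples of the constant-multiply decompositions used above
        // (overflow checks omitted):
        //   x *  8  ->  Lsl(x, 3)                   // power of two
        //   x * -8  ->  Neg(Lsl(x, 3))              // negative power of two
        //   x *  5  ->  Add(x, Lsl(x, 2))           // right - 1 is a power of two
        //   x *  7  ->  Neg(Sub(x, Lsl(x, 3)))      // right + 1 is a power of two
        //   x * -3  ->  Sub(x, Lsl(x, 2))           // -right + 1 is a power of two
        //   x * -9  ->  Neg(Add(x, Lsl(x, 3)))      // -right - 1 is a power of two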
   4144 
   4145 
   4146 void LCodeGen::DoMulI(LMulI* instr) {
   4147   Register result = ToRegister32(instr->result());
   4148   Register left = ToRegister32(instr->left());
   4149   Register right = ToRegister32(instr->right());
   4150 
   4151   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   4152   bool bailout_on_minus_zero =
   4153     instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
   4154 
   4155   if (bailout_on_minus_zero && !left.Is(right)) {
   4156     // If one operand is zero and the other is negative, the result is -0.
   4157     //  - Set Z (eq) if either left or right, or both, are 0.
   4158     __ Cmp(left, 0);
   4159     __ Ccmp(right, 0, ZFlag, ne);
   4160     //  - If so (eq), set N (mi) if left + right is negative.
   4161     //  - Otherwise, clear N.
   4162     __ Ccmn(left, right, NoFlag, eq);
   4163     DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
   4164   }
   4165 
   4166   if (can_overflow) {
   4167     __ Smull(result.X(), left, right);
   4168     __ Cmp(result.X(), Operand(result, SXTW));
   4169     DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
   4170   } else {
   4171     __ Mul(result, left, right);
   4172   }
   4173 }
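        // The overflow check above relies on Smull producing the full 64-bit
        // product: the multiply overflowed 32 bits exactly when that product is not
        // equal to its own low 32 bits sign-extended. Roughly (illustrative only):
        //
        //   int64_t full = (int64_t)left * (int64_t)right;     // Smull
        //   bool overflow = full != (int64_t)(int32_t)full;    // Cmp ..., SXTW
        //
        // The -0 check uses conditional compares: Z ends up set iff at least one
        // operand is zero, and in that case N is set iff left + right is negative,
        // i.e. the non-zero operand is negative, which is exactly when the JS
        // result would be -0.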
   4174 
   4175 
   4176 void LCodeGen::DoMulS(LMulS* instr) {
   4177   Register result = ToRegister(instr->result());
   4178   Register left = ToRegister(instr->left());
   4179   Register right = ToRegister(instr->right());
   4180 
   4181   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   4182   bool bailout_on_minus_zero =
   4183     instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
   4184 
   4185   if (bailout_on_minus_zero && !left.Is(right)) {
   4186     // If one operand is zero and the other is negative, the result is -0.
   4187     //  - Set Z (eq) if either left or right, or both, are 0.
   4188     __ Cmp(left, 0);
   4189     __ Ccmp(right, 0, ZFlag, ne);
   4190     //  - If so (eq), set N (mi) if left + right is negative.
   4191     //  - Otherwise, clear N.
   4192     __ Ccmn(left, right, NoFlag, eq);
   4193     DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
   4194   }
   4195 
   4196   STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
   4197   if (can_overflow) {
   4198     __ Smulh(result, left, right);
   4199     __ Cmp(result, Operand(result.W(), SXTW));
   4200     __ SmiTag(result);
   4201     DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
   4202   } else {
   4203     if (AreAliased(result, left, right)) {
   4204       // All three registers are the same: half untag the input and then
   4205       // multiply, giving a tagged result.
   4206       STATIC_ASSERT((kSmiShift % 2) == 0);
   4207       __ Asr(result, left, kSmiShift / 2);
   4208       __ Mul(result, result, result);
   4209     } else if (result.Is(left) && !left.Is(right)) {
   4210       // Registers result and left alias, right is distinct: untag left into
   4211       // result, and then multiply by right, giving a tagged result.
   4212       __ SmiUntag(result, left);
   4213       __ Mul(result, result, right);
   4214     } else {
   4215       DCHECK(!left.Is(result));
   4216       // Registers result and right alias, left is distinct, or all registers
   4217       // are distinct: untag right into result, and then multiply by left,
   4218       // giving a tagged result.
   4219       __ SmiUntag(result, right);
   4220       __ Mul(result, left, result);
   4221     }
   4222   }
   4223 }
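        // With kSmiShift == 32, a tagged smi is just its 32-bit value shifted up by
        // 32, which is what makes the tricks above work (sketch, not exact code):
        //  - Smulh of two tagged smis yields the high 64 bits of
        //    (a << 32) * (b << 32), i.e. the untagged product a * b, which is then
        //    range-checked and re-tagged.
        //  - In the fully aliased case, shifting the operand down by kSmiShift / 2
        //    leaves (a << 16), and (a << 16) * (a << 16) == (a * a) << 32, which is
        //    already the tagged result.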
   4224 
   4225 
   4226 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
   4227   // TODO(3095996): Get rid of this. For now, we need to make the
   4228   // result register contain a valid pointer because it is already
   4229   // contained in the register pointer map.
   4230   Register result = ToRegister(instr->result());
   4231   __ Mov(result, 0);
   4232 
   4233   PushSafepointRegistersScope scope(this);
   4234   // NumberTagU and NumberTagD use the context from the frame, rather than
   4235   // the environment's HContext or HInlinedContext value.
   4236   // They only call Runtime::kAllocateHeapNumber.
   4237   // The corresponding HChange instructions are added in a phase that does
   4238   // not have easy access to the local context.
   4239   __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   4240   __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   4241   RecordSafepointWithRegisters(
   4242       instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   4243   __ StoreToSafepointRegisterSlot(x0, result);
   4244 }
   4245 
   4246 
   4247 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
   4248   class DeferredNumberTagD: public LDeferredCode {
   4249    public:
   4250     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
   4251         : LDeferredCode(codegen), instr_(instr) { }
   4252     virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
   4253     virtual LInstruction* instr() { return instr_; }
   4254    private:
   4255     LNumberTagD* instr_;
   4256   };
   4257 
   4258   DoubleRegister input = ToDoubleRegister(instr->value());
   4259   Register result = ToRegister(instr->result());
   4260   Register temp1 = ToRegister(instr->temp1());
   4261   Register temp2 = ToRegister(instr->temp2());
   4262 
   4263   DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
   4264   if (FLAG_inline_new) {
   4265     __ AllocateHeapNumber(result, deferred->entry(), temp1, temp2);
   4266   } else {
   4267     __ B(deferred->entry());
   4268   }
   4269 
   4270   __ Bind(deferred->exit());
   4271   __ Str(input, FieldMemOperand(result, HeapNumber::kValueOffset));
   4272 }
   4273 
   4274 
   4275 void LCodeGen::DoDeferredNumberTagU(LInstruction* instr,
   4276                                     LOperand* value,
   4277                                     LOperand* temp1,
   4278                                     LOperand* temp2) {
   4279   Label slow, convert_and_store;
   4280   Register src = ToRegister32(value);
   4281   Register dst = ToRegister(instr->result());
   4282   Register scratch1 = ToRegister(temp1);
   4283 
   4284   if (FLAG_inline_new) {
   4285     Register scratch2 = ToRegister(temp2);
   4286     __ AllocateHeapNumber(dst, &slow, scratch1, scratch2);
   4287     __ B(&convert_and_store);
   4288   }
   4289 
   4290   // Slow case: call the runtime system to do the number allocation.
   4291   __ Bind(&slow);
   4292   // TODO(3095996): Put a valid pointer value in the stack slot where the result
   4293   // register is stored, as this register is in the pointer map, but contains an
   4294   // integer value.
   4295   __ Mov(dst, 0);
   4296   {
   4297     // Preserve the value of all registers.
   4298     PushSafepointRegistersScope scope(this);
   4299 
   4300     // NumberTagU and NumberTagD use the context from the frame, rather than
   4301     // the environment's HContext or HInlinedContext value.
   4302     // They only call Runtime::kAllocateHeapNumber.
   4303     // The corresponding HChange instructions are added in a phase that does
   4304     // not have easy access to the local context.
   4305     __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   4306     __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   4307     RecordSafepointWithRegisters(
   4308       instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   4309     __ StoreToSafepointRegisterSlot(x0, dst);
   4310   }
   4311 
   4312   // Convert number to floating point and store in the newly allocated heap
   4313   // number.
   4314   __ Bind(&convert_and_store);
   4315   DoubleRegister dbl_scratch = double_scratch();
   4316   __ Ucvtf(dbl_scratch, src);
   4317   __ Str(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
   4318 }
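        // Ucvtf (unsigned convert) is used rather than Scvtf because the input is
        // an untagged uint32; a value above Smi::kMaxValue has bit 31 set and would
        // otherwise be converted as a negative number.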
   4319 
   4320 
   4321 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
   4322   class DeferredNumberTagU: public LDeferredCode {
   4323    public:
   4324     DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
   4325         : LDeferredCode(codegen), instr_(instr) { }
   4326     virtual void Generate() {
   4327       codegen()->DoDeferredNumberTagU(instr_,
   4328                                       instr_->value(),
   4329                                       instr_->temp1(),
   4330                                       instr_->temp2());
   4331     }
   4332     virtual LInstruction* instr() { return instr_; }
   4333    private:
   4334     LNumberTagU* instr_;
   4335   };
   4336 
   4337   Register value = ToRegister32(instr->value());
   4338   Register result = ToRegister(instr->result());
   4339 
   4340   DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
   4341   __ Cmp(value, Smi::kMaxValue);
   4342   __ B(hi, deferred->entry());
   4343   __ SmiTag(result, value.X());
   4344   __ Bind(deferred->exit());
   4345 }
   4346 
   4347 
   4348 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   4349   Register input = ToRegister(instr->value());
   4350   Register scratch = ToRegister(instr->temp());
   4351   DoubleRegister result = ToDoubleRegister(instr->result());
   4352   bool can_convert_undefined_to_nan =
   4353       instr->hydrogen()->can_convert_undefined_to_nan();
   4354 
   4355   Label done, load_smi;
   4356 
   4357   // Work out what untag mode we're working with.
   4358   HValue* value = instr->hydrogen()->value();
   4359   NumberUntagDMode mode = value->representation().IsSmi()
   4360       ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
   4361 
   4362   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
   4363     __ JumpIfSmi(input, &load_smi);
   4364 
   4365     Label convert_undefined;
   4366 
   4367     // Heap number map check.
   4368     if (can_convert_undefined_to_nan) {
   4369       __ JumpIfNotHeapNumber(input, &convert_undefined);
   4370     } else {
   4371       DeoptimizeIfNotHeapNumber(input, instr);
   4372     }
   4373 
   4374     // Load heap number.
   4375     __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
   4376     if (instr->hydrogen()->deoptimize_on_minus_zero()) {
   4377       DeoptimizeIfMinusZero(result, instr, Deoptimizer::kMinusZero);
   4378     }
   4379     __ B(&done);
   4380 
   4381     if (can_convert_undefined_to_nan) {
   4382       __ Bind(&convert_undefined);
   4383       DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
   4384                           Deoptimizer::kNotAHeapNumberUndefined);
   4385 
   4386       __ LoadRoot(scratch, Heap::kNanValueRootIndex);
   4387       __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
   4388       __ B(&done);
   4389     }
   4390 
   4391   } else {
   4392     DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
   4393     // Fall through to load_smi.
   4394   }
   4395 
   4396   // Smi to double register conversion.
   4397   __ Bind(&load_smi);
   4398   __ SmiUntagToDouble(result, input);
   4399 
   4400   __ Bind(&done);
   4401 }
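        // Summary of the untagging behaviour implemented above (illustrative):
        //   smi           -> its integer value converted to a double
        //   heap number   -> its double value (optionally deopting on -0)
        //   undefined     -> NaN, but only if can_convert_undefined_to_nan
        //   anything else -> deoptimize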
   4402 
   4403 
   4404 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
   4405   // This is a pseudo-instruction that ensures that the environment here is
   4406   // properly registered for deoptimization and records the assembler's PC
   4407   // offset.
   4408   LEnvironment* environment = instr->environment();
   4409 
   4410   // If the environment were already registered, we would have no way of
   4411   // backpatching it with the spill slot operands.
   4412   DCHECK(!environment->HasBeenRegistered());
   4413   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
   4414 
   4415   GenerateOsrPrologue();
   4416 }
   4417 
   4418 
   4419 void LCodeGen::DoParameter(LParameter* instr) {
   4420   // Nothing to do.
   4421 }
   4422 
   4423 
   4424 void LCodeGen::DoPreparePushArguments(LPreparePushArguments* instr) {
   4425   __ PushPreamble(instr->argc(), kPointerSize);
   4426 }
   4427 
   4428 
   4429 void LCodeGen::DoPushArguments(LPushArguments* instr) {
   4430   MacroAssembler::PushPopQueue args(masm());
   4431 
   4432   for (int i = 0; i < instr->ArgumentCount(); ++i) {
   4433     LOperand* arg = instr->argument(i);
   4434     if (arg->IsDoubleRegister() || arg->IsDoubleStackSlot()) {
   4435       Abort(kDoPushArgumentNotImplementedForDoubleType);
   4436       return;
   4437     }
   4438     args.Queue(ToRegister(arg));
   4439   }
   4440 
   4441   // The preamble was done by LPreparePushArguments.
   4442   args.PushQueued(MacroAssembler::PushPopQueue::SKIP_PREAMBLE);
   4443 
   4444   RecordPushedArgumentsDelta(instr->ArgumentCount());
   4445 }
   4446 
   4447 
   4448 void LCodeGen::DoReturn(LReturn* instr) {
   4449   if (FLAG_trace && info()->IsOptimizing()) {
   4450     // Push the return value on the stack as the parameter.
   4451     // Runtime::TraceExit returns its parameter in x0. Since we're leaving the
   4452     // code managed by the register allocator and tearing down the frame, it's
   4453     // safe to write to the context register.
   4454     __ Push(x0);
   4455     __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   4456     __ CallRuntime(Runtime::kTraceExit);
   4457   }
   4458 
   4459   if (info()->saves_caller_doubles()) {
   4460     RestoreCallerDoubles();
   4461   }
   4462 
   4463   if (NeedsEagerFrame()) {
   4464     Register stack_pointer = masm()->StackPointer();
   4465     __ Mov(stack_pointer, fp);
   4466     __ Pop(fp, lr);
   4467   }
   4468 
   4469   if (instr->has_constant_parameter_count()) {
   4470     int parameter_count = ToInteger32(instr->constant_parameter_count());
   4471     __ Drop(parameter_count + 1);
   4472   } else {
   4473     DCHECK(info()->IsStub());  // Functions would need to drop one more value.
   4474     Register parameter_count = ToRegister(instr->parameter_count());
   4475     __ DropBySMI(parameter_count);
   4476   }
   4477   __ Ret();
   4478 }
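        // In the constant case the +1 accounts for the receiver, which is dropped
        // together with the parameters; the register case drops only the (smi)
        // parameter count, which is why it is restricted to stubs.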
   4479 
   4480 
   4481 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
   4482                                            Register temp,
   4483                                            LOperand* index,
   4484                                            String::Encoding encoding) {
   4485   if (index->IsConstantOperand()) {
   4486     int offset = ToInteger32(LConstantOperand::cast(index));
   4487     if (encoding == String::TWO_BYTE_ENCODING) {
   4488       offset *= kUC16Size;
   4489     }
   4490     STATIC_ASSERT(kCharSize == 1);
   4491     return FieldMemOperand(string, SeqString::kHeaderSize + offset);
   4492   }
   4493 
   4494   __ Add(temp, string, SeqString::kHeaderSize - kHeapObjectTag);
   4495   if (encoding == String::ONE_BYTE_ENCODING) {
   4496     return MemOperand(temp, ToRegister32(index), SXTW);
   4497   } else {
   4498     STATIC_ASSERT(kUC16Size == 2);
   4499     return MemOperand(temp, ToRegister32(index), SXTW, 1);
   4500   }
   4501 }
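        // Effective addresses produced above (illustrative; FieldMemOperand
        // subtracts kHeapObjectTag):
        //   constant index:  string + SeqString::kHeaderSize + index * char_size
        //   register index:  temp = string + SeqString::kHeaderSize - kHeapObjectTag,
        //                    then [temp + index] for one-byte strings or
        //                    [temp + (index << 1)] for two-byte strings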
   4502 
   4503 
   4504 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
   4505   String::Encoding encoding = instr->hydrogen()->encoding();
   4506   Register string = ToRegister(instr->string());
   4507   Register result = ToRegister(instr->result());
   4508   Register temp = ToRegister(instr->temp());
   4509 
   4510   if (FLAG_debug_code) {
   4511     // Even though this lithium instruction comes with a temp register, we
   4512     // can't use it here because we want to use "AtStart" constraints on the
   4513     // inputs and the debug code here needs a scratch register.
   4514     UseScratchRegisterScope temps(masm());
   4515     Register dbg_temp = temps.AcquireX();
   4516 
   4517     __ Ldr(dbg_temp, FieldMemOperand(string, HeapObject::kMapOffset));
   4518     __ Ldrb(dbg_temp, FieldMemOperand(dbg_temp, Map::kInstanceTypeOffset));
   4519 
   4520     __ And(dbg_temp, dbg_temp,
   4521            Operand(kStringRepresentationMask | kStringEncodingMask));
   4522     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
   4523     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
   4524     __ Cmp(dbg_temp, Operand(encoding == String::ONE_BYTE_ENCODING
   4525                              ? one_byte_seq_type : two_byte_seq_type));
   4526     __ Check(eq, kUnexpectedStringType);
   4527   }
   4528 
   4529   MemOperand operand =
   4530       BuildSeqStringOperand(string, temp, instr->index(), encoding);
   4531   if (encoding == String::ONE_BYTE_ENCODING) {
   4532     __ Ldrb(result, operand);
   4533   } else {
   4534     __ Ldrh(result, operand);
   4535   }
   4536 }
   4537 
   4538 
   4539 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
   4540   String::Encoding encoding = instr->hydrogen()->encoding();
   4541   Register string = ToRegister(instr->string());
   4542   Register value = ToRegister(instr->value());
   4543   Register temp = ToRegister(instr->temp());
   4544 
   4545   if (FLAG_debug_code) {
   4546     DCHECK(ToRegister(instr->context()).is(cp));
   4547     Register index = ToRegister(instr->index());
   4548     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
   4549     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
   4550     int encoding_mask =
   4551         instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
   4552         ? one_byte_seq_type : two_byte_seq_type;
   4553     __ EmitSeqStringSetCharCheck(string, index, kIndexIsInteger32, temp,
   4554                                  encoding_mask);
   4555   }
   4556   MemOperand operand =
   4557       BuildSeqStringOperand(string, temp, instr->index(), encoding);
   4558   if (encoding == String::ONE_BYTE_ENCODING) {
   4559     __ Strb(value, operand);
   4560   } else {
   4561     __ Strh(value, operand);
   4562   }
   4563 }
   4564 
   4565 
   4566 void LCodeGen::DoSmiTag(LSmiTag* instr) {
   4567   HChange* hchange = instr->hydrogen();
   4568   Register input = ToRegister(instr->value());
   4569   Register output = ToRegister(instr->result());
   4570   if (hchange->CheckFlag(HValue::kCanOverflow) &&
   4571       hchange->value()->CheckFlag(HValue::kUint32)) {
   4572     DeoptimizeIfNegative(input.W(), instr, Deoptimizer::kOverflow);
   4573   }
   4574   __ SmiTag(output, input);
   4575 }
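        // A smi payload on arm64 is a signed 32-bit value, so a uint32 input with
        // bit 31 set (i.e. >= 2^31) cannot be tagged without changing its value;
        // that is what the DeoptimizeIfNegative check above catches for values
        // known to be Uint32.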
   4576 
   4577 
   4578 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
   4579   Register input = ToRegister(instr->value());
   4580   Register result = ToRegister(instr->result());
   4581   Label done, untag;
   4582 
   4583   if (instr->needs_check()) {
   4584     DeoptimizeIfNotSmi(input, instr, Deoptimizer::kNotASmi);
   4585   }
   4586 
   4587   __ Bind(&untag);
   4588   __ SmiUntag(result, input);
   4589   __ Bind(&done);
   4590 }
   4591 
   4592 
   4593 void LCodeGen::DoShiftI(LShiftI* instr) {
   4594   LOperand* right_op = instr->right();
   4595   Register left = ToRegister32(instr->left());
   4596   Register result = ToRegister32(instr->result());
   4597 
   4598   if (right_op->IsRegister()) {
   4599     Register right = ToRegister32(instr->right());
   4600     switch (instr->op()) {
   4601       case Token::ROR: __ Ror(result, left, right); break;
   4602       case Token::SAR: __ Asr(result, left, right); break;
   4603       case Token::SHL: __ Lsl(result, left, right); break;
   4604       case Token::SHR:
   4605         __ Lsr(result, left, right);
   4606         if (instr->can_deopt()) {
   4607           // If `left >>> right` >= 0x80000000, the result is not representable
   4608           // in a signed 32-bit smi.
   4609           DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue);
   4610         }
   4611         break;
   4612       default: UNREACHABLE();
   4613     }
   4614   } else {
   4615     DCHECK(right_op->IsConstantOperand());
   4616     int shift_count = JSShiftAmountFromLConstant(right_op);
   4617     if (shift_count == 0) {
   4618       if ((instr->op() == Token::SHR) && instr->can_deopt()) {
   4619         DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue);
   4620       }
   4621       __ Mov(result, left, kDiscardForSameWReg);
   4622     } else {
   4623       switch (instr->op()) {
   4624         case Token::ROR: __ Ror(result, left, shift_count); break;
   4625         case Token::SAR: __ Asr(result, left, shift_count); break;
   4626         case Token::SHL: __ Lsl(result, left, shift_count); break;
   4627         case Token::SHR: __ Lsr(result, left, shift_count); break;
   4628         default: UNREACHABLE();
   4629       }
   4630     }
   4631   }
   4632 }
   4633 
   4634 
   4635 void LCodeGen::DoShiftS(LShiftS* instr) {
   4636   LOperand* right_op = instr->right();
   4637   Register left = ToRegister(instr->left());
   4638   Register result = ToRegister(instr->result());
   4639 
   4640   if (right_op->IsRegister()) {
   4641     Register right = ToRegister(instr->right());
   4642 
   4643     // JavaScript shifts only look at the bottom 5 bits of the 'right' operand.
   4644     // Since we're handling smis in X registers, we have to extract these bits
   4645     // explicitly.
   4646     __ Ubfx(result, right, kSmiShift, 5);
   4647 
   4648     switch (instr->op()) {
   4649       case Token::ROR: {
   4650         // This is the only case that needs a scratch register. To keep things
   4651         // simple for the other cases, borrow a MacroAssembler scratch register.
   4652         UseScratchRegisterScope temps(masm());
   4653         Register temp = temps.AcquireW();
   4654         __ SmiUntag(temp, left);
   4655         __ Ror(result.W(), temp.W(), result.W());
   4656         __ SmiTag(result);
   4657         break;
   4658       }
   4659       case Token::SAR:
   4660         __ Asr(result, left, result);
   4661         __ Bic(result, result, kSmiShiftMask);
   4662         break;
   4663       case Token::SHL:
   4664         __ Lsl(result, left, result);
   4665         break;
   4666       case Token::SHR:
   4667         __ Lsr(result, left, result);
   4668         __ Bic(result, result, kSmiShiftMask);
   4669         if (instr->can_deopt()) {
   4670           // If `left >>> right` >= 0x80000000, the result is not representable
   4671           // in a signed 32-bit smi.
   4672           DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue);
   4673         }
   4674         break;
   4675       default: UNREACHABLE();
   4676     }
   4677   } else {
   4678     DCHECK(right_op->IsConstantOperand());
   4679     int shift_count = JSShiftAmountFromLConstant(right_op);
   4680     if (shift_count == 0) {
   4681       if ((instr->op() == Token::SHR) && instr->can_deopt()) {
   4682         DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue);
   4683       }
   4684       __ Mov(result, left);
   4685     } else {
   4686       switch (instr->op()) {
   4687         case Token::ROR:
   4688           __ SmiUntag(result, left);
   4689           __ Ror(result.W(), result.W(), shift_count);
   4690           __ SmiTag(result);
   4691           break;
   4692         case Token::SAR:
   4693           __ Asr(result, left, shift_count);
   4694           __ Bic(result, result, kSmiShiftMask);
   4695           break;
   4696         case Token::SHL:
   4697           __ Lsl(result, left, shift_count);
   4698           break;
   4699         case Token::SHR:
   4700           __ Lsr(result, left, shift_count);
   4701           __ Bic(result, result, kSmiShiftMask);
   4702           break;
   4703         default: UNREACHABLE();
   4704       }
   4705     }
   4706   }
   4707 }
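        // The shift count itself arrives as a tagged smi (count << kSmiShift), and
        // JavaScript only uses the low five bits of the untagged count, so the Ubfx
        // above extracts bits [kSmiShift, kSmiShift + 5) directly, i.e.
        // (right >> 32) & 0x1f, without a separate untag. The Bic with
        // kSmiShiftMask afterwards clears any bits shifted into the low half so the
        // result remains a valid tagged smi; SHL needs no Bic because the zero tag
        // bits stay zero when shifting left.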
   4708 
   4709 
   4710 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
   4711   __ Debug("LDebugBreak", 0, BREAK);
   4712 }
   4713 
   4714 
   4715 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
   4716   DCHECK(ToRegister(instr->context()).is(cp));
   4717   Register scratch1 = x5;
   4718   Register scratch2 = x6;
   4719   DCHECK(instr->IsMarkedAsCall());
   4720 
   4721   // TODO(all): if Mov could handle an object in new space, then it could be
   4722   // used here.
   4723   __ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
   4724   __ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
   4725   __ Push(scratch1, scratch2);
   4726   CallRuntime(Runtime::kDeclareGlobals, instr);
   4727 }
   4728 
   4729 
   4730 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
   4731   PushSafepointRegistersScope scope(this);
   4732   LoadContextFromDeferred(instr->context());
   4733   __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
   4734   RecordSafepointWithLazyDeopt(
   4735       instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
   4736   DCHECK(instr->HasEnvironment());
   4737   LEnvironment* env = instr->environment();
   4738   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
   4739 }
   4740 
   4741 
   4742 void LCodeGen::DoStackCheck(LStackCheck* instr) {
   4743   class DeferredStackCheck: public LDeferredCode {
   4744    public:
   4745     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
   4746         : LDeferredCode(codegen), instr_(instr) { }
   4747     virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
   4748     virtual LInstruction* instr() { return instr_; }
   4749    private:
   4750     LStackCheck* instr_;
   4751   };
   4752 
   4753   DCHECK(instr->HasEnvironment());
   4754   LEnvironment* env = instr->environment();
   4755   // There is no LLazyBailout instruction for stack-checks. We have to
   4756   // prepare for lazy deoptimization explicitly here.
   4757   if (instr->hydrogen()->is_function_entry()) {
   4758     // Perform stack overflow check.
   4759     Label done;
   4760     __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
   4761     __ B(hs, &done);
   4762 
   4763     PredictableCodeSizeScope predictable(masm_,
   4764                                          Assembler::kCallSizeWithRelocation);
   4765     DCHECK(instr->context()->IsRegister());
   4766     DCHECK(ToRegister(instr->context()).is(cp));
   4767     CallCode(isolate()->builtins()->StackCheck(),
   4768              RelocInfo::CODE_TARGET,
   4769              instr);
   4770     __ Bind(&done);
   4771   } else {
   4772     DCHECK(instr->hydrogen()->is_backwards_branch());
   4773     // Perform stack overflow check if this goto needs it before jumping.
   4774     DeferredStackCheck* deferred_stack_check =
   4775         new(zone()) DeferredStackCheck(this, instr);
   4776     __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
   4777     __ B(lo, deferred_stack_check->entry());
   4778 
   4779     EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
   4780     __ Bind(instr->done_label());
   4781     deferred_stack_check->SetExit(instr->done_label());
   4782     RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
   4783     // Don't record a deoptimization index for the safepoint here.
   4784     // This will be done explicitly when emitting the call and the safepoint in
   4785     // the deferred code.
   4786   }
   4787 }
   4788 
   4789 
   4790 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
   4791   Register function = ToRegister(instr->function());
   4792   Register code_object = ToRegister(instr->code_object());
   4793   Register temp = ToRegister(instr->temp());
   4794   __ Add(temp, code_object, Code::kHeaderSize - kHeapObjectTag);
   4795   __ Str(temp, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
   4796 }
   4797 
   4798 
   4799 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
   4800   Register context = ToRegister(instr->context());
   4801   Register value = ToRegister(instr->value());
   4802   Register scratch = ToRegister(instr->temp());
   4803   MemOperand target = ContextMemOperand(context, instr->slot_index());
   4804 
   4805   Label skip_assignment;
   4806 
   4807   if (instr->hydrogen()->RequiresHoleCheck()) {
   4808     __ Ldr(scratch, target);
   4809     if (instr->hydrogen()->DeoptimizesOnHole()) {
   4810       DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr,
   4811                        Deoptimizer::kHole);
   4812     } else {
   4813       __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
   4814     }
   4815   }
   4816 
   4817   __ Str(value, target);
   4818   if (instr->hydrogen()->NeedsWriteBarrier()) {
   4819     SmiCheck check_needed =
   4820         instr->hydrogen()->value()->type().IsHeapObject()
   4821             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   4822     __ RecordWriteContextSlot(context, static_cast<int>(target.offset()), value,
   4823                               scratch, GetLinkRegisterState(), kSaveFPRegs,
   4824                               EMIT_REMEMBERED_SET, check_needed);
   4825   }
   4826   __ Bind(&skip_assignment);
   4827 }
   4828 
   4829 
   4830 void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
   4831   Register ext_ptr = ToRegister(instr->elements());
   4832   Register key = no_reg;
   4833   Register scratch;
   4834   ElementsKind elements_kind = instr->elements_kind();
   4835 
   4836   bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
   4837   bool key_is_constant = instr->key()->IsConstantOperand();
   4838   int constant_key = 0;
   4839   if (key_is_constant) {
   4840     DCHECK(instr->temp() == NULL);
   4841     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   4842     if (constant_key & 0xf0000000) {
   4843       Abort(kArrayIndexConstantValueTooBig);
   4844     }
   4845   } else {
   4846     key = ToRegister(instr->key());
   4847     scratch = ToRegister(instr->temp());
   4848   }
   4849 
   4850   MemOperand dst =
   4851     PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
   4852                                      key_is_constant, constant_key,
   4853                                      elements_kind,
   4854                                      instr->base_offset());
   4855 
   4856   if (elements_kind == FLOAT32_ELEMENTS) {
   4857     DoubleRegister value = ToDoubleRegister(instr->value());
   4858     DoubleRegister dbl_scratch = double_scratch();
   4859     __ Fcvt(dbl_scratch.S(), value);
   4860     __ Str(dbl_scratch.S(), dst);
   4861   } else if (elements_kind == FLOAT64_ELEMENTS) {
   4862     DoubleRegister value = ToDoubleRegister(instr->value());
   4863     __ Str(value, dst);
   4864   } else {
   4865     Register value = ToRegister(instr->value());
   4866 
   4867     switch (elements_kind) {
   4868       case UINT8_ELEMENTS:
   4869       case UINT8_CLAMPED_ELEMENTS:
   4870       case INT8_ELEMENTS:
   4871         __ Strb(value, dst);
   4872         break;
   4873       case INT16_ELEMENTS:
   4874       case UINT16_ELEMENTS:
   4875         __ Strh(value, dst);
   4876         break;
   4877       case INT32_ELEMENTS:
   4878       case UINT32_ELEMENTS:
   4879         __ Str(value.W(), dst);
   4880         break;
   4881       case FLOAT32_ELEMENTS:
   4882       case FLOAT64_ELEMENTS:
   4883       case FAST_DOUBLE_ELEMENTS:
   4884       case FAST_ELEMENTS:
   4885       case FAST_SMI_ELEMENTS:
   4886       case FAST_HOLEY_DOUBLE_ELEMENTS:
   4887       case FAST_HOLEY_ELEMENTS:
   4888       case FAST_HOLEY_SMI_ELEMENTS:
   4889       case DICTIONARY_ELEMENTS:
   4890       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
   4891       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
   4892         UNREACHABLE();
   4893         break;
   4894     }
   4895   }
   4896 }
   4897 
   4898 
   4899 void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) {
   4900   Register elements = ToRegister(instr->elements());
   4901   DoubleRegister value = ToDoubleRegister(instr->value());
   4902   MemOperand mem_op;
   4903 
   4904   if (instr->key()->IsConstantOperand()) {
   4905     int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   4906     if (constant_key & 0xf0000000) {
   4907       Abort(kArrayIndexConstantValueTooBig);
   4908     }
   4909     int offset = instr->base_offset() + constant_key * kDoubleSize;
   4910     mem_op = MemOperand(elements, offset);
   4911   } else {
   4912     Register store_base = ToRegister(instr->temp());
   4913     Register key = ToRegister(instr->key());
   4914     bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
   4915     mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
   4916                                       instr->hydrogen()->elements_kind(),
   4917                                       instr->hydrogen()->representation(),
   4918                                       instr->base_offset());
   4919   }
   4920 
   4921   if (instr->NeedsCanonicalization()) {
   4922     __ CanonicalizeNaN(double_scratch(), value);
   4923     __ Str(double_scratch(), mem_op);
   4924   } else {
   4925     __ Str(value, mem_op);
   4926   }
   4927 }
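        // Canonicalization rewrites any NaN bit pattern to the single canonical NaN
        // before storing, so that user values can never alias the special "hole"
        // NaN used by holey double arrays. Roughly (illustrative only):
        //
        //   if (value != value) value = canonical_nan;   // what CanonicalizeNaN does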
   4928 
   4929 
   4930 void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
   4931   Register value = ToRegister(instr->value());
   4932   Register elements = ToRegister(instr->elements());
   4933   Register scratch = no_reg;
   4934   Register store_base = no_reg;
   4935   Register key = no_reg;
   4936   MemOperand mem_op;
   4937 
   4938   if (!instr->key()->IsConstantOperand() ||
   4939       instr->hydrogen()->NeedsWriteBarrier()) {
   4940     scratch = ToRegister(instr->temp());
   4941   }
   4942 
   4943   Representation representation = instr->hydrogen()->value()->representation();
   4944   if (instr->key()->IsConstantOperand()) {
   4945     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
   4946     int offset = instr->base_offset() +
   4947         ToInteger32(const_operand) * kPointerSize;
   4948     store_base = elements;
   4949     if (representation.IsInteger32()) {
   4950       DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
   4951       DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
   4952       STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
   4953       STATIC_ASSERT(kSmiTag == 0);
   4954       mem_op = UntagSmiMemOperand(store_base, offset);
   4955     } else {
   4956       mem_op = MemOperand(store_base, offset);
   4957     }
   4958   } else {
   4959     store_base = scratch;
   4960     key = ToRegister(instr->key());
   4961     bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
   4962 
   4963     mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
   4964                                       instr->hydrogen()->elements_kind(),
   4965                                       representation, instr->base_offset());
   4966   }
   4967 
   4968   __ Store(value, mem_op, representation);
   4969 
   4970   if (instr->hydrogen()->NeedsWriteBarrier()) {
   4971     DCHECK(representation.IsTagged());
   4972     // This assignment may cause element_addr to alias store_base.
   4973     Register element_addr = scratch;
   4974     SmiCheck check_needed =
   4975         instr->hydrogen()->value()->type().IsHeapObject()
   4976             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   4977     // Compute the address of the modified element into the scratch register.
   4978     __ Add(element_addr, mem_op.base(), mem_op.OffsetAsOperand());
   4979     __ RecordWrite(elements, element_addr, value, GetLinkRegisterState(),
   4980                    kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed,
   4981                    instr->hydrogen()->PointersToHereCheckForValue());
   4982   }
   4983 }
   4984 
   4985 
   4986 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
   4987   DCHECK(ToRegister(instr->context()).is(cp));
   4988   DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
   4989   DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
   4990   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
   4991 
   4992   if (instr->hydrogen()->HasVectorAndSlot()) {
   4993     EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
   4994   }
   4995 
   4996   Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
   4997                         isolate(), instr->language_mode(),
   4998                         instr->hydrogen()->initialization_state()).code();
   4999   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   5000 }
   5001 
   5002 
   5003 void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
   5004   class DeferredMaybeGrowElements final : public LDeferredCode {
   5005    public:
   5006     DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
   5007         : LDeferredCode(codegen), instr_(instr) {}
   5008     void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
   5009     LInstruction* instr() override { return instr_; }
   5010 
   5011    private:
   5012     LMaybeGrowElements* instr_;
   5013   };
   5014 
   5015   Register result = x0;
   5016   DeferredMaybeGrowElements* deferred =
   5017       new (zone()) DeferredMaybeGrowElements(this, instr);
   5018   LOperand* key = instr->key();
   5019   LOperand* current_capacity = instr->current_capacity();
   5020 
   5021   DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
   5022   DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
   5023   DCHECK(key->IsConstantOperand() || key->IsRegister());
   5024   DCHECK(current_capacity->IsConstantOperand() ||
   5025          current_capacity->IsRegister());
   5026 
   5027   if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
   5028     int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
   5029     int32_t constant_capacity =
   5030         ToInteger32(LConstantOperand::cast(current_capacity));
   5031     if (constant_key >= constant_capacity) {
   5032       // Deferred case.
   5033       __ B(deferred->entry());
   5034     }
   5035   } else if (key->IsConstantOperand()) {
   5036     int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
   5037     __ Cmp(ToRegister(current_capacity), Operand(constant_key));
   5038     __ B(le, deferred->entry());
   5039   } else if (current_capacity->IsConstantOperand()) {
   5040     int32_t constant_capacity =
   5041         ToInteger32(LConstantOperand::cast(current_capacity));
   5042     __ Cmp(ToRegister(key), Operand(constant_capacity));
   5043     __ B(ge, deferred->entry());
   5044   } else {
   5045     __ Cmp(ToRegister(key), ToRegister(current_capacity));
   5046     __ B(ge, deferred->entry());
   5047   }
   5048 
   5049   __ Mov(result, ToRegister(instr->elements()));
   5050 
   5051   __ Bind(deferred->exit());
   5052 }
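        // Growth is needed whenever key >= current_capacity; the branches above
        // merely specialize that comparison for each constant/register operand
        // combination (the condition flips to le when capacity rather than key is
        // the left operand of the Cmp).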
   5053 
   5054 
   5055 void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
   5056   // TODO(3095996): Get rid of this. For now, we need to make the
   5057   // result register contain a valid pointer because it is already
   5058   // contained in the register pointer map.
   5059   Register result = x0;
   5060   __ Mov(result, 0);
   5061 
   5062   // We have to call a stub.
   5063   {
   5064     PushSafepointRegistersScope scope(this);
   5065     __ Move(result, ToRegister(instr->object()));
   5066 
   5067     LOperand* key = instr->key();
   5068     if (key->IsConstantOperand()) {
   5069       __ Mov(x3, Operand(ToSmi(LConstantOperand::cast(key))));
   5070     } else {
   5071       __ Mov(x3, ToRegister(key));
   5072       __ SmiTag(x3);
   5073     }
   5074 
   5075     GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
   5076                                instr->hydrogen()->kind());
   5077     __ CallStub(&stub);
   5078     RecordSafepointWithLazyDeopt(
   5079         instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
   5080     __ StoreToSafepointRegisterSlot(result, result);
   5081   }
   5082 
   5083   // Deopt on smi, which means the elements array changed to dictionary mode.
   5084   DeoptimizeIfSmi(result, instr, Deoptimizer::kSmi);
   5085 }
   5086 
   5087 
   5088 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
   5089   Representation representation = instr->representation();
   5090 
   5091   Register object = ToRegister(instr->object());
   5092   HObjectAccess access = instr->hydrogen()->access();
   5093   int offset = access.offset();
   5094 
   5095   if (access.IsExternalMemory()) {
   5096     DCHECK(!instr->hydrogen()->has_transition());
   5097     DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
   5098     Register value = ToRegister(instr->value());
   5099     __ Store(value, MemOperand(object, offset), representation);
   5100     return;
   5101   }
   5102 
   5103   __ AssertNotSmi(object);
   5104 
   5105   if (!FLAG_unbox_double_fields && representation.IsDouble()) {
   5106     DCHECK(access.IsInobject());
   5107     DCHECK(!instr->hydrogen()->has_transition());
   5108     DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
   5109     FPRegister value = ToDoubleRegister(instr->value());
   5110     __ Str(value, FieldMemOperand(object, offset));
   5111     return;
   5112   }
   5113 
   5114   DCHECK(!representation.IsSmi() ||
   5115          !instr->value()->IsConstantOperand() ||
   5116          IsInteger32Constant(LConstantOperand::cast(instr->value())));
   5117 
   5118   if (instr->hydrogen()->has_transition()) {
   5119     Handle<Map> transition = instr->hydrogen()->transition_map();
   5120     AddDeprecationDependency(transition);
   5121     // Store the new map value.
   5122     Register new_map_value = ToRegister(instr->temp0());
   5123     __ Mov(new_map_value, Operand(transition));
   5124     __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset));
   5125     if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
   5126       // Update the write barrier for the map field.
   5127       __ RecordWriteForMap(object,
   5128                            new_map_value,
   5129                            ToRegister(instr->temp1()),
   5130                            GetLinkRegisterState(),
   5131                            kSaveFPRegs);
   5132     }
   5133   }
   5134 
   5135   // Do the store.
   5136   Register destination;
   5137   if (access.IsInobject()) {
   5138     destination = object;
   5139   } else {
   5140     Register temp0 = ToRegister(instr->temp0());
   5141     __ Ldr(temp0, FieldMemOperand(object, JSObject::kPropertiesOffset));
   5142     destination = temp0;
   5143   }
   5144 
   5145   if (FLAG_unbox_double_fields && representation.IsDouble()) {
   5146     DCHECK(access.IsInobject());
   5147     FPRegister value = ToDoubleRegister(instr->value());
   5148     __ Str(value, FieldMemOperand(object, offset));
   5149   } else if (representation.IsSmi() &&
   5150              instr->hydrogen()->value()->representation().IsInteger32()) {
   5151     DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
   5152 #ifdef DEBUG
   5153     Register temp0 = ToRegister(instr->temp0());
   5154     __ Ldr(temp0, FieldMemOperand(destination, offset));
   5155     __ AssertSmi(temp0);
   5156     // If destination aliased temp0, restore it to the address calculated
   5157     // earlier.
   5158     if (destination.Is(temp0)) {
   5159       DCHECK(!access.IsInobject());
   5160       __ Ldr(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
   5161     }
   5162 #endif
   5163     STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
   5164     STATIC_ASSERT(kSmiTag == 0);
   5165     Register value = ToRegister(instr->value());
   5166     __ Store(value, UntagSmiFieldMemOperand(destination, offset),
   5167              Representation::Integer32());
   5168   } else {
   5169     Register value = ToRegister(instr->value());
   5170     __ Store(value, FieldMemOperand(destination, offset), representation);
   5171   }
   5172   if (instr->hydrogen()->NeedsWriteBarrier()) {
   5173     Register value = ToRegister(instr->value());
   5174     __ RecordWriteField(destination,
   5175                         offset,
   5176                         value,                        // Clobbered.
   5177                         ToRegister(instr->temp1()),   // Clobbered.
   5178                         GetLinkRegisterState(),
   5179                         kSaveFPRegs,
   5180                         EMIT_REMEMBERED_SET,
   5181                         instr->hydrogen()->SmiCheckForWriteBarrier(),
   5182                         instr->hydrogen()->PointersToHereCheckForValue());
   5183   }
   5184 }
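        // Because kSmiShift == 32, a smi field keeps its payload in the upper 32
        // bits with zeros below. For an already-initialized smi entry
        // (STORE_TO_INITIALIZED_ENTRY), the int32 value can therefore be written
        // with a single 32-bit store to the upper half of the field
        // (UntagSmiFieldMemOperand), leaving the existing zero tag bits untouched.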
   5185 
   5186 
   5187 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
   5188   DCHECK(ToRegister(instr->context()).is(cp));
   5189   DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
   5190   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
   5191 
   5192   if (instr->hydrogen()->HasVectorAndSlot()) {
   5193     EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
   5194   }
   5195 
   5196   __ Mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
   5197   Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
   5198                         isolate(), instr->language_mode(),
   5199                         instr->hydrogen()->initialization_state()).code();
   5200   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   5201 }
   5202 
   5203 
   5204 void LCodeGen::DoStringAdd(LStringAdd* instr) {
   5205   DCHECK(ToRegister(instr->context()).is(cp));
   5206   DCHECK(ToRegister(instr->left()).Is(x1));
   5207   DCHECK(ToRegister(instr->right()).Is(x0));
   5208   StringAddStub stub(isolate(),
   5209                      instr->hydrogen()->flags(),
   5210                      instr->hydrogen()->pretenure_flag());
   5211   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   5212 }
   5213 
   5214 
   5215 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
   5216   class DeferredStringCharCodeAt: public LDeferredCode {
   5217    public:
   5218     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
   5219         : LDeferredCode(codegen), instr_(instr) { }
   5220     virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
   5221     virtual LInstruction* instr() { return instr_; }
   5222    private:
   5223     LStringCharCodeAt* instr_;
   5224   };
   5225 
   5226   DeferredStringCharCodeAt* deferred =
   5227       new(zone()) DeferredStringCharCodeAt(this, instr);
   5228 
   5229   StringCharLoadGenerator::Generate(masm(),
   5230                                     ToRegister(instr->string()),
   5231                                     ToRegister32(instr->index()),
   5232                                     ToRegister(instr->result()),
   5233                                     deferred->entry());
   5234   __ Bind(deferred->exit());
   5235 }
   5236 
   5237 
   5238 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
   5239   Register string = ToRegister(instr->string());
   5240   Register result = ToRegister(instr->result());
   5241 
   5242   // TODO(3095996): Get rid of this. For now, we need to make the
   5243   // result register contain a valid pointer because it is already
   5244   // contained in the register pointer map.
   5245   __ Mov(result, 0);
   5246 
   5247   PushSafepointRegistersScope scope(this);
   5248   __ Push(string);
   5249   // Push the index as a smi. This is safe because of the checks in
   5250   // DoStringCharCodeAt above.
   5251   Register index = ToRegister(instr->index());
   5252   __ SmiTagAndPush(index);
   5253 
   5254   CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
   5255                           instr->context());
   5256   __ AssertSmi(x0);
   5257   __ SmiUntag(x0);
   5258   __ StoreToSafepointRegisterSlot(x0, result);
   5259 }
   5260 
   5261 
   5262 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
   5263   class DeferredStringCharFromCode: public LDeferredCode {
   5264    public:
   5265     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
   5266         : LDeferredCode(codegen), instr_(instr) { }
   5267     virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
   5268     virtual LInstruction* instr() { return instr_; }
   5269    private:
   5270     LStringCharFromCode* instr_;
   5271   };
   5272 
   5273   DeferredStringCharFromCode* deferred =
   5274       new(zone()) DeferredStringCharFromCode(this, instr);
   5275 
   5276   DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
   5277   Register char_code = ToRegister32(instr->char_code());
   5278   Register result = ToRegister(instr->result());
   5279 
   5280   __ Cmp(char_code, String::kMaxOneByteCharCode);
   5281   __ B(hi, deferred->entry());
   5282   __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
   5283   __ Add(result, result, FixedArray::kHeaderSize - kHeapObjectTag);
   5284   __ Ldr(result, MemOperand(result, char_code, SXTW, kPointerSizeLog2));
   5285   __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
   5286   __ B(eq, deferred->entry());
   5287   __ Bind(deferred->exit());
   5288 }
   5289 
   5290 
   5291 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
   5292   Register char_code = ToRegister(instr->char_code());
   5293   Register result = ToRegister(instr->result());
   5294 
   5295   // TODO(3095996): Get rid of this. For now, we need to make the
   5296   // result register contain a valid pointer because it is already
   5297   // contained in the register pointer map.
   5298   __ Mov(result, 0);
   5299 
   5300   PushSafepointRegistersScope scope(this);
   5301   __ SmiTagAndPush(char_code);
   5302   CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
   5303                           instr->context());
   5304   __ StoreToSafepointRegisterSlot(x0, result);
   5305 }
   5306 
   5307 
   5308 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
   5309   DCHECK(ToRegister(instr->context()).is(cp));
   5310   DCHECK(ToRegister(instr->left()).is(x1));
   5311   DCHECK(ToRegister(instr->right()).is(x0));
   5312 
   5313   Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
   5314   CallCode(code, RelocInfo::CODE_TARGET, instr);
   5315 
   5316   EmitCompareAndBranch(instr, TokenToCondition(instr->op(), false), x0, 0);
   5317 }
   5318 
   5319 
   5320 void LCodeGen::DoSubI(LSubI* instr) {
   5321   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   5322   Register result = ToRegister32(instr->result());
   5323   Register left = ToRegister32(instr->left());
   5324   Operand right = ToShiftedRightOperand32(instr->right(), instr);
   5325 
   5326   if (can_overflow) {
   5327     __ Subs(result, left, right);
   5328     DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
   5329   } else {
   5330     __ Sub(result, left, right);
   5331   }
   5332 }
   5333 
   5334 
   5335 void LCodeGen::DoSubS(LSubS* instr) {
   5336   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   5337   Register result = ToRegister(instr->result());
   5338   Register left = ToRegister(instr->left());
   5339   Operand right = ToOperand(instr->right());
   5340   if (can_overflow) {
   5341     __ Subs(result, left, right);
   5342     DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
   5343   } else {
   5344     __ Sub(result, left, right);
   5345   }
   5346 }
   5347 
   5348 
   5349 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
   5350                                    LOperand* value,
   5351                                    LOperand* temp1,
   5352                                    LOperand* temp2) {
   5353   Register input = ToRegister(value);
   5354   Register scratch1 = ToRegister(temp1);
   5355   DoubleRegister dbl_scratch1 = double_scratch();
   5356 
   5357   Label done;
   5358 
   5359   if (instr->truncating()) {
   5360     Register output = ToRegister(instr->result());
   5361     Label check_bools;
   5362 
   5363     // If it's not a heap number, jump to the boolean and undefined checks.
   5364     __ JumpIfNotHeapNumber(input, &check_bools);
   5365 
   5366     // A heap number: load the value and truncate it to an int32.
   5367     __ TruncateHeapNumberToI(output, input);
   5368     __ B(&done);
   5369 
   5370     __ Bind(&check_bools);
   5371 
   5372     Register true_root = output;
   5373     Register false_root = scratch1;
   5374     __ LoadTrueFalseRoots(true_root, false_root);
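    // Cset sets output to 1 when the input is the true value. Ccmp compares
    // against the false value only if the first comparison failed (ne);
    // otherwise it forces the Z flag, so the branch below is taken for both
    // true (output == 1) and false (output == 0).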
    __ Cmp(input, true_root);
    __ Cset(output, eq);
    __ Ccmp(input, false_root, ZFlag, ne);
    __ B(eq, &done);

    // Output already contains zero; undefined is converted to zero for
    // truncating conversions.
    DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
                        Deoptimizer::kNotAHeapNumberUndefinedBoolean);
  } else {
    Register output = ToRegister32(instr->result());
    DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);

    DeoptimizeIfNotHeapNumber(input, instr);

    // A heap number: load the value and convert it to int32 using the
    // non-truncating function. If the result is out of range, branch to
    // deoptimize.
    __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
    __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);

    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ Cmp(output, 0);
      __ B(ne, &done);
      __ Fmov(scratch1, dbl_scratch1);
      DeoptimizeIfNegative(scratch1, instr, Deoptimizer::kMinusZero);
    }
  }
  __ Bind(&done);
}


void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
  class DeferredTaggedToI: public LDeferredCode {
   public:
    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() {
      codegen()->DoDeferredTaggedToI(instr_, instr_->value(), instr_->temp1(),
                                     instr_->temp2());
    }

    virtual LInstruction* instr() { return instr_; }
   private:
    LTaggedToI* instr_;
  };

  Register input = ToRegister(instr->value());
  Register output = ToRegister(instr->result());

  if (instr->hydrogen()->value()->representation().IsSmi()) {
    __ SmiUntag(output, input);
  } else {
    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);

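    // Untag smis inline; heap numbers, booleans and undefined are handled
    // by the deferred code above.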
    __ JumpIfNotSmi(input, deferred->entry());
    __ SmiUntag(output, input);
    __ Bind(deferred->exit());
  }
}


void LCodeGen::DoThisFunction(LThisFunction* instr) {
  Register result = ToRegister(instr->result());
  __ Ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}


void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  DCHECK(ToRegister(instr->value()).Is(x0));
  DCHECK(ToRegister(instr->result()).Is(x0));
  __ Push(x0);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


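// Simple map-change transitions are performed by rewriting the object's map
// pointer in place; all other transitions go through
// TransitionElementsKindStub.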
void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
  Register object = ToRegister(instr->object());

  Handle<Map> from_map = instr->original_map();
  Handle<Map> to_map = instr->transitioned_map();
  ElementsKind from_kind = instr->from_kind();
  ElementsKind to_kind = instr->to_kind();

  Label not_applicable;

  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
    Register temp1 = ToRegister(instr->temp1());
    Register new_map = ToRegister(instr->temp2());
    __ CheckMap(object, temp1, from_map, &not_applicable, DONT_DO_SMI_CHECK);
    __ Mov(new_map, Operand(to_map));
    __ Str(new_map, FieldMemOperand(object, HeapObject::kMapOffset));
    // Write barrier.
    __ RecordWriteForMap(object, new_map, temp1, GetLinkRegisterState(),
                         kDontSaveFPRegs);
  } else {
    {
      UseScratchRegisterScope temps(masm());
      // Use the temp register only in a restricted scope - the codegen checks
      // that we do not use any register across a call.
      __ CheckMap(object, temps.AcquireX(), from_map, &not_applicable,
                  DONT_DO_SMI_CHECK);
    }
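    // The stub expects the object in x0 and the new map in x1, and it can
    // trigger a GC, so a safepoint with the saved registers is recorded
    // after the call.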
    DCHECK(object.is(x0));
    DCHECK(ToRegister(instr->context()).is(cp));
    PushSafepointRegistersScope scope(this);
    __ Mov(x1, Operand(to_map));
    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
    __ CallStub(&stub);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
  __ Bind(&not_applicable);
}


void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
  DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
  __ Bind(&no_memento_found);
}


void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  __ TruncateDoubleToI(result, input);
  if (instr->tag_result()) {
    __ SmiTag(result, result);
  }
}


void LCodeGen::DoTypeof(LTypeof* instr) {
  DCHECK(ToRegister(instr->value()).is(x3));
  DCHECK(ToRegister(instr->result()).is(x0));
  Label end, do_call;
  Register value_register = ToRegister(instr->value());
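  // Smis are always numbers, so the answer is known without calling the stub.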
  __ JumpIfNotSmi(value_register, &do_call);
  __ Mov(x0, Immediate(isolate()->factory()->number_string()));
  __ B(&end);
  __ Bind(&do_call);
  TypeofStub stub(isolate());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ Bind(&end);
}


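// The type literal is a compile-time constant, so each typeof comparison
// below is lowered to a direct map, root or bit-field check rather than a
// generic string comparison.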
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Handle<String> type_name = instr->type_literal();
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Register value = ToRegister(instr->value());

  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(value, true_label);

    int true_block = instr->TrueDestination(chunk_);
    int false_block = instr->FalseDestination(chunk_);
    int next_block = GetNextEmittedBlock();

    if (true_block == false_block) {
      EmitGoto(true_block);
    } else if (true_block == next_block) {
      __ JumpIfNotHeapNumber(value, chunk_->GetAssemblyLabel(false_block));
    } else {
      __ JumpIfHeapNumber(value, chunk_->GetAssemblyLabel(true_block));
      if (false_block != next_block) {
        __ B(chunk_->GetAssemblyLabel(false_block));
      }
    }

  } else if (String::Equals(type_name, factory->string_string())) {
    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ CompareObjectType(value, map, scratch, FIRST_NONSTRING_TYPE);
    EmitBranch(instr, lt);

  } else if (String::Equals(type_name, factory->symbol_string())) {
    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ CompareObjectType(value, map, scratch, SYMBOL_TYPE);
    EmitBranch(instr, eq);

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label);
    __ CompareRoot(value, Heap::kFalseValueRootIndex);
    EmitBranch(instr, eq);

  } else if (String::Equals(type_name, factory->undefined_string())) {
    DCHECK(instr->temp1() != NULL);
    Register scratch = ToRegister(instr->temp1());

    __ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, true_label);
    __ JumpIfSmi(value, false_label);
    // Check for undetectable objects and jump to the true branch in this case.
    __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
    __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable);

  } else if (String::Equals(type_name, factory->function_string())) {
    DCHECK(instr->temp1() != NULL);
    Register scratch = ToRegister(instr->temp1());

    __ JumpIfSmi(value, false_label);
    __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
    __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
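    // typeof yields "function" only for objects that are callable and not
    // undetectable: mask both bits and require exactly kIsCallable.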
    __ And(scratch, scratch,
           (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
    EmitCompareAndBranch(instr, eq, scratch, 1 << Map::kIsCallable);

  } else if (String::Equals(type_name, factory->object_string())) {
    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
    __ JumpIfObjectType(value, map, scratch, FIRST_JS_RECEIVER_TYPE,
                        false_label, lt);
    // Check for callable or undetectable objects => false.
    __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, eq, scratch,
                      (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));

// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)       \
  } else if (String::Equals(type_name, factory->type##_string())) { \
    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));   \
    Register map = ToRegister(instr->temp1());                      \
                                                                    \
    __ JumpIfSmi(value, false_label);                               \
    __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));    \
    __ CompareRoot(map, Heap::k##Type##MapRootIndex);               \
    EmitBranch(instr, eq);
  SIMD128_TYPES(SIMD128_TYPE)
#undef SIMD128_TYPE
    // clang-format on

  } else {
    __ B(false_label);
  }
}


void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value()));
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  Register temp = ToRegister(instr->temp());
  __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Cmp(map, temp);
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
}


void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // If the receiver is null or undefined, we have to pass the global object as
  // a receiver to normal functions. Values have to be passed unchanged to
  // builtins and strict-mode functions.
  Label global_object, done, copy_receiver;

  if (!instr->hydrogen()->known_function()) {
    __ Ldr(result, FieldMemOperand(function,
                                   JSFunction::kSharedFunctionInfoOffset));

    // CompilerHints is an int32 field. See objects.h.
    __ Ldr(result.W(),
           FieldMemOperand(result, SharedFunctionInfo::kCompilerHintsOffset));

    // Do not transform the receiver to object for strict mode functions.
    __ Tbnz(result, SharedFunctionInfo::kStrictModeFunction, &copy_receiver);

    // Do not transform the receiver to object for builtins.
    __ Tbnz(result, SharedFunctionInfo::kNative, &copy_receiver);
  }

  // Normal function. Replace undefined or null with global receiver.
  __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object);
  __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);

  // Deoptimize if the receiver is not a JS object.
  DeoptimizeIfSmi(receiver, instr, Deoptimizer::kSmi);
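  // ge: the instance type is at least FIRST_JS_RECEIVER_TYPE, so the
  // receiver is already a JS receiver and can be used unchanged.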
  __ CompareObjectType(receiver, result, result, FIRST_JS_RECEIVER_TYPE);
  __ B(ge, &copy_receiver);
  Deoptimize(instr, Deoptimizer::kNotAJavaScriptObject);

  __ Bind(&global_object);
  __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
  __ Ldr(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
  __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
  __ B(&done);

  __ Bind(&copy_receiver);
  __ Mov(result, receiver);
  __ Bind(&done);
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object);
  __ Push(index);
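  // The runtime function below does not need a context; zero is a smi, so
  // it is presumably a safe value for cp to hold across the safepoint.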
  __ Mov(cp, 0);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register result,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {
    }
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());

  __ AssertSmi(index);

  DeferredLoadMutableDouble* deferred;
  deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, result, object, index);

  Label out_of_object, done;

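  // The low bit of the smi-encoded index flags a mutable-double field,
  // which must be loaded through the runtime; shift the flag out before
  // using the index. A negative index denotes an out-of-object property.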
  __ TestAndBranchIfAnySet(
      index, reinterpret_cast<uint64_t>(Smi::FromInt(1)), deferred->entry());
  __ Mov(index, Operand(index, ASR, 1));

  __ Cmp(index, Smi::FromInt(0));
  __ B(lt, &out_of_object);

  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
  __ Add(result, object, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ Ldr(result, FieldMemOperand(result, JSObject::kHeaderSize));

  __ B(&done);

  __ Bind(&out_of_object);
  __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // The index is the negated out-of-object property index plus 1.
  __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ Ldr(result, FieldMemOperand(result,
                                 FixedArray::kHeaderSize - kPointerSize));
  __ Bind(deferred->exit());
  __ Bind(&done);
}


void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ Str(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ Push(scope_info);
  __ Push(ToRegister(instr->function()));
  CallRuntime(Runtime::kPushBlockContext, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}


}  // namespace internal
}  // namespace v8