// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/crankshaft/arm64/lithium-codegen-arm64.h"

#include "src/arm64/frames-arm64.h"
#include "src/base/bits.h"
#include "src/builtins/builtins-constructor.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/crankshaft/arm64/lithium-gap-resolver-arm64.h"
#include "src/crankshaft/hydrogen-osr.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {

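// Records a safepoint after a call. Implemented as a CallWrapper so that the
// safepoint is taken immediately after the call instruction, using the
// pointer map and deopt mode captured at construction.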
class SafepointGenerator final : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen,
                     LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen),
        pointers_(pointers),
        deopt_mode_(mode) { }
  virtual ~SafepointGenerator() { }

  virtual void BeforeCall(int call_size) const { }

  virtual void AfterCall() const {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};

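// While a PushSafepointRegistersScope is active, the allocatable registers
// are spilled via StoreRegistersStateStub and restored on destruction via
// RestoreRegistersStateStub; the expected safepoint kind is switched to
// kWithRegisters for the duration of the scope.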
LCodeGen::PushSafepointRegistersScope::PushSafepointRegistersScope(
    LCodeGen* codegen)
    : codegen_(codegen) {
  DCHECK(codegen_->info()->is_calling());
  DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
  codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;

  UseScratchRegisterScope temps(codegen_->masm_);
  // Preserve the value of lr which must be saved on the stack (the call to
  // the stub will clobber it).
  Register to_be_pushed_lr =
      temps.UnsafeAcquire(StoreRegistersStateStub::to_be_pushed_lr());
  codegen_->masm_->Mov(to_be_pushed_lr, lr);
  StoreRegistersStateStub stub(codegen_->isolate());
  codegen_->masm_->CallStub(&stub);
}

LCodeGen::PushSafepointRegistersScope::~PushSafepointRegistersScope() {
  DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
  RestoreRegistersStateStub stub(codegen_->isolate());
  codegen_->masm_->CallStub(&stub);
  codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
}

#define __ masm()->

// Emit code to branch if the given condition holds.
// The code generated here doesn't modify the flags, so they must have
// been set by some prior instructions.
//
// The EmitInverted function simply inverts the condition.
class BranchOnCondition : public BranchGenerator {
 public:
  BranchOnCondition(LCodeGen* codegen, Condition cond)
    : BranchGenerator(codegen),
      cond_(cond) { }

  virtual void Emit(Label* label) const {
    __ B(cond_, label);
  }

  virtual void EmitInverted(Label* label) const {
    if (cond_ != al) {
      __ B(NegateCondition(cond_), label);
    }
  }

 private:
  Condition cond_;
};


// Emit code to compare lhs and rhs and branch if the condition holds.
// This uses MacroAssembler's CompareAndBranch function so it will handle
// converting the comparison to Cbz/Cbnz if the right-hand side is 0.
//
// EmitInverted still compares the two operands but inverts the condition.
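// For example, a comparison against an immediate 0 with condition 'eq' can
// be emitted as a single Cbz.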
class CompareAndBranch : public BranchGenerator {
 public:
  CompareAndBranch(LCodeGen* codegen,
                   Condition cond,
                   const Register& lhs,
                   const Operand& rhs)
    : BranchGenerator(codegen),
      cond_(cond),
      lhs_(lhs),
      rhs_(rhs) { }

  virtual void Emit(Label* label) const {
    __ CompareAndBranch(lhs_, rhs_, cond_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ CompareAndBranch(lhs_, rhs_, NegateCondition(cond_), label);
  }

 private:
  Condition cond_;
  const Register& lhs_;
  const Operand& rhs_;
};


// Test the input with the given mask and branch if the condition holds.
// If the condition is 'eq' or 'ne' this will use MacroAssembler's
// TestAndBranchIfAllClear and TestAndBranchIfAnySet so it will handle the
// conversion to Tbz/Tbnz when possible.
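// For example, an 'eq' test against a single-bit mask can be emitted as a
// single Tbz on that bit.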
class TestAndBranch : public BranchGenerator {
 public:
  TestAndBranch(LCodeGen* codegen,
                Condition cond,
                const Register& value,
                uint64_t mask)
    : BranchGenerator(codegen),
      cond_(cond),
      value_(value),
      mask_(mask) { }

  virtual void Emit(Label* label) const {
    switch (cond_) {
      case eq:
        __ TestAndBranchIfAllClear(value_, mask_, label);
        break;
      case ne:
        __ TestAndBranchIfAnySet(value_, mask_, label);
        break;
      default:
        __ Tst(value_, mask_);
        __ B(cond_, label);
    }
  }

  virtual void EmitInverted(Label* label) const {
    // The inverse of "all clear" is "any set" and vice versa.
    switch (cond_) {
      case eq:
        __ TestAndBranchIfAnySet(value_, mask_, label);
        break;
      case ne:
        __ TestAndBranchIfAllClear(value_, mask_, label);
        break;
      default:
        __ Tst(value_, mask_);
        __ B(NegateCondition(cond_), label);
    }
  }

 private:
  Condition cond_;
  const Register& value_;
  uint64_t mask_;
};


// Test the input and branch if it is non-zero and not a NaN.
class BranchIfNonZeroNumber : public BranchGenerator {
 public:
  BranchIfNonZeroNumber(LCodeGen* codegen, const FPRegister& value,
                        const FPRegister& scratch)
    : BranchGenerator(codegen), value_(value), scratch_(scratch) { }

  virtual void Emit(Label* label) const {
    __ Fabs(scratch_, value_);
    // Compare with 0.0. Because scratch_ is positive, the result can be one of
    // nZCv (equal), nzCv (greater) or nzCV (unordered).
    __ Fcmp(scratch_, 0.0);
    __ B(gt, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ Fabs(scratch_, value_);
    __ Fcmp(scratch_, 0.0);
    __ B(le, label);
  }

 private:
  const FPRegister& value_;
  const FPRegister& scratch_;
};


// Test the input and branch if it is a heap number.
class BranchIfHeapNumber : public BranchGenerator {
 public:
  BranchIfHeapNumber(LCodeGen* codegen, const Register& value)
      : BranchGenerator(codegen), value_(value) { }

  virtual void Emit(Label* label) const {
    __ JumpIfHeapNumber(value_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ JumpIfNotHeapNumber(value_, label);
  }

 private:
  const Register& value_;
};


// Test the input and branch if it is the specified root value.
class BranchIfRoot : public BranchGenerator {
 public:
  BranchIfRoot(LCodeGen* codegen, const Register& value,
               Heap::RootListIndex index)
      : BranchGenerator(codegen), value_(value), index_(index) { }

  virtual void Emit(Label* label) const {
    __ JumpIfRoot(value_, index_, label);
  }

  virtual void EmitInverted(Label* label) const {
    __ JumpIfNotRoot(value_, index_, label);
  }

 private:
  const Register& value_;
  const Heap::RootListIndex index_;
};


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();

  WriteTranslation(environment->outer(), translation);
  WriteTranslationFrame(environment, translation);

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(
        environment, translation, value, environment->HasTaggedValueAt(i),
        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
  }
}


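// Translates one LOperand into deoptimizer commands. A materialization
// marker expands recursively into the captured object's field values, which
// live past the end of the regular translation area in the environment.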
void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation,
                                LOperand* op,
                                bool is_tagged,
                                bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment,
                       translation,
                       value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer,
                       dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    int index = op->index();
    if (is_tagged) {
      translation->StoreStackSlot(index);
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(index);
    } else {
      translation->StoreInt32StackSlot(index);
    }
  } else if (op->IsDoubleStackSlot()) {
    int index = op->index();
    translation->StoreDoubleStackSlot(index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index,
                          translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::CallCode(Handle<Code> code,
                        RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code,
                               RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);

  Assembler::BlockPoolsScope scope(masm_);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  if ((code->kind() == Code::BINARY_OP_IC) ||
      (code->kind() == Code::COMPARE_IC)) {
    // Signal that we don't inline smi code before these stubs in the
    // optimizing code generator.
    InlineSmiCheckInfo::EmitNotInlined(masm());
  }
}


void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
  DCHECK(instr->IsMarkedAsCall());
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->constructor()).is(x1));

  __ Mov(x0, Operand(instr->arity()));
  __ Mov(x2, instr->hydrogen()->site());

  ElementsKind kind = instr->hydrogen()->elements_kind();
  AllocationSiteOverrideMode override_mode =
      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
          ? DISABLE_ALLOCATION_SITES
          : DONT_OVERRIDE;

  if (instr->arity() == 0) {
    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  } else if (instr->arity() == 1) {
    Label done;
    if (IsFastPackedElementsKind(kind)) {
      Label packed_case;

      // We might need to create a holey array; look at the first argument.
      __ Peek(x10, 0);
      __ Cbz(x10, &packed_case);

      ElementsKind holey_kind = GetHoleyElementsKind(kind);
      ArraySingleArgumentConstructorStub stub(isolate(),
                                              holey_kind,
                                              override_mode);
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      __ B(&done);
      __ Bind(&packed_case);
    }

    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
    __ Bind(&done);
  } else {
    ArrayNArgumentsConstructorStub stub(isolate());
    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  }
  RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());

  DCHECK(ToRegister(instr->result()).is(x0));
}


void LCodeGen::CallRuntime(const Runtime::Function* function,
                           int num_arguments,
                           LInstruction* instr,
                           SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Mov(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ Ldr(cp, ToMemOperand(context, kMustUseFramePointer));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ LoadHeapObject(cp,
                      Handle<HeapObject>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
                                       int argc,
                                       LInstruction* instr,
                                       LOperand* context) {
  if (context != nullptr) LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(
      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
}


void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
                                            SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(
        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::Kind kind,
                               int arguments,
                               Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint = safepoints_.DefineSafepoint(
      masm(), kind, arguments, deopt_mode);

  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}

void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
         GenerateJumpTable() && GenerateSafepointTable();
}


void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator iterator(doubles);
  int count = 0;
  while (!iterator.Done()) {
    // TODO(all): Is this supposed to save just the callee-saved doubles? It
    // looks like it's saving all of them.
    FPRegister value = FPRegister::from_code(iterator.Current());
    __ Poke(value, count * kDoubleSize);
    iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator iterator(doubles);
  int count = 0;
  while (!iterator.Done()) {
    // TODO(all): Is this supposed to restore just the callee-saved doubles? It
    // looks like it's restoring all of them.
    FPRegister value = FPRegister::from_code(iterator.Current());
    __ Peek(value, count * kDoubleSize);
    iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
  }

  DCHECK(__ StackPointer().Is(jssp));
  info()->set_prologue_offset(masm_->pc_offset());
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue(
          StackFrame::STUB,
          GetStackSlotCount() + TypedFrameConstants::kFixedSlotCount);
    } else {
      __ Prologue(info()->GeneratePreagedPrologue());
      // Reserve space for the stack slots needed by the code.
      int slots = GetStackSlotCount();
      if (slots > 0) {
        __ Claim(slots, kPointerSize);
      }
    }
    frame_is_built_ = true;
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }
  return !is_aborted();
}


void LCodeGen::DoPrologue(LPrologue* instr) {
  Comment(";;; Prologue begin");

  // Allocate a local context if needed.
  if (info()->scope()->NeedsContext()) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in x1.
    int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
    if (info()->scope()->is_script_scope()) {
      __ Mov(x10, Operand(info()->scope()->scope_info()));
      __ Push(x1, x10);
      __ CallRuntime(Runtime::kNewScriptContext);
      deopt_mode = Safepoint::kLazyDeopt;
    } else {
      if (slots <=
          ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
        Callable callable = CodeFactory::FastNewFunctionContext(
            isolate(), info()->scope()->scope_type());
        __ Mov(FastNewFunctionContextDescriptor::SlotsRegister(), slots);
        __ Call(callable.code(), RelocInfo::CODE_TARGET);
        // Result of the FastNewFunctionContext builtin is always in new space.
        need_write_barrier = false;
      } else {
        __ Push(x1);
        __ Push(Smi::FromInt(info()->scope()->scope_type()));
        __ CallRuntime(Runtime::kNewFunctionContext);
      }
    }
    RecordSafepoint(deopt_mode);
    // Context is returned in x0. It replaces the context passed to us. It's
    // saved in the stack and kept live in cp.
    __ Mov(cp, x0);
    __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = info()->scope()->num_parameters();
    int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? info()->scope()->receiver()
                                : info()->scope()->parameter(i);
      if (var->IsContextSlot()) {
        Register value = x0;
        Register scratch = x3;

        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
            (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ Ldr(value, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextMemOperand(cp, var->index());
        __ Str(value, target);
        // Update the write barrier. This clobbers value and scratch.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(cp, static_cast<int>(target.offset()),
                                    value, scratch, GetLinkRegisterState(),
                                    kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  Comment(";;; Prologue end");
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if there
  // are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ Claim(slots);
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(value->position());

      Comment(";;; <@%d,#%d> "
              "-------------------- Deferred %s --------------------",
              code->instruction_index(),
              code->instr()->hydrogen_value()->id(),
              code->instr()->Mnemonic());

      __ Bind(code->entry());

      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ Push(lr, fp);
        __ Mov(fp, StackFrame::TypeToMarker(StackFrame::STUB));
        __ Push(fp);
        __ Add(fp, __ StackPointer(),
               TypedFrameConstants::kFixedFrameSizeFromFp);
        Comment(";;; Deferred code");
      }

      code->Generate();

      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ Pop(xzr, fp, lr);
        frame_is_built_ = false;
      }

      __ B(code->exit());
    }
  }

  // Force constant pool emission at the end of the deferred code to make
  // sure that no constant pools are emitted after deferred code, because
  // deferred code generation is the last step that generates code. The two
  // following steps will only output data used by Crankshaft.
  masm()->CheckConstPool(true, false);

  return !is_aborted();
}


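// Emits the deoptimization jump table. Each entry materializes its offset
// from the first deopt entry and branches to common tail code which, after
// building a frame or restoring caller doubles if necessary, jumps into the
// second-level deopt table.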
bool LCodeGen::GenerateJumpTable() {
  Label needs_frame, call_deopt_entry;

  if (jump_table_.length() > 0) {
    Comment(";;; -------------------- Jump table --------------------");
    Address base = jump_table_[0]->address;

    UseScratchRegisterScope temps(masm());
    Register entry_offset = temps.AcquireX();

    int length = jump_table_.length();
    for (int i = 0; i < length; i++) {
      Deoptimizer::JumpTableEntry* table_entry = jump_table_[i];
      __ Bind(&table_entry->label);

      Address entry = table_entry->address;
      DeoptComment(table_entry->deopt_info);

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load the base
      // address and add an immediate offset.
      __ Mov(entry_offset, entry - base);

      if (table_entry->needs_frame) {
        DCHECK(!info()->saves_caller_doubles());
        Comment(";;; call deopt with frame");
        // Save lr before the Bl; fp will be adjusted in the needs_frame code.
        __ Push(lr, fp);
        // Reuse the existing needs_frame code.
        __ Bl(&needs_frame);
      } else {
        // There is nothing special to do, so just continue to the second-level
        // table.
        __ Bl(&call_deopt_entry);
      }

      masm()->CheckConstPool(false, false);
    }

    if (needs_frame.is_linked()) {
      // This variant of deopt can only be used with stubs. Since we don't
      // have a function pointer to install in the stack frame that we're
      // building, install a special marker there instead.
      DCHECK(info()->IsStub());

      Comment(";;; needs_frame common code");
      UseScratchRegisterScope temps(masm());
      Register stub_marker = temps.AcquireX();
      __ Bind(&needs_frame);
      __ Mov(stub_marker, StackFrame::TypeToMarker(StackFrame::STUB));
      __ Push(cp, stub_marker);
      __ Add(fp, __ StackPointer(), 2 * kPointerSize);
    }

    // Generate common code for calling the second-level deopt table.
    __ Bind(&call_deopt_entry);

    if (info()->saves_caller_doubles()) {
      DCHECK(info()->IsStub());
      RestoreCallerDoubles();
    }

    Register deopt_entry = temps.AcquireX();
    __ Mov(deopt_entry, Operand(reinterpret_cast<uint64_t>(base),
                                RelocInfo::RUNTIME_ENTRY));
    __ Add(deopt_entry, deopt_entry, entry_offset);
    __ Br(deopt_entry);
  }

  // Force constant pool emission at the end of the deopt jump table to make
  // sure that no constant pools are emitted after.
  masm()->CheckConstPool(true, false);

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  // We do not know how much data will be emitted for the safepoint table, so
  // force emission of the veneer pool.
  masm()->CheckVeneerPool(true, true);
  safepoints_.Emit(masm(), GetTotalFrameSlotCount());
  return !is_aborted();
}


void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetTotalFrameSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}

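// Emits a deoptimizing branch of the given type. An unconditional deopt that
// needs no frame built and no caller doubles restored calls the deopt entry
// directly; every other case branches through a shared jump table entry (see
// GenerateJumpTable).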
void LCodeGen::DeoptimizeBranch(
    LInstruction* instr, DeoptimizeReason deopt_reason, BranchType branch_type,
    Register reg, int bit, Deoptimizer::BailoutType* override_bailout_type) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  Deoptimizer::BailoutType bailout_type =
    info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;

  if (override_bailout_type != NULL) {
    bailout_type = *override_bailout_type;
  }

  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);

  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Label not_zero;
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());

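    // Decrement the stress counter, preserving NZCV and x0-x2 around the
    // update. When the counter reaches zero, reset it to
    // FLAG_deopt_every_n_times and force a deopt.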
    __ Push(x0, x1, x2);
    __ Mrs(x2, NZCV);
    __ Mov(x0, count);
    __ Ldr(w1, MemOperand(x0));
    __ Subs(x1, x1, 1);
    __ B(gt, &not_zero);
    __ Mov(w1, FLAG_deopt_every_n_times);
    __ Str(w1, MemOperand(x0));
    __ Pop(x2, x1, x0);
    DCHECK(frame_is_built_);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ Unreachable();

    __ Bind(&not_zero);
    __ Str(w1, MemOperand(x0));
    __ Msr(NZCV, x2);
    __ Pop(x2, x1, x0);
  }

  if (info()->ShouldTrapOnDeopt()) {
    Label dont_trap;
    __ B(&dont_trap, InvertBranchType(branch_type), reg, bit);
    __ Debug("trap_on_deopt", __LINE__, BREAK);
    __ Bind(&dont_trap);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);

  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through the jump table if we need to build a frame, or restore caller
  // doubles.
  if (branch_type == always &&
      frame_is_built_ && !info()->saves_caller_doubles()) {
    DeoptComment(deopt_info);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    Deoptimizer::JumpTableEntry* table_entry =
        new (zone()) Deoptimizer::JumpTableEntry(
            entry, deopt_info, bailout_type, !frame_is_built_);
    // We often have several deopts to the same entry; reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry->IsEquivalentTo(*jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    __ B(&jump_table_.last()->label, branch_type, reg, bit);
  }
}

void LCodeGen::Deoptimize(LInstruction* instr, DeoptimizeReason deopt_reason,
                          Deoptimizer::BailoutType* override_bailout_type) {
  DeoptimizeBranch(instr, deopt_reason, always, NoReg, -1,
                   override_bailout_type);
}

void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
                            DeoptimizeReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, static_cast<BranchType>(cond));
}

void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr,
                                DeoptimizeReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, reg_zero, rt);
}

void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr,
                                   DeoptimizeReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, reg_not_zero, rt);
}

void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr,
                                    DeoptimizeReason deopt_reason) {
  int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
  DeoptimizeIfBitSet(rt, sign_bit, instr, deopt_reason);
}

void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr,
                               DeoptimizeReason deopt_reason) {
  DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
}

void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
                                  DeoptimizeReason deopt_reason) {
  DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
}

void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
                                LInstruction* instr,
                                DeoptimizeReason deopt_reason) {
  __ CompareRoot(rt, index);
  DeoptimizeIf(eq, instr, deopt_reason);
}

void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
                                   LInstruction* instr,
                                   DeoptimizeReason deopt_reason) {
  __ CompareRoot(rt, index);
  DeoptimizeIf(ne, instr, deopt_reason);
}

void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
                                     DeoptimizeReason deopt_reason) {
  __ TestForMinusZero(input);
  DeoptimizeIf(vs, instr, deopt_reason);
}


void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) {
  __ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex);
  DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
}

void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
                                  DeoptimizeReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, reg_bit_set, rt, bit);
}

void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
                                    DeoptimizeReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, reg_bit_clear, rt, bit);
}


void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    intptr_t current_pc = masm()->pc_offset();

    if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
      ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK((padding_size % kInstructionSize) == 0);
      InstructionAccurateScope instruction_accurate(
          masm(), padding_size / kInstructionSize);

      while (padding_size > 0) {
        __ nop();
        padding_size -= kInstructionSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


Register LCodeGen::ToRegister(LOperand* op) const {
  // TODO(all): support zero register results, as ToRegister32.
  DCHECK((op != NULL) && op->IsRegister());
  return Register::from_code(op->index());
}


Register LCodeGen::ToRegister32(LOperand* op) const {
  DCHECK(op != NULL);
  if (op->IsConstantOperand()) {
    // If this is a constant operand, the result must be the zero register.
    DCHECK(ToInteger32(LConstantOperand::cast(op)) == 0);
    return wzr;
  } else {
    return ToRegister(op).W();
  }
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK((op != NULL) && op->IsDoubleRegister());
  return DoubleRegister::from_code(op->index());
}


Operand LCodeGen::ToOperand(LOperand* op) {
  DCHECK(op != NULL);
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand(0);
  }
  // Stack slots are not implemented; use ToMemOperand instead.
  UNREACHABLE();
  return Operand(0);
}


Operand LCodeGen::ToOperand32(LOperand* op) {
  DCHECK(op != NULL);
  if (op->IsRegister()) {
    return Operand(ToRegister32(op));
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      return Operand(constant->Integer32Value());
    } else {
      // Other constants not implemented.
      Abort(kToOperand32UnsupportedImmediate);
    }
  }
  // Other cases are not implemented.
  UNREACHABLE();
  return Operand(0);
}


static int64_t ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize;
}


MemOperand LCodeGen::ToMemOperand(LOperand* op, StackMode stack_mode) const {
  DCHECK(op != NULL);
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    int fp_offset = FrameSlotToFPOffset(op->index());
    // Loads and stores have a larger reach with positive offsets than with
    // negative ones, so we try to access the slot via jssp (positive offset)
    // first and fall back to fp (negative offset) if that fails.
    //
    // We can reference a stack slot from jssp only if we know how much we've
    // put on the stack. We don't know this in the following cases:
    // - stack_mode != kCanUseStackPointer: this is the case when deferred
    //   code has saved the registers.
    // - saves_caller_doubles(): some double registers have been pushed, jssp
    //   references the end of the double registers and not the end of the stack
    //   slots.
    // In both of the cases above, we _could_ add the tracking information
    // required so that we can use jssp here, but in practice it isn't worth it.
    if ((stack_mode == kCanUseStackPointer) &&
        !info()->saves_caller_doubles()) {
      int jssp_offset_to_fp =
          (pushed_arguments_ + GetTotalFrameSlotCount()) * kPointerSize -
          StandardFrameConstants::kFixedFrameSizeAboveFp;
      int jssp_offset = fp_offset + jssp_offset_to_fp;
      if (masm()->IsImmLSScaled(jssp_offset, LSDoubleWord)) {
        return MemOperand(masm()->StackPointer(), jssp_offset);
      }
    }
    return MemOperand(fp, fp_offset);
  } else {
    // Without an eager frame, retrieve the parameter relative to the stack
    // pointer.
    return MemOperand(masm()->StackPointer(),
                      ArgumentsOffsetWithoutFrame(op->index()));
  }
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


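// Returns the right-hand operand for a 32-bit instruction. If the
// instruction carries a fused shift, the operand is the shifted register;
// otherwise this falls back to ToOperand32.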
template <class LI>
Operand LCodeGen::ToShiftedRightOperand32(LOperand* right, LI* shift_info) {
  if (shift_info->shift() == NO_SHIFT) {
    return ToOperand32(right);
  } else {
    return Operand(
        ToRegister32(right),
        shift_info->shift(),
        JSShiftAmountFromLConstant(shift_info->shift_amount()));
  }
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


bool LCodeGen::IsInteger32Constant(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return constant->Integer32Value();
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
  Condition cond = nv;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = eq;
      break;
    case Token::NE:
    case Token::NE_STRICT:
      cond = ne;
      break;
    case Token::LT:
      cond = is_unsigned ? lo : lt;
      break;
    case Token::GT:
      cond = is_unsigned ? hi : gt;
      break;
    case Token::LTE:
      cond = is_unsigned ? ls : le;
      break;
    case Token::GTE:
      cond = is_unsigned ? hs : ge;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}

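// Emits the branch for a control instruction, laying out the code so that a
// branch to the block emitted next can be elided: if the true block is next,
// only the inverted branch to the false block is emitted.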
template<class InstrType>
void LCodeGen::EmitBranchGeneric(InstrType instr,
                                 const BranchGenerator& branch) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();

  if (right_block == left_block) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    branch.EmitInverted(chunk_->GetAssemblyLabel(right_block));
  } else {
    branch.Emit(chunk_->GetAssemblyLabel(left_block));
    if (right_block != next_block) {
      __ B(chunk_->GetAssemblyLabel(right_block));
    }
  }
}


template<class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
  DCHECK((condition != al) && (condition != nv));
  BranchOnCondition branch(this, condition);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitCompareAndBranch(InstrType instr,
                                    Condition condition,
                                    const Register& lhs,
                                    const Operand& rhs) {
  DCHECK((condition != al) && (condition != nv));
  CompareAndBranch branch(this, condition, lhs, rhs);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitTestAndBranch(InstrType instr,
                                 Condition condition,
                                 const Register& value,
                                 uint64_t mask) {
  DCHECK((condition != al) && (condition != nv));
  TestAndBranch branch(this, condition, value, mask);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitBranchIfNonZeroNumber(InstrType instr,
                                         const FPRegister& value,
                                         const FPRegister& scratch) {
  BranchIfNonZeroNumber branch(this, value, scratch);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitBranchIfHeapNumber(InstrType instr,
                                      const Register& value) {
  BranchIfHeapNumber branch(this, value);
  EmitBranchGeneric(instr, branch);
}


template<class InstrType>
void LCodeGen::EmitBranchIfRoot(InstrType instr,
                                const Register& value,
                                Heap::RootListIndex index) {
  BranchIfRoot branch(this, value, index);
  EmitBranchGeneric(instr, branch);
}


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION;
       i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) {
      resolver_.Resolve(move);
    }
  }
}


void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
  Register arguments = ToRegister(instr->arguments());
  Register result = ToRegister(instr->result());

  // The pointer to the arguments array comes from DoArgumentsElements.
  // It does not point directly to the arguments and there is an offset of
  // two words that we must take into account when accessing an argument.
  // Subtracting the index from length accounts for one, so we add one more.

  if (instr->length()->IsConstantOperand() &&
      instr->index()->IsConstantOperand()) {
    int index = ToInteger32(LConstantOperand::cast(instr->index()));
    int length = ToInteger32(LConstantOperand::cast(instr->length()));
    int offset = ((length - index) + 1) * kPointerSize;
    __ Ldr(result, MemOperand(arguments, offset));
  } else if (instr->index()->IsConstantOperand()) {
    Register length = ToRegister32(instr->length());
    int index = ToInteger32(LConstantOperand::cast(instr->index()));
    int loc = index - 1;
    if (loc != 0) {
      __ Sub(result.W(), length, loc);
      __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
    } else {
      __ Ldr(result, MemOperand(arguments, length, UXTW, kPointerSizeLog2));
    }
  } else {
    Register length = ToRegister32(instr->length());
    Operand index = ToOperand32(instr->index());
    __ Sub(result.W(), length, index);
    __ Add(result.W(), result.W(), 1);
    __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
  }
}


void LCodeGen::DoAddE(LAddE* instr) {
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = Operand(x0);  // Dummy initialization.
  if (instr->hydrogen()->external_add_type() == AddOfExternalAndTagged) {
    right = Operand(ToRegister(instr->right()));
  } else if (instr->right()->IsConstantOperand()) {
    right = ToInteger32(LConstantOperand::cast(instr->right()));
  } else {
    right = Operand(ToRegister32(instr->right()), SXTW);
  }

  DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
  __ Add(result, left, right);
}


void LCodeGen::DoAddI(LAddI* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister32(instr->result());
  Register left = ToRegister32(instr->left());
  Operand right = ToShiftedRightOperand32(instr->right(), instr);

  if (can_overflow) {
    __ Adds(result, left, right);
    DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
  } else {
    __ Add(result, left, right);
  }
}


void LCodeGen::DoAddS(LAddS* instr) {
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
  Register result = ToRegister(instr->result());
  Register left = ToRegister(instr->left());
  Operand right = ToOperand(instr->right());
  if (can_overflow) {
    __ Adds(result, left, right);
    DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
  } else {
    __ Add(result, left, right);
  }
}


void LCodeGen::DoAllocate(LAllocate* instr) {
  class DeferredAllocate: public LDeferredCode {
   public:
    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
        : LDeferredCode(codegen), instr_(instr) { }
    virtual void Generate() { codegen()->DoDeferredAllocate(instr_); }
    virtual LInstruction* instr() { return instr_; }
   private:
    LAllocate* instr_;
  };

  DeferredAllocate* deferred = new(zone()) DeferredAllocate(this, instr);

  Register result = ToRegister(instr->result());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  // Allocate memory for the object.
  AllocationFlags flags = NO_ALLOCATION_FLAGS;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }

  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }

  if (instr->hydrogen()->IsAllocationFoldingDominator()) {
    flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
  }
  DCHECK(!instr->hydrogen()->IsAllocationFolded());

  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    CHECK(size <= kMaxRegularHeapObjectSize);
    __ Allocate(size, result, temp1, temp2, deferred->entry(), flags);
  } else {
    Register size = ToRegister32(instr->size());
    __ Sxtw(size.X(), size);
    __ Allocate(size.X(), result, temp1, temp2, deferred->entry(), flags);
  }

  __ Bind(deferred->exit());

  if (instr->hydrogen()->MustPrefillWithFiller()) {
    Register start = temp1;
    Register end = temp2;
    Register filler = ToRegister(instr->temp3());

    __ Sub(start, result, kHeapObjectTag);

    if (instr->size()->IsConstantOperand()) {
      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
      __ Add(end, start, size);
    } else {
      __ Add(end, start, ToRegister(instr->size()));
    }
    __ LoadRoot(filler, Heap::kOnePointerFillerMapRootIndex);
    __ InitializeFieldsWithFiller(start, end, filler);
  } else {
    DCHECK(instr->temp3() == NULL);
  }
}


void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
  // TODO(3095996): Get rid of this. For now, we need to make the
  // result register contain a valid pointer because it is already
  // contained in the register pointer map.
  __ Mov(ToRegister(instr->result()), Smi::kZero);

  PushSafepointRegistersScope scope(this);
  LoadContextFromDeferred(instr->context());
  // We're in a SafepointRegistersScope so we can use any scratch registers.
   1472   Register size = x0;
   1473   if (instr->size()->IsConstantOperand()) {
   1474     __ Mov(size, ToSmi(LConstantOperand::cast(instr->size())));
   1475   } else {
   1476     __ SmiTag(size, ToRegister32(instr->size()).X());
   1477   }
   1478   int flags = AllocateDoubleAlignFlag::encode(
   1479       instr->hydrogen()->MustAllocateDoubleAligned());
   1480   if (instr->hydrogen()->IsOldSpaceAllocation()) {
   1481     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
   1482     flags = AllocateTargetSpace::update(flags, OLD_SPACE);
   1483   } else {
   1484     flags = AllocateTargetSpace::update(flags, NEW_SPACE);
   1485   }
   1486   __ Mov(x10, Smi::FromInt(flags));
   1487   __ Push(size, x10);
   1488 
   1489   CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr, nullptr);
   1490   __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));
   1491 
   1492   if (instr->hydrogen()->IsAllocationFoldingDominator()) {
   1493     AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
   1494     if (instr->hydrogen()->IsOldSpaceAllocation()) {
   1495       DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
   1496       allocation_flags =
                  static_cast<AllocationFlags>(allocation_flags | PRETENURE);
   1497     }
   1498     // If the allocation folding dominator's allocate triggered a GC, the
   1499     // allocation happened in the runtime. We have to reset the top pointer
   1500     // to virtually undo the allocation.
   1501     ExternalReference allocation_top =
   1502         AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
   1503     Register top_address = x10;
   1504     __ Sub(x0, x0, Operand(kHeapObjectTag));
   1505     __ Mov(top_address, Operand(allocation_top));
   1506     __ Str(x0, MemOperand(top_address));
   1507     __ Add(x0, x0, Operand(kHeapObjectTag));
   1508   }
   1509 }
   1510 
   1511 void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
   1512   DCHECK(instr->hydrogen()->IsAllocationFolded());
   1513   DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
   1514   Register result = ToRegister(instr->result());
   1515   Register scratch1 = ToRegister(instr->temp1());
   1516   Register scratch2 = ToRegister(instr->temp2());
   1517 
   1518   AllocationFlags flags = ALLOCATION_FOLDED;
   1519   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
   1520     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
   1521   }
   1522   if (instr->hydrogen()->IsOldSpaceAllocation()) {
   1523     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
   1524     flags = static_cast<AllocationFlags>(flags | PRETENURE);
   1525   }
   1526   if (instr->size()->IsConstantOperand()) {
   1527     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   1528     CHECK(size <= kMaxRegularHeapObjectSize);
   1529     __ FastAllocate(size, result, scratch1, scratch2, flags);
   1530   } else {
   1531     Register size = ToRegister(instr->size());
   1532     __ FastAllocate(size, result, scratch1, scratch2, flags);
   1533   }
   1534 }
   1535 
   1536 
   1537 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
   1538   Register receiver = ToRegister(instr->receiver());
   1539   Register function = ToRegister(instr->function());
   1540   Register length = ToRegister32(instr->length());
   1541 
   1542   Register elements = ToRegister(instr->elements());
   1543   Register scratch = x5;
   1544   DCHECK(receiver.Is(x0));  // Used for parameter count.
   1545   DCHECK(function.Is(x1));  // Required by InvokeFunction.
   1546   DCHECK(ToRegister(instr->result()).Is(x0));
   1547   DCHECK(instr->IsMarkedAsCall());
   1548 
   1549   // Copy the arguments to this function possibly from the
   1550   // adaptor frame below it.
   1551   const uint32_t kArgumentsLimit = 1 * KB;
   1552   __ Cmp(length, kArgumentsLimit);
   1553   DeoptimizeIf(hi, instr, DeoptimizeReason::kTooManyArguments);
   1554 
   1555   // Push the receiver and use the register to keep the original
   1556   // number of arguments.
   1557   __ Push(receiver);
   1558   Register argc = receiver;
   1559   receiver = NoReg;
   1560   __ Sxtw(argc, length);
   1561   // The arguments start one pointer size above elements.
   1562   __ Add(elements, elements, 1 * kPointerSize);
   1563 
   1564   // Loop through the arguments pushing them onto the execution
   1565   // stack.
   1566   Label invoke, loop;
   1567   // length is a small non-negative integer, due to the test above.
   1568   __ Cbz(length, &invoke);
   1569   __ Bind(&loop);
   1570   __ Ldr(scratch, MemOperand(elements, length, SXTW, kPointerSizeLog2));
   1571   __ Push(scratch);
   1572   __ Subs(length, length, 1);
   1573   __ B(ne, &loop);
   1574 
   1575   __ Bind(&invoke);
   1576 
   1577   InvokeFlag flag = CALL_FUNCTION;
   1578   if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
   1579     DCHECK(!info()->saves_caller_doubles());
   1580     // TODO(ishell): drop current frame before pushing arguments to the stack.
   1581     flag = JUMP_FUNCTION;
   1582     ParameterCount actual(x0);
   1583     // It is safe to use x3, x4 and x5 as scratch registers here given that
   1584     // 1) we are not going to return to the caller function anyway,
   1585     // 2) x3 (new.target) will be initialized below.
   1586     PrepareForTailCall(actual, x3, x4, x5);
   1587   }
   1588 
   1589   DCHECK(instr->HasPointerMap());
   1590   LPointerMap* pointers = instr->pointer_map();
   1591   SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
   1592   // The number of arguments is stored in argc (receiver) which is x0, as
   1593   // expected by InvokeFunction.
   1594   ParameterCount actual(argc);
   1595   __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
   1596 }
   1597 
   1598 
   1599 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
   1600   Register result = ToRegister(instr->result());
   1601 
   1602   if (instr->hydrogen()->from_inlined()) {
   1603     // When we are inside an inlined function, the arguments are the last things
   1604     // that have been pushed on the stack. Therefore the arguments array can be
   1605     // accessed directly from jssp.
   1606     // However, in the normal case it is accessed via fp, and there are two
   1607     // words on the stack between fp and the arguments (the saved lr and fp);
   1608     // the LAccessArgumentsAt implementation takes that into account.
   1609     // In the inlined case we need to subtract the size of two words from
   1610     // jssp to get a pointer which will work well with LAccessArgumentsAt.
   1611     DCHECK(masm()->StackPointer().Is(jssp));
   1612     __ Sub(result, jssp, 2 * kPointerSize);
   1613   } else if (instr->hydrogen()->arguments_adaptor()) {
   1614     DCHECK(instr->temp() != NULL);
   1615     Register previous_fp = ToRegister(instr->temp());
   1616 
   1617     __ Ldr(previous_fp,
   1618            MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   1619     __ Ldr(result, MemOperand(previous_fp,
   1620                               CommonFrameConstants::kContextOrFrameTypeOffset));
   1621     __ Cmp(result, StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR));
   1622     __ Csel(result, fp, previous_fp, ne);
   1623   } else {
   1624     __ Mov(result, fp);
   1625   }
   1626 }
   1627 
   1628 
   1629 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
   1630   Register elements = ToRegister(instr->elements());
   1631   Register result = ToRegister32(instr->result());
   1632   Label done;
   1633 
   1634   // If there is no arguments adaptor frame, the number of arguments is fixed.
   1635   __ Cmp(fp, elements);
   1636   __ Mov(result, scope()->num_parameters());
   1637   __ B(eq, &done);
   1638 
   1639   // Arguments adaptor frame present. Get argument length from there.
   1640   __ Ldr(result.X(), MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   1641   __ Ldr(result,
   1642          UntagSmiMemOperand(result.X(),
   1643                             ArgumentsAdaptorFrameConstants::kLengthOffset));
   1644 
   1645   // Argument length is in result register.
   1646   __ Bind(&done);
   1647 }
   1648 
   1649 
   1650 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
   1651   DoubleRegister left = ToDoubleRegister(instr->left());
   1652   DoubleRegister right = ToDoubleRegister(instr->right());
   1653   DoubleRegister result = ToDoubleRegister(instr->result());
   1654 
   1655   switch (instr->op()) {
   1656     case Token::ADD: __ Fadd(result, left, right); break;
   1657     case Token::SUB: __ Fsub(result, left, right); break;
   1658     case Token::MUL: __ Fmul(result, left, right); break;
   1659     case Token::DIV: __ Fdiv(result, left, right); break;
   1660     case Token::MOD: {
   1661       // The ECMA-262 remainder operator is the remainder from a truncating
   1662       // (round-towards-zero) division. Note that this differs from IEEE-754.
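              // For example, 5 % 3 is 2, whereas the IEEE-754 remainder of
              // 5 and 3 is -1, since IEEE-754 rounds the quotient to nearest.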
   1663       //
   1664       // TODO(jbramley): See if it's possible to do this inline, rather than by
   1665       // calling a helper function. With frintz (to produce the intermediate
   1666       // quotient) and fmsub (to calculate the remainder without loss of
   1667       // precision), it should be possible. However, we would need support for
   1668       // fdiv in round-towards-zero mode, and the ARM64 simulator doesn't
   1669       // support that yet.
   1670       DCHECK(left.Is(d0));
   1671       DCHECK(right.Is(d1));
   1672       __ CallCFunction(
   1673           ExternalReference::mod_two_doubles_operation(isolate()),
   1674           0, 2);
   1675       DCHECK(result.Is(d0));
   1676       break;
   1677     }
   1678     default:
   1679       UNREACHABLE();
   1680       break;
   1681   }
   1682 }
   1683 
   1684 
   1685 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
   1686   DCHECK(ToRegister(instr->context()).is(cp));
   1687   DCHECK(ToRegister(instr->left()).is(x1));
   1688   DCHECK(ToRegister(instr->right()).is(x0));
   1689   DCHECK(ToRegister(instr->result()).is(x0));
   1690 
   1691   Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
   1692   CallCode(code, RelocInfo::CODE_TARGET, instr);
   1693 }
   1694 
   1695 
   1696 void LCodeGen::DoBitI(LBitI* instr) {
   1697   Register result = ToRegister32(instr->result());
   1698   Register left = ToRegister32(instr->left());
   1699   Operand right = ToShiftedRightOperand32(instr->right(), instr);
   1700 
   1701   switch (instr->op()) {
   1702     case Token::BIT_AND: __ And(result, left, right); break;
   1703     case Token::BIT_OR:  __ Orr(result, left, right); break;
   1704     case Token::BIT_XOR: __ Eor(result, left, right); break;
   1705     default:
   1706       UNREACHABLE();
   1707       break;
   1708   }
   1709 }
   1710 
   1711 
   1712 void LCodeGen::DoBitS(LBitS* instr) {
   1713   Register result = ToRegister(instr->result());
   1714   Register left = ToRegister(instr->left());
   1715   Operand right = ToOperand(instr->right());
   1716 
   1717   switch (instr->op()) {
   1718     case Token::BIT_AND: __ And(result, left, right); break;
   1719     case Token::BIT_OR:  __ Orr(result, left, right); break;
   1720     case Token::BIT_XOR: __ Eor(result, left, right); break;
   1721     default:
   1722       UNREACHABLE();
   1723       break;
   1724   }
   1725 }
   1726 
   1727 
   1728 void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) {
   1729   Condition cond = instr->hydrogen()->allow_equality() ? hi : hs;
   1730   DCHECK(instr->hydrogen()->index()->representation().IsInteger32());
   1731   DCHECK(instr->hydrogen()->length()->representation().IsInteger32());
   1732   if (instr->index()->IsConstantOperand()) {
   1733     Operand index = ToOperand32(instr->index());
   1734     Register length = ToRegister32(instr->length());
   1735     __ Cmp(length, index);
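            // The comparison was emitted as (length, index), so commute the
            // condition to preserve its meaning for (index, length).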
   1736     cond = CommuteCondition(cond);
   1737   } else {
   1738     Register index = ToRegister32(instr->index());
   1739     Operand length = ToOperand32(instr->length());
   1740     __ Cmp(index, length);
   1741   }
   1742   if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
   1743     __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
   1744   } else {
   1745     DeoptimizeIf(cond, instr, DeoptimizeReason::kOutOfBounds);
   1746   }
   1747 }
   1748 
   1749 
   1750 void LCodeGen::DoBranch(LBranch* instr) {
   1751   Representation r = instr->hydrogen()->value()->representation();
   1752   Label* true_label = instr->TrueLabel(chunk_);
   1753   Label* false_label = instr->FalseLabel(chunk_);
   1754 
   1755   if (r.IsInteger32()) {
   1756     DCHECK(!info()->IsStub());
   1757     EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0);
   1758   } else if (r.IsSmi()) {
   1759     DCHECK(!info()->IsStub());
   1760     STATIC_ASSERT(kSmiTag == 0);
   1761     EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0);
   1762   } else if (r.IsDouble()) {
   1763     DoubleRegister value = ToDoubleRegister(instr->value());
   1764     // Test the double value. Zero and NaN are false.
   1765     EmitBranchIfNonZeroNumber(instr, value, double_scratch());
   1766   } else {
   1767     DCHECK(r.IsTagged());
   1768     Register value = ToRegister(instr->value());
   1769     HType type = instr->hydrogen()->value()->type();
   1770 
   1771     if (type.IsBoolean()) {
   1772       DCHECK(!info()->IsStub());
   1773       __ CompareRoot(value, Heap::kTrueValueRootIndex);
   1774       EmitBranch(instr, eq);
   1775     } else if (type.IsSmi()) {
   1776       DCHECK(!info()->IsStub());
   1777       EmitCompareAndBranch(instr, ne, value, Smi::kZero);
   1778     } else if (type.IsJSArray()) {
   1779       DCHECK(!info()->IsStub());
   1780       EmitGoto(instr->TrueDestination(chunk()));
   1781     } else if (type.IsHeapNumber()) {
   1782       DCHECK(!info()->IsStub());
   1783       __ Ldr(double_scratch(), FieldMemOperand(value,
   1784                                                HeapNumber::kValueOffset));
   1785       // Test the double value. Zero and NaN are false.
   1786       EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch());
   1787     } else if (type.IsString()) {
   1788       DCHECK(!info()->IsStub());
   1789       Register temp = ToRegister(instr->temp1());
   1790       __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset));
   1791       EmitCompareAndBranch(instr, ne, temp, 0);
   1792     } else {
   1793       ToBooleanHints expected = instr->hydrogen()->expected_input_types();
   1794       // Avoid deopts in the case where we've never executed this path before.
   1795       if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
   1796 
   1797       if (expected & ToBooleanHint::kUndefined) {
   1798         // undefined -> false.
   1799         __ JumpIfRoot(
   1800             value, Heap::kUndefinedValueRootIndex, false_label);
   1801       }
   1802 
   1803       if (expected & ToBooleanHint::kBoolean) {
   1804         // Boolean -> its value.
   1805         __ JumpIfRoot(
   1806             value, Heap::kTrueValueRootIndex, true_label);
   1807         __ JumpIfRoot(
   1808             value, Heap::kFalseValueRootIndex, false_label);
   1809       }
   1810 
   1811       if (expected & ToBooleanHint::kNull) {
   1812         // 'null' -> false.
   1813         __ JumpIfRoot(
   1814             value, Heap::kNullValueRootIndex, false_label);
   1815       }
   1816 
   1817       if (expected & ToBooleanHint::kSmallInteger) {
   1818         // Smis: 0 -> false, all others -> true.
   1819         DCHECK(Smi::kZero == 0);
   1820         __ Cbz(value, false_label);
   1821         __ JumpIfSmi(value, true_label);
   1822       } else if (expected & ToBooleanHint::kNeedsMap) {
   1823         // If we need a map later and have a smi, deopt.
   1824         DeoptimizeIfSmi(value, instr, DeoptimizeReason::kSmi);
   1825       }
   1826 
   1827       Register map = NoReg;
   1828       Register scratch = NoReg;
   1829 
   1830       if (expected & ToBooleanHint::kNeedsMap) {
   1831         DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
   1832         map = ToRegister(instr->temp1());
   1833         scratch = ToRegister(instr->temp2());
   1834 
   1835         __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
   1836 
   1837         if (expected & ToBooleanHint::kCanBeUndetectable) {
   1838           // Undetectable -> false.
   1839           __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
   1840           __ TestAndBranchIfAnySet(
   1841               scratch, 1 << Map::kIsUndetectable, false_label);
   1842         }
   1843       }
   1844 
   1845       if (expected & ToBooleanHint::kReceiver) {
   1846         // spec object -> true.
   1847         __ CompareInstanceType(map, scratch, FIRST_JS_RECEIVER_TYPE);
   1848         __ B(ge, true_label);
   1849       }
   1850 
   1851       if (expected & ToBooleanHint::kString) {
   1852         // String value -> false iff empty.
   1853         Label not_string;
   1854         __ CompareInstanceType(map, scratch, FIRST_NONSTRING_TYPE);
   1855         __ B(ge, &not_string);
   1856         __ Ldr(scratch, FieldMemOperand(value, String::kLengthOffset));
   1857         __ Cbz(scratch, false_label);
   1858         __ B(true_label);
   1859         __ Bind(&not_string);
   1860       }
   1861 
   1862       if (expected & ToBooleanHint::kSymbol) {
   1863         // Symbol value -> true.
   1864         __ CompareInstanceType(map, scratch, SYMBOL_TYPE);
   1865         __ B(eq, true_label);
   1866       }
   1867 
   1868       if (expected & ToBooleanHint::kHeapNumber) {
   1869         Label not_heap_number;
   1870         __ JumpIfNotRoot(map, Heap::kHeapNumberMapRootIndex, &not_heap_number);
   1871 
   1872         __ Ldr(double_scratch(),
   1873                FieldMemOperand(value, HeapNumber::kValueOffset));
   1874         __ Fcmp(double_scratch(), 0.0);
   1875         // If we got a NaN (overflow bit is set), jump to the false branch.
   1876         __ B(vs, false_label);
   1877         __ B(eq, false_label);
   1878         __ B(true_label);
   1879         __ Bind(&not_heap_number);
   1880       }
   1881 
   1882       if (expected != ToBooleanHint::kAny) {
   1883         // We've seen something for the first time -> deopt.
   1884         // This can only happen if we are not generic already.
   1885         Deoptimize(instr, DeoptimizeReason::kUnexpectedObject);
   1886       }
   1887     }
   1888   }
   1889 }
   1890 
   1891 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
   1892                                  int formal_parameter_count, int arity,
   1893                                  bool is_tail_call, LInstruction* instr) {
   1894   bool dont_adapt_arguments =
   1895       formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
   1896   bool can_invoke_directly =
   1897       dont_adapt_arguments || formal_parameter_count == arity;
   1898 
   1899   // The function interface relies on the following register assignments.
   1900   Register function_reg = x1;
   1901   Register arity_reg = x0;
   1902 
   1903   LPointerMap* pointers = instr->pointer_map();
   1904 
   1905   if (FLAG_debug_code) {
   1906     Label is_not_smi;
   1907     // Try to confirm that function_reg (x1) is a tagged pointer.
   1908     __ JumpIfNotSmi(function_reg, &is_not_smi);
   1909     __ Abort(kExpectedFunctionObject);
   1910     __ Bind(&is_not_smi);
   1911   }
   1912 
   1913   if (can_invoke_directly) {
   1914     // Change context.
   1915     __ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
   1916 
   1917     // Always initialize new target and number of actual arguments.
   1918     __ LoadRoot(x3, Heap::kUndefinedValueRootIndex);
   1919     __ Mov(arity_reg, arity);
   1920 
   1921     bool is_self_call = function.is_identical_to(info()->closure());
   1922 
   1923     // Invoke function.
   1924     if (is_self_call) {
   1925       Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
   1926       if (is_tail_call) {
   1927         __ Jump(self, RelocInfo::CODE_TARGET);
   1928       } else {
   1929         __ Call(self, RelocInfo::CODE_TARGET);
   1930       }
   1931     } else {
   1932       __ Ldr(x10, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
   1933       if (is_tail_call) {
   1934         __ Jump(x10);
   1935       } else {
   1936         __ Call(x10);
   1937       }
   1938     }
   1939 
   1940     if (!is_tail_call) {
   1941       // Set up deoptimization.
   1942       RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
   1943     }
   1944   } else {
   1945     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   1946     ParameterCount actual(arity);
   1947     ParameterCount expected(formal_parameter_count);
   1948     InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
   1949     __ InvokeFunction(function_reg, expected, actual, flag, generator);
   1950   }
   1951 }
   1952 
   1953 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
   1954   DCHECK(instr->IsMarkedAsCall());
   1955   DCHECK(ToRegister(instr->result()).Is(x0));
   1956 
   1957   if (instr->hydrogen()->IsTailCall()) {
   1958     if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
   1959 
   1960     if (instr->target()->IsConstantOperand()) {
   1961       LConstantOperand* target = LConstantOperand::cast(instr->target());
   1962       Handle<Code> code = Handle<Code>::cast(ToHandle(target));
   1963       // TODO(all): on ARM we use a call descriptor to specify a storage mode
   1964       // but on ARM64 we only have one storage mode so it isn't necessary. Check
   1965       // this understanding is correct.
   1966       __ Jump(code, RelocInfo::CODE_TARGET);
   1967     } else {
   1968       DCHECK(instr->target()->IsRegister());
   1969       Register target = ToRegister(instr->target());
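              // The target register holds a tagged Code object; adding
              // Code::kHeaderSize and removing the heap object tag yields
              // the raw entry address.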
   1970       __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
   1971       __ Br(target);
   1972     }
   1973   } else {
   1974     LPointerMap* pointers = instr->pointer_map();
   1975     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   1976 
   1977     if (instr->target()->IsConstantOperand()) {
   1978       LConstantOperand* target = LConstantOperand::cast(instr->target());
   1979       Handle<Code> code = Handle<Code>::cast(ToHandle(target));
   1980       generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
   1981       // TODO(all): on ARM we use a call descriptor to specify a storage mode
   1982       // but on ARM64 we only have one storage mode so it isn't necessary. Check
   1983       // this understanding is correct.
   1984       __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
   1985     } else {
   1986       DCHECK(instr->target()->IsRegister());
   1987       Register target = ToRegister(instr->target());
   1988       generator.BeforeCall(__ CallSize(target));
   1989       __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
   1990       __ Call(target);
   1991     }
   1992     generator.AfterCall();
   1993   }
   1994 
   1995   HCallWithDescriptor* hinstr = instr->hydrogen();
   1996   RecordPushedArgumentsDelta(hinstr->argument_delta());
   1997 
   1998   // An HCallWithDescriptor instruction is translated to zero or more
   1999   // LPushArguments (they handle parameters passed on the stack) followed by
   2000   // an LCallWithDescriptor. Each LPushArguments instruction generated records
   2001   // the number of arguments pushed, so we need to offset them here.
   2002   // The |argument_delta()| used above "knows" only about JS parameters, while
   2003   // here we are dealing with the details of the calling convention.
   2004   RecordPushedArgumentsDelta(-hinstr->descriptor().GetStackParameterCount());
   2005 }
   2006 
   2007 
   2008 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
   2009   CallRuntime(instr->function(), instr->arity(), instr);
   2010   RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
   2011 }
   2012 
   2013 
   2014 void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
   2015   GenerateOsrPrologue();
   2016 }
   2017 
   2018 
   2019 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
   2020   Register temp = ToRegister(instr->temp());
   2021   Label deopt, done;
   2022   // If the map is not deprecated, the migration attempt does not make sense.
   2023   __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
   2024   __ Ldr(temp, FieldMemOperand(temp, Map::kBitField3Offset));
   2025   __ Tst(temp, Operand(Map::Deprecated::kMask));
   2026   __ B(eq, &deopt);
   2027 
   2028   {
   2029     PushSafepointRegistersScope scope(this);
   2030     __ Push(object);
   2031     __ Mov(cp, 0);
   2032     __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
   2033     RecordSafepointWithRegisters(
   2034         instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
   2035     __ StoreToSafepointRegisterSlot(x0, temp);
   2036   }
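          // The runtime call is expected to return the migrated object on
          // success and a smi on failure, so a set heap object tag (non-smi)
          // means the migration succeeded.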
   2037   __ Tst(temp, Operand(kSmiTagMask));
   2038   __ B(ne, &done);
   2039 
   2040   __ bind(&deopt);
   2041   Deoptimize(instr, DeoptimizeReason::kInstanceMigrationFailed);
   2042 
   2043   __ bind(&done);
   2044 }
   2045 
   2046 
   2047 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
   2048   class DeferredCheckMaps: public LDeferredCode {
   2049    public:
   2050     DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
   2051         : LDeferredCode(codegen), instr_(instr), object_(object) {
   2052       SetExit(check_maps());
   2053     }
   2054     virtual void Generate() {
   2055       codegen()->DoDeferredInstanceMigration(instr_, object_);
   2056     }
   2057     Label* check_maps() { return &check_maps_; }
   2058     virtual LInstruction* instr() { return instr_; }
   2059    private:
   2060     LCheckMaps* instr_;
   2061     Label check_maps_;
   2062     Register object_;
   2063   };
   2064 
   2065   if (instr->hydrogen()->IsStabilityCheck()) {
   2066     const UniqueSet<Map>* maps = instr->hydrogen()->maps();
   2067     for (int i = 0; i < maps->size(); ++i) {
   2068       AddStabilityDependency(maps->at(i).handle());
   2069     }
   2070     return;
   2071   }
   2072 
   2073   Register object = ToRegister(instr->value());
   2074   Register map_reg = ToRegister(instr->temp());
   2075 
   2076   __ Ldr(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));
   2077 
   2078   DeferredCheckMaps* deferred = NULL;
   2079   if (instr->hydrogen()->HasMigrationTarget()) {
   2080     deferred = new(zone()) DeferredCheckMaps(this, instr, object);
   2081     __ Bind(deferred->check_maps());
   2082   }
   2083 
   2084   const UniqueSet<Map>* maps = instr->hydrogen()->maps();
   2085   Label success;
   2086   for (int i = 0; i < maps->size() - 1; i++) {
   2087     Handle<Map> map = maps->at(i).handle();
   2088     __ CompareMap(map_reg, map);
   2089     __ B(eq, &success);
   2090   }
   2091   Handle<Map> map = maps->at(maps->size() - 1).handle();
   2092   __ CompareMap(map_reg, map);
   2093 
   2094   // We didn't match a map.
   2095   if (instr->hydrogen()->HasMigrationTarget()) {
   2096     __ B(ne, deferred->entry());
   2097   } else {
   2098     DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
   2099   }
   2100 
   2101   __ Bind(&success);
   2102 }
   2103 
   2104 
   2105 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
   2106   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   2107     DeoptimizeIfSmi(ToRegister(instr->value()), instr, DeoptimizeReason::kSmi);
   2108   }
   2109 }
   2110 
   2111 
   2112 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
   2113   Register value = ToRegister(instr->value());
   2114   DCHECK(!instr->result() || ToRegister(instr->result()).Is(value));
   2115   DeoptimizeIfNotSmi(value, instr, DeoptimizeReason::kNotASmi);
   2116 }
   2117 
   2118 
   2119 void LCodeGen::DoCheckArrayBufferNotNeutered(
   2120     LCheckArrayBufferNotNeutered* instr) {
   2121   UseScratchRegisterScope temps(masm());
   2122   Register view = ToRegister(instr->view());
   2123   Register scratch = temps.AcquireX();
   2124 
   2125   __ Ldr(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
   2126   __ Ldr(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
   2127   __ Tst(scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
   2128   DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds);
   2129 }
   2130 
   2131 
   2132 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
   2133   Register input = ToRegister(instr->value());
   2134   Register scratch = ToRegister(instr->temp());
   2135 
   2136   __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
   2137   __ Ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
   2138 
   2139   if (instr->hydrogen()->is_interval_check()) {
   2140     InstanceType first, last;
   2141     instr->hydrogen()->GetCheckInterval(&first, &last);
   2142 
   2143     __ Cmp(scratch, first);
   2144     if (first == last) {
   2145       // If there is only one type in the interval, check for equality.
   2146       DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
   2147     } else if (last == LAST_TYPE) {
   2148       // We don't need to compare with the upper bound of the interval.
   2149       DeoptimizeIf(lo, instr, DeoptimizeReason::kWrongInstanceType);
   2150     } else {
   2151       // If we are below the lower bound, set the C flag and clear the Z flag
   2152       // to force a deopt.
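              // Ccmp performs the comparison with 'last' only if hs holds
              // (scratch >= first); otherwise it sets NZCV to CFlag (C set,
              // Z clear), which satisfies hi and triggers the deopt below.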
   2153       __ Ccmp(scratch, last, CFlag, hs);
   2154       DeoptimizeIf(hi, instr, DeoptimizeReason::kWrongInstanceType);
   2155     }
   2156   } else {
   2157     uint8_t mask;
   2158     uint8_t tag;
   2159     instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
   2160 
   2161     if (base::bits::IsPowerOfTwo32(mask)) {
   2162       DCHECK((tag == 0) || (tag == mask));
   2163       if (tag == 0) {
   2164         DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr,
   2165                            DeoptimizeReason::kWrongInstanceType);
   2166       } else {
   2167         DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr,
   2168                              DeoptimizeReason::kWrongInstanceType);
   2169       }
   2170     } else {
   2171       if (tag == 0) {
   2172         __ Tst(scratch, mask);
   2173       } else {
   2174         __ And(scratch, scratch, mask);
   2175         __ Cmp(scratch, tag);
   2176       }
   2177       DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
   2178     }
   2179   }
   2180 }
   2181 
   2182 
   2183 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
   2184   DoubleRegister input = ToDoubleRegister(instr->unclamped());
   2185   Register result = ToRegister32(instr->result());
   2186   __ ClampDoubleToUint8(result, input, double_scratch());
   2187 }
   2188 
   2189 
   2190 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
   2191   Register input = ToRegister32(instr->unclamped());
   2192   Register result = ToRegister32(instr->result());
   2193   __ ClampInt32ToUint8(result, input);
   2194 }
   2195 
   2196 
   2197 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
   2198   Register input = ToRegister(instr->unclamped());
   2199   Register result = ToRegister32(instr->result());
   2200   Label done;
   2201 
   2202   // Both smi and heap number cases are handled.
   2203   Label is_not_smi;
   2204   __ JumpIfNotSmi(input, &is_not_smi);
   2205   __ SmiUntag(result.X(), input);
   2206   __ ClampInt32ToUint8(result);
   2207   __ B(&done);
   2208 
   2209   __ Bind(&is_not_smi);
   2210 
   2211   // Check for heap number.
   2212   Label is_heap_number;
   2213   __ JumpIfHeapNumber(input, &is_heap_number);
   2214 
   2215   // Check for undefined, which converts to zero for the clamping conversion.
   2216   DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
   2217                       DeoptimizeReason::kNotAHeapNumberUndefined);
   2218   __ Mov(result, 0);
   2219   __ B(&done);
   2220 
   2221   // Heap number case.
   2222   __ Bind(&is_heap_number);
   2223   DoubleRegister dbl_scratch = double_scratch();
   2224   DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp1());
   2225   __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
   2226   __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2);
   2227 
   2228   __ Bind(&done);
   2229 }
   2230 
   2231 
   2232 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
   2233   Handle<String> class_name = instr->hydrogen()->class_name();
   2234   Label* true_label = instr->TrueLabel(chunk_);
   2235   Label* false_label = instr->FalseLabel(chunk_);
   2236   Register input = ToRegister(instr->value());
   2237   Register scratch1 = ToRegister(instr->temp1());
   2238   Register scratch2 = ToRegister(instr->temp2());
   2239 
   2240   __ JumpIfSmi(input, false_label);
   2241 
   2242   Register map = scratch2;
   2243   __ CompareObjectType(input, map, scratch1, FIRST_FUNCTION_TYPE);
   2244   STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
   2245   if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
   2246     __ B(hs, true_label);
   2247   } else {
   2248     __ B(hs, false_label);
   2249   }
   2250 
   2251   // Check if the constructor in the map is a function.
   2252   {
   2253     UseScratchRegisterScope temps(masm());
   2254     Register instance_type = temps.AcquireX();
   2255     __ GetMapConstructor(scratch1, map, scratch2, instance_type);
   2256     __ Cmp(instance_type, JS_FUNCTION_TYPE);
   2257   }
   2258   // Objects with a non-function constructor have class 'Object'.
   2259   if (String::Equals(class_name, isolate()->factory()->Object_string())) {
   2260     __ B(ne, true_label);
   2261   } else {
   2262     __ B(ne, false_label);
   2263   }
   2264 
   2265   // The constructor function is in scratch1. Get its instance class name.
   2266   __ Ldr(scratch1,
   2267          FieldMemOperand(scratch1, JSFunction::kSharedFunctionInfoOffset));
   2268   __ Ldr(scratch1,
   2269          FieldMemOperand(scratch1,
   2270                          SharedFunctionInfo::kInstanceClassNameOffset));
   2271 
   2272   // The class name we are testing against is internalized since it's a literal.
   2273   // The name in the constructor is internalized because of the way the context
   2274   // is booted. This routine isn't expected to work for random API-created
   2275   // classes and it doesn't have to because you can't access it with natives
   2276   // syntax. Since both sides are internalized it is sufficient to use an
   2277   // identity comparison.
   2278   EmitCompareAndBranch(instr, eq, scratch1, Operand(class_name));
   2279 }
   2280 
   2281 
   2282 void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) {
   2283   DCHECK(instr->hydrogen()->representation().IsDouble());
   2284   FPRegister object = ToDoubleRegister(instr->object());
   2285   Register temp = ToRegister(instr->temp());
   2286 
   2287   // If we don't have a NaN, we don't have the hole, so branch now to avoid the
   2288   // (relatively expensive) hole-NaN check.
   2289   __ Fcmp(object, object);
   2290   __ B(vc, instr->FalseLabel(chunk_));
   2291 
   2292   // We have a NaN, but is it the hole?
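          // The hole is one specific quiet NaN bit pattern (kHoleNanInt64),
          // so an exact 64-bit integer comparison distinguishes it from any
          // other NaN.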
   2293   __ Fmov(temp, object);
   2294   EmitCompareAndBranch(instr, eq, temp, kHoleNanInt64);
   2295 }
   2296 
   2297 
   2298 void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) {
   2299   DCHECK(instr->hydrogen()->representation().IsTagged());
   2300   Register object = ToRegister(instr->object());
   2301 
   2302   EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex);
   2303 }
   2304 
   2305 
   2306 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
   2307   Register value = ToRegister(instr->value());
   2308   Register map = ToRegister(instr->temp());
   2309 
   2310   __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
   2311   EmitCompareAndBranch(instr, eq, map, Operand(instr->map()));
   2312 }
   2313 
   2314 
   2315 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
   2316   LOperand* left = instr->left();
   2317   LOperand* right = instr->right();
   2318   bool is_unsigned =
   2319       instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
   2320       instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
   2321   Condition cond = TokenToCondition(instr->op(), is_unsigned);
   2322 
   2323   if (left->IsConstantOperand() && right->IsConstantOperand()) {
   2324     // We can statically evaluate the comparison.
   2325     double left_val = ToDouble(LConstantOperand::cast(left));
   2326     double right_val = ToDouble(LConstantOperand::cast(right));
   2327     int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
   2328                          ? instr->TrueDestination(chunk_)
   2329                          : instr->FalseDestination(chunk_);
   2330     EmitGoto(next_block);
   2331   } else {
   2332     if (instr->is_double()) {
   2333       __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right));
   2334 
   2335       // If a NaN is involved, i.e. the result is unordered (V set),
   2336       // jump to false block label.
   2337       __ B(vs, instr->FalseLabel(chunk_));
   2338       EmitBranch(instr, cond);
   2339     } else {
   2340       if (instr->hydrogen_value()->representation().IsInteger32()) {
   2341         if (right->IsConstantOperand()) {
   2342           EmitCompareAndBranch(instr, cond, ToRegister32(left),
   2343                                ToOperand32(right));
   2344         } else {
   2345           // Commute the operands and the condition.
   2346           EmitCompareAndBranch(instr, CommuteCondition(cond),
   2347                                ToRegister32(right), ToOperand32(left));
   2348         }
   2349       } else {
   2350         DCHECK(instr->hydrogen_value()->representation().IsSmi());
   2351         if (right->IsConstantOperand()) {
   2352           int32_t value = ToInteger32(LConstantOperand::cast(right));
   2353           EmitCompareAndBranch(instr,
   2354                                cond,
   2355                                ToRegister(left),
   2356                                Operand(Smi::FromInt(value)));
   2357         } else if (left->IsConstantOperand()) {
   2358           // Commute the operands and the condition.
   2359           int32_t value = ToInteger32(LConstantOperand::cast(left));
   2360           EmitCompareAndBranch(instr,
   2361                                CommuteCondition(cond),
   2362                                ToRegister(right),
   2363                                Operand(Smi::FromInt(value)));
   2364         } else {
   2365           EmitCompareAndBranch(instr,
   2366                                cond,
   2367                                ToRegister(left),
   2368                                ToRegister(right));
   2369         }
   2370       }
   2371     }
   2372   }
   2373 }
   2374 
   2375 
   2376 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
   2377   Register left = ToRegister(instr->left());
   2378   Register right = ToRegister(instr->right());
   2379   EmitCompareAndBranch(instr, eq, left, right);
   2380 }
   2381 
   2382 
   2383 void LCodeGen::DoCmpT(LCmpT* instr) {
   2384   DCHECK(ToRegister(instr->context()).is(cp));
   2385   Token::Value op = instr->op();
   2386   Condition cond = TokenToCondition(op, false);
   2387 
   2388   DCHECK(ToRegister(instr->left()).Is(x1));
   2389   DCHECK(ToRegister(instr->right()).Is(x0));
   2390   Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
   2391   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   2392   // Signal that we don't inline smi code before this stub.
   2393   InlineSmiCheckInfo::EmitNotInlined(masm());
   2394 
   2395   // Return true or false depending on CompareIC result.
   2396   // This instruction is marked as call. We can clobber any register.
   2397   DCHECK(instr->IsMarkedAsCall());
   2398   __ LoadTrueFalseRoots(x1, x2);
   2399   __ Cmp(x0, 0);
   2400   __ Csel(ToRegister(instr->result()), x1, x2, cond);
   2401 }
   2402 
   2403 
   2404 void LCodeGen::DoConstantD(LConstantD* instr) {
   2405   DCHECK(instr->result()->IsDoubleRegister());
   2406   DoubleRegister result = ToDoubleRegister(instr->result());
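          // +0.0 and -0.0 compare equal as doubles, so use the sign of the
          // constant (via copysign) to decide which signed zero to load.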
   2407   if (instr->value() == 0) {
   2408     if (copysign(1.0, instr->value()) == 1.0) {
   2409       __ Fmov(result, fp_zero);
   2410     } else {
   2411       __ Fneg(result, fp_zero);
   2412     }
   2413   } else {
   2414     __ Fmov(result, instr->value());
   2415   }
   2416 }
   2417 
   2418 
   2419 void LCodeGen::DoConstantE(LConstantE* instr) {
   2420   __ Mov(ToRegister(instr->result()), Operand(instr->value()));
   2421 }
   2422 
   2423 
   2424 void LCodeGen::DoConstantI(LConstantI* instr) {
   2425   DCHECK(is_int32(instr->value()));
   2426   // Cast the value here to ensure that the value isn't sign extended by the
   2427   // implicit Operand constructor.
   2428   __ Mov(ToRegister32(instr->result()), static_cast<uint32_t>(instr->value()));
   2429 }
   2430 
   2431 
   2432 void LCodeGen::DoConstantS(LConstantS* instr) {
   2433   __ Mov(ToRegister(instr->result()), Operand(instr->value()));
   2434 }
   2435 
   2436 
   2437 void LCodeGen::DoConstantT(LConstantT* instr) {
   2438   Handle<Object> object = instr->value(isolate());
   2439   AllowDeferredHandleDereference smi_check;
   2440   __ LoadObject(ToRegister(instr->result()), object);
   2441 }
   2442 
   2443 
   2444 void LCodeGen::DoContext(LContext* instr) {
   2445   // If there is a non-return use, the context must be moved to a register.
   2446   Register result = ToRegister(instr->result());
   2447   if (info()->IsOptimizing()) {
   2448     __ Ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
   2449   } else {
   2450     // If there is no frame, the context must be in cp.
   2451     DCHECK(result.is(cp));
   2452   }
   2453 }
   2454 
   2455 
   2456 void LCodeGen::DoCheckValue(LCheckValue* instr) {
   2457   Register reg = ToRegister(instr->value());
   2458   Handle<HeapObject> object = instr->hydrogen()->object().handle();
   2459   AllowDeferredHandleDereference smi_check;
   2460   if (isolate()->heap()->InNewSpace(*object)) {
   2461     UseScratchRegisterScope temps(masm());
   2462     Register temp = temps.AcquireX();
   2463     Handle<Cell> cell = isolate()->factory()->NewCell(object);
   2464     __ Mov(temp, Operand(cell));
   2465     __ Ldr(temp, FieldMemOperand(temp, Cell::kValueOffset));
   2466     __ Cmp(reg, temp);
   2467   } else {
   2468     __ Cmp(reg, Operand(object));
   2469   }
   2470   DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch);
   2471 }
   2472 
   2473 
   2474 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
   2475   last_lazy_deopt_pc_ = masm()->pc_offset();
   2476   DCHECK(instr->HasEnvironment());
   2477   LEnvironment* env = instr->environment();
   2478   RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
   2479   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
   2480 }
   2481 
   2482 
   2483 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
   2484   Deoptimizer::BailoutType type = instr->hydrogen()->type();
   2485   // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
   2486   // needed return address), even though the implementation of LAZY and EAGER is
   2487   // now identical. When LAZY is eventually completely folded into EAGER, remove
   2488   // the special case below.
   2489   if (info()->IsStub() && (type == Deoptimizer::EAGER)) {
   2490     type = Deoptimizer::LAZY;
   2491   }
   2492 
   2493   Deoptimize(instr, instr->hydrogen()->reason(), &type);
   2494 }
   2495 
   2496 
   2497 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
   2498   Register dividend = ToRegister32(instr->dividend());
   2499   int32_t divisor = instr->divisor();
   2500   Register result = ToRegister32(instr->result());
   2501   DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
   2502   DCHECK(!result.is(dividend));
   2503 
   2504   // Check for (0 / -x) that will produce negative zero.
   2505   HDiv* hdiv = instr->hydrogen();
   2506   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
   2507     DeoptimizeIfZero(dividend, instr, DeoptimizeReason::kMinusZero);
   2508   }
   2509   // Check for (kMinInt / -1).
   2510   if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
   2511     // Test dividend for kMinInt by subtracting one (cmp) and checking for
   2512     // overflow.
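            // dividend - 1 overflows only when dividend == kMinInt, so the
            // V flag is set exactly in that case.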
   2513     __ Cmp(dividend, 1);
   2514     DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
   2515   }
   2516   // Deoptimize if remainder will not be 0.
   2517   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
   2518       divisor != 1 && divisor != -1) {
   2519     int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
   2520     __ Tst(dividend, mask);
   2521     DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
   2522   }
   2523 
   2524   if (divisor == -1) {  // Nice shortcut, not needed for correctness.
   2525     __ Neg(result, dividend);
   2526     return;
   2527   }
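          // An arithmetic right shift rounds towards minus infinity, while
          // JS division truncates towards zero, so negative dividends need a
          // bias of (2^shift - 1) added before shifting. In the general case
          // below, (dividend ASR 31) is 0 or -1, and shifting that mask right
          // by (32 - shift) produces exactly this bias for negative inputs
          // and zero otherwise.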
   2528   int32_t shift = WhichPowerOf2Abs(divisor);
   2529   if (shift == 0) {
   2530     __ Mov(result, dividend);
   2531   } else if (shift == 1) {
   2532     __ Add(result, dividend, Operand(dividend, LSR, 31));
   2533   } else {
   2534     __ Mov(result, Operand(dividend, ASR, 31));
   2535     __ Add(result, dividend, Operand(result, LSR, 32 - shift));
   2536   }
   2537   if (shift > 0) __ Mov(result, Operand(result, ASR, shift));
   2538   if (divisor < 0) __ Neg(result, result);
   2539 }
   2540 
   2541 
   2542 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
   2543   Register dividend = ToRegister32(instr->dividend());
   2544   int32_t divisor = instr->divisor();
   2545   Register result = ToRegister32(instr->result());
   2546   DCHECK(!AreAliased(dividend, result));
   2547 
   2548   if (divisor == 0) {
   2549     Deoptimize(instr, DeoptimizeReason::kDivisionByZero);
   2550     return;
   2551   }
   2552 
   2553   // Check for (0 / -x) that will produce negative zero.
   2554   HDiv* hdiv = instr->hydrogen();
   2555   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
   2556     DeoptimizeIfZero(dividend, instr, DeoptimizeReason::kMinusZero);
   2557   }
   2558 
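          // TruncatingDiv computes dividend / Abs(divisor) rounded towards
          // zero with a multiply-by-magic-number sequence rather than sdiv.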
   2559   __ TruncatingDiv(result, dividend, Abs(divisor));
   2560   if (divisor < 0) __ Neg(result, result);
   2561 
   2562   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
   2563     Register temp = ToRegister32(instr->temp());
   2564     DCHECK(!AreAliased(dividend, result, temp));
   2565     __ Sxtw(dividend.X(), dividend);
   2566     __ Mov(temp, divisor);
   2567     __ Smsubl(temp.X(), result, temp, dividend.X());
   2568     DeoptimizeIfNotZero(temp, instr, DeoptimizeReason::kLostPrecision);
   2569   }
   2570 }
   2571 
   2572 
   2573 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
   2574 void LCodeGen::DoDivI(LDivI* instr) {
   2575   HBinaryOperation* hdiv = instr->hydrogen();
   2576   Register dividend = ToRegister32(instr->dividend());
   2577   Register divisor = ToRegister32(instr->divisor());
   2578   Register result = ToRegister32(instr->result());
   2579 
   2580   // Issue the division first, and then check for any deopt cases whilst the
   2581   // result is computed.
   2582   __ Sdiv(result, dividend, divisor);
   2583 
   2584   if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
   2585     DCHECK(!instr->temp());
   2586     return;
   2587   }
   2588 
   2589   // Check for x / 0.
   2590   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
   2591     DeoptimizeIfZero(divisor, instr, DeoptimizeReason::kDivisionByZero);
   2592   }
   2593 
   2594   // Check for (0 / -x) as that will produce negative zero.
   2595   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
   2596     __ Cmp(divisor, 0);
   2597 
   2598     // If the divisor < 0 (mi), compare the dividend, and deopt if it is
   2599     // zero, i.e. a zero dividend with a negative divisor deopts.
   2600     // If the divisor >= 0 (pl, the opposite of mi), set the flags to
   2601     // condition ne so we don't deopt, i.e. a positive divisor doesn't deopt.
   2602     __ Ccmp(dividend, 0, NoFlag, mi);
   2603     DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
   2604   }
   2605 
   2606   // Check for (kMinInt / -1).
   2607   if (hdiv->CheckFlag(HValue::kCanOverflow)) {
   2608     // Test dividend for kMinInt by subtracting one (cmp) and checking for
   2609     // overflow.
   2610     __ Cmp(dividend, 1);
   2611     // If overflow is set, i.e. dividend = kMinInt, compare the divisor with
   2612     // -1. If overflow is clear, set the flags for condition ne, as the
   2613     // dividend isn't kMinInt, and thus we shouldn't deopt.
   2614     __ Ccmp(divisor, -1, NoFlag, vs);
   2615     DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
   2616   }
   2617 
   2618   // Compute remainder and deopt if it's not zero.
   2619   Register remainder = ToRegister32(instr->temp());
   2620   __ Msub(remainder, result, divisor, dividend);
   2621   DeoptimizeIfNotZero(remainder, instr, DeoptimizeReason::kLostPrecision);
   2622 }
   2623 
   2624 
   2625 void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
   2626   DoubleRegister input = ToDoubleRegister(instr->value());
   2627   Register result = ToRegister32(instr->result());
   2628 
   2629   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   2630     DeoptimizeIfMinusZero(input, instr, DeoptimizeReason::kMinusZero);
   2631   }
   2632 
   2633   __ TryRepresentDoubleAsInt32(result, input, double_scratch());
   2634   DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
   2635 
   2636   if (instr->tag_result()) {
   2637     __ SmiTag(result.X());
   2638   }
   2639 }
   2640 
   2641 
   2642 void LCodeGen::DoDrop(LDrop* instr) {
   2643   __ Drop(instr->count());
   2644 
   2645   RecordPushedArgumentsDelta(instr->hydrogen_value()->argument_delta());
   2646 }
   2647 
   2648 
   2649 void LCodeGen::DoDummy(LDummy* instr) {
   2650   // Nothing to see here, move on!
   2651 }
   2652 
   2653 
   2654 void LCodeGen::DoDummyUse(LDummyUse* instr) {
   2655   // Nothing to see here, move on!
   2656 }
   2657 
   2658 
   2659 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
   2660   Register map = ToRegister(instr->map());
   2661   Register result = ToRegister(instr->result());
   2662   Label load_cache, done;
   2663 
   2664   __ EnumLengthUntagged(result, map);
   2665   __ Cbnz(result, &load_cache);
   2666 
   2667   __ Mov(result, Operand(isolate()->factory()->empty_fixed_array()));
   2668   __ B(&done);
   2669 
   2670   __ Bind(&load_cache);
   2671   __ LoadInstanceDescriptors(map, result);
   2672   __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
   2673   __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
   2674   DeoptimizeIfZero(result, instr, DeoptimizeReason::kNoCache);
   2675 
   2676   __ Bind(&done);
   2677 }
   2678 
   2679 
   2680 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
   2681   Register object = ToRegister(instr->object());
   2682 
   2683   DCHECK(instr->IsMarkedAsCall());
   2684   DCHECK(object.Is(x0));
   2685 
   2686   Label use_cache, call_runtime;
   2687   __ CheckEnumCache(object, x5, x1, x2, x3, x4, &call_runtime);
   2688 
   2689   __ Ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
   2690   __ B(&use_cache);
   2691 
   2692   // Get the set of properties to enumerate.
   2693   __ Bind(&call_runtime);
   2694   __ Push(object);
   2695   CallRuntime(Runtime::kForInEnumerate, instr);
   2696   __ Bind(&use_cache);
   2697 }
   2698 
   2699 void LCodeGen::EmitGoto(int block) {
   2700   // Do not emit a jump if we are emitting a goto to the next block.
   2701   if (!IsNextEmittedBlock(block)) {
   2702     __ B(chunk_->GetAssemblyLabel(LookupDestination(block)));
   2703   }
   2704 }
   2705 
   2706 void LCodeGen::DoGoto(LGoto* instr) {
   2707   EmitGoto(instr->block_id());
   2708 }
   2709 
    2710 // The HHasInstanceTypeAndBranch instruction is built with an interval of
    2711 // types to test, but is only used in very restricted ways. The only
    2712 // possible kinds of intervals are:
    2713 //  - [ FIRST_TYPE, instr->to() ]
    2714 //  - [ instr->from(), LAST_TYPE ]
    2715 //  - instr->from() == instr->to()
    2716 //
    2717 // These kinds of intervals can be checked with only one compare instruction,
    2718 // provided the correct value and test condition are used.
   2719 //
   2720 // TestType() will return the value to use in the compare instruction and
   2721 // BranchCondition() will return the condition to use depending on the kind
   2722 // of interval actually specified in the instruction.
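         //
         // For example, for the interval [ FIRST_TYPE, JS_OBJECT_TYPE ],
         // TestType() returns JS_OBJECT_TYPE and BranchCondition() returns ls,
         // so a single compare against JS_OBJECT_TYPE with a lower-or-same
         // branch covers the whole interval.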
   2723 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
   2724   InstanceType from = instr->from();
   2725   InstanceType to = instr->to();
   2726   if (from == FIRST_TYPE) return to;
   2727   DCHECK((from == to) || (to == LAST_TYPE));
   2728   return from;
   2729 }
   2730 
   2731 
   2732 // See comment above TestType function for what this function does.
   2733 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
   2734   InstanceType from = instr->from();
   2735   InstanceType to = instr->to();
   2736   if (from == to) return eq;
   2737   if (to == LAST_TYPE) return hs;
   2738   if (from == FIRST_TYPE) return ls;
   2739   UNREACHABLE();
   2740   return eq;
   2741 }
   2742 
   2743 
   2744 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
   2745   Register input = ToRegister(instr->value());
   2746   Register scratch = ToRegister(instr->temp());
   2747 
   2748   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   2749     __ JumpIfSmi(input, instr->FalseLabel(chunk_));
   2750   }
   2751   __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
   2752   EmitBranch(instr, BranchCondition(instr->hydrogen()));
   2753 }
   2754 
   2755 
   2756 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
   2757   Register result = ToRegister(instr->result());
   2758   Register base = ToRegister(instr->base_object());
   2759   if (instr->offset()->IsConstantOperand()) {
   2760     __ Add(result, base, ToOperand32(instr->offset()));
   2761   } else {
   2762     __ Add(result, base, Operand(ToRegister32(instr->offset()), SXTW));
   2763   }
   2764 }
   2765 
   2766 
   2767 void LCodeGen::DoHasInPrototypeChainAndBranch(
   2768     LHasInPrototypeChainAndBranch* instr) {
   2769   Register const object = ToRegister(instr->object());
   2770   Register const object_map = ToRegister(instr->scratch1());
   2771   Register const object_instance_type = ToRegister(instr->scratch2());
   2772   Register const object_prototype = object_map;
   2773   Register const prototype = ToRegister(instr->prototype());
   2774 
   2775   // The {object} must be a spec object.  It's sufficient to know that {object}
   2776   // is not a smi, since all other non-spec objects have {null} prototypes and
   2777   // will be ruled out below.
   2778   if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
   2779     __ JumpIfSmi(object, instr->FalseLabel(chunk_));
   2780   }
   2781 
    2782   // Loop through the {object}'s prototype chain looking for the {prototype}.
   2783   __ Ldr(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
   2784   Label loop;
   2785   __ Bind(&loop);
   2786 
   2787   // Deoptimize if the object needs to be access checked.
   2788   __ Ldrb(object_instance_type,
   2789           FieldMemOperand(object_map, Map::kBitFieldOffset));
   2790   __ Tst(object_instance_type, Operand(1 << Map::kIsAccessCheckNeeded));
   2791   DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck);
   2792   // Deoptimize for proxies.
   2793   __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
   2794   DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy);
   2795 
   2796   __ Ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
   2797   __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
   2798   __ B(eq, instr->FalseLabel(chunk_));
   2799   __ Cmp(object_prototype, prototype);
   2800   __ B(eq, instr->TrueLabel(chunk_));
   2801   __ Ldr(object_map, FieldMemOperand(object_prototype, HeapObject::kMapOffset));
   2802   __ B(&loop);
   2803 }
   2804 
   2805 
   2806 void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
   2807   DoGap(instr);
   2808 }
   2809 
   2810 
   2811 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
   2812   Register value = ToRegister32(instr->value());
   2813   DoubleRegister result = ToDoubleRegister(instr->result());
   2814   __ Scvtf(result, value);
   2815 }
   2816 
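         // Tail calls reuse the caller's frame. If the caller was itself
         // invoked through an arguments adaptor frame, that frame must be
         // dropped as well, so the actual argument count is read from the
         // adaptor frame instead of the function's formal parameter count.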
   2817 void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
   2818                                   Register scratch1, Register scratch2,
   2819                                   Register scratch3) {
   2820 #if DEBUG
   2821   if (actual.is_reg()) {
   2822     DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
   2823   } else {
   2824     DCHECK(!AreAliased(scratch1, scratch2, scratch3));
   2825   }
   2826 #endif
   2827   if (FLAG_code_comments) {
   2828     if (actual.is_reg()) {
   2829       Comment(";;; PrepareForTailCall, actual: %s {",
   2830               RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
   2831                   actual.reg().code()));
   2832     } else {
   2833       Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
   2834     }
   2835   }
   2836 
   2837   // Check if next frame is an arguments adaptor frame.
   2838   Register caller_args_count_reg = scratch1;
   2839   Label no_arguments_adaptor, formal_parameter_count_loaded;
   2840   __ Ldr(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   2841   __ Ldr(scratch3,
   2842          MemOperand(scratch2, StandardFrameConstants::kContextOffset));
   2843   __ Cmp(scratch3,
   2844          Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   2845   __ B(ne, &no_arguments_adaptor);
   2846 
   2847   // Drop current frame and load arguments count from arguments adaptor frame.
   2848   __ mov(fp, scratch2);
   2849   __ Ldr(caller_args_count_reg,
   2850          MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
   2851   __ SmiUntag(caller_args_count_reg);
   2852   __ B(&formal_parameter_count_loaded);
   2853 
   2854   __ bind(&no_arguments_adaptor);
    2855   // Load the caller's formal parameter count.
   2856   __ Mov(caller_args_count_reg,
   2857          Immediate(info()->literal()->parameter_count()));
   2858 
   2859   __ bind(&formal_parameter_count_loaded);
   2860   __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
   2861 
   2862   Comment(";;; }");
   2863 }
   2864 
   2865 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
   2866   HInvokeFunction* hinstr = instr->hydrogen();
   2867   DCHECK(ToRegister(instr->context()).is(cp));
   2868   // The function is required to be in x1.
   2869   DCHECK(ToRegister(instr->function()).is(x1));
   2870   DCHECK(instr->HasPointerMap());
   2871 
   2872   bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
   2873 
   2874   if (is_tail_call) {
   2875     DCHECK(!info()->saves_caller_doubles());
   2876     ParameterCount actual(instr->arity());
   2877     // It is safe to use x3, x4 and x5 as scratch registers here given that
    2878     // 1) we are not going to return to the caller function anyway,
   2879     // 2) x3 (new.target) will be initialized below.
   2880     PrepareForTailCall(actual, x3, x4, x5);
   2881   }
   2882 
   2883   Handle<JSFunction> known_function = hinstr->known_function();
   2884   if (known_function.is_null()) {
   2885     LPointerMap* pointers = instr->pointer_map();
   2886     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   2887     ParameterCount actual(instr->arity());
   2888     InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
   2889     __ InvokeFunction(x1, no_reg, actual, flag, generator);
   2890   } else {
   2891     CallKnownFunction(known_function, hinstr->formal_parameter_count(),
   2892                       instr->arity(), is_tail_call, instr);
   2893   }
   2894   RecordPushedArgumentsDelta(instr->hydrogen()->argument_delta());
   2895 }
   2896 
   2897 
   2898 Condition LCodeGen::EmitIsString(Register input,
   2899                                  Register temp1,
   2900                                  Label* is_not_string,
   2901                                  SmiCheck check_needed = INLINE_SMI_CHECK) {
   2902   if (check_needed == INLINE_SMI_CHECK) {
   2903     __ JumpIfSmi(input, is_not_string);
   2904   }
   2905   __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
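           // String instance types precede FIRST_NONSTRING_TYPE, so the input
           // is a string iff "lt" holds after this comparison.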
   2906 
   2907   return lt;
   2908 }
   2909 
   2910 
   2911 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
   2912   Register val = ToRegister(instr->value());
   2913   Register scratch = ToRegister(instr->temp());
   2914 
   2915   SmiCheck check_needed =
   2916       instr->hydrogen()->value()->type().IsHeapObject()
   2917           ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   2918   Condition true_cond =
   2919       EmitIsString(val, scratch, instr->FalseLabel(chunk_), check_needed);
   2920 
   2921   EmitBranch(instr, true_cond);
   2922 }
   2923 
   2924 
   2925 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
   2926   Register value = ToRegister(instr->value());
   2927   STATIC_ASSERT(kSmiTag == 0);
   2928   EmitTestAndBranch(instr, eq, value, kSmiTagMask);
   2929 }
   2930 
   2931 
   2932 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
   2933   Register input = ToRegister(instr->value());
   2934   Register temp = ToRegister(instr->temp());
   2935 
   2936   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   2937     __ JumpIfSmi(input, instr->FalseLabel(chunk_));
   2938   }
   2939   __ Ldr(temp, FieldMemOperand(input, HeapObject::kMapOffset));
   2940   __ Ldrb(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
   2941 
   2942   EmitTestAndBranch(instr, ne, temp, 1 << Map::kIsUndetectable);
   2943 }
   2944 
   2945 
   2946 static const char* LabelType(LLabel* label) {
   2947   if (label->is_loop_header()) return " (loop header)";
   2948   if (label->is_osr_entry()) return " (OSR entry)";
   2949   return "";
   2950 }
   2951 
   2952 
   2953 void LCodeGen::DoLabel(LLabel* label) {
   2954   Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
   2955           current_instruction_,
   2956           label->hydrogen_value()->id(),
   2957           label->block_id(),
   2958           LabelType(label));
   2959 
   2960   // Inherit pushed_arguments_ from the predecessor's argument count.
   2961   if (label->block()->HasPredecessor()) {
   2962     pushed_arguments_ = label->block()->predecessors()->at(0)->argument_count();
   2963 #ifdef DEBUG
   2964     for (auto p : *label->block()->predecessors()) {
   2965       DCHECK_EQ(p->argument_count(), pushed_arguments_);
   2966     }
   2967 #endif
   2968   }
   2969 
   2970   __ Bind(label->label());
   2971   current_block_ = label->block_id();
   2972   DoGap(label);
   2973 }
   2974 
   2975 
   2976 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
   2977   Register context = ToRegister(instr->context());
   2978   Register result = ToRegister(instr->result());
   2979   __ Ldr(result, ContextMemOperand(context, instr->slot_index()));
   2980   if (instr->hydrogen()->RequiresHoleCheck()) {
   2981     if (instr->hydrogen()->DeoptimizesOnHole()) {
   2982       DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
   2983                        DeoptimizeReason::kHole);
   2984     } else {
   2985       Label not_the_hole;
   2986       __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
   2987       __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
   2988       __ Bind(&not_the_hole);
   2989     }
   2990   }
   2991 }
   2992 
   2993 
   2994 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
   2995   Register function = ToRegister(instr->function());
   2996   Register result = ToRegister(instr->result());
   2997   Register temp = ToRegister(instr->temp());
   2998 
   2999   // Get the prototype or initial map from the function.
   3000   __ Ldr(result, FieldMemOperand(function,
   3001                                  JSFunction::kPrototypeOrInitialMapOffset));
   3002 
   3003   // Check that the function has a prototype or an initial map.
   3004   DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
   3005                    DeoptimizeReason::kHole);
   3006 
   3007   // If the function does not have an initial map, we're done.
   3008   Label done;
   3009   __ CompareObjectType(result, temp, temp, MAP_TYPE);
   3010   __ B(ne, &done);
   3011 
   3012   // Get the prototype from the initial map.
   3013   __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
   3014 
   3015   // All done.
   3016   __ Bind(&done);
   3017 }
   3018 
   3019 
   3020 MemOperand LCodeGen::PrepareKeyedExternalArrayOperand(
   3021     Register key,
   3022     Register base,
   3023     Register scratch,
   3024     bool key_is_smi,
   3025     bool key_is_constant,
   3026     int constant_key,
   3027     ElementsKind elements_kind,
   3028     int base_offset) {
   3029   int element_size_shift = ElementsKindToShiftSize(elements_kind);
   3030 
   3031   if (key_is_constant) {
   3032     int key_offset = constant_key << element_size_shift;
   3033     return MemOperand(base, key_offset + base_offset);
   3034   }
   3035 
   3036   if (key_is_smi) {
   3037     __ Add(scratch, base, Operand::UntagSmiAndScale(key, element_size_shift));
   3038     return MemOperand(scratch, base_offset);
   3039   }
   3040 
   3041   if (base_offset == 0) {
   3042     return MemOperand(base, key, SXTW, element_size_shift);
   3043   }
   3044 
   3045   DCHECK(!AreAliased(scratch, key));
   3046   __ Add(scratch, base, base_offset);
   3047   return MemOperand(scratch, key, SXTW, element_size_shift);
   3048 }
   3049 
   3050 
   3051 void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
   3052   Register ext_ptr = ToRegister(instr->elements());
   3053   Register scratch;
   3054   ElementsKind elements_kind = instr->elements_kind();
   3055 
   3056   bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
   3057   bool key_is_constant = instr->key()->IsConstantOperand();
   3058   Register key = no_reg;
   3059   int constant_key = 0;
   3060   if (key_is_constant) {
   3061     DCHECK(instr->temp() == NULL);
   3062     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   3063     if (constant_key & 0xf0000000) {
   3064       Abort(kArrayIndexConstantValueTooBig);
   3065     }
   3066   } else {
   3067     scratch = ToRegister(instr->temp());
   3068     key = ToRegister(instr->key());
   3069   }
   3070 
   3071   MemOperand mem_op =
   3072       PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
   3073                                        key_is_constant, constant_key,
   3074                                        elements_kind,
   3075                                        instr->base_offset());
   3076 
   3077   if (elements_kind == FLOAT32_ELEMENTS) {
   3078     DoubleRegister result = ToDoubleRegister(instr->result());
   3079     __ Ldr(result.S(), mem_op);
   3080     __ Fcvt(result, result.S());
   3081   } else if (elements_kind == FLOAT64_ELEMENTS) {
   3082     DoubleRegister result = ToDoubleRegister(instr->result());
   3083     __ Ldr(result, mem_op);
   3084   } else {
   3085     Register result = ToRegister(instr->result());
   3086 
   3087     switch (elements_kind) {
   3088       case INT8_ELEMENTS:
   3089         __ Ldrsb(result, mem_op);
   3090         break;
   3091       case UINT8_ELEMENTS:
   3092       case UINT8_CLAMPED_ELEMENTS:
   3093         __ Ldrb(result, mem_op);
   3094         break;
   3095       case INT16_ELEMENTS:
   3096         __ Ldrsh(result, mem_op);
   3097         break;
   3098       case UINT16_ELEMENTS:
   3099         __ Ldrh(result, mem_op);
   3100         break;
   3101       case INT32_ELEMENTS:
   3102         __ Ldrsw(result, mem_op);
   3103         break;
   3104       case UINT32_ELEMENTS:
   3105         __ Ldr(result.W(), mem_op);
   3106         if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
    3107           // Deopt if value >= 0x80000000.
   3108           __ Tst(result, 0xFFFFFFFF80000000);
   3109           DeoptimizeIf(ne, instr, DeoptimizeReason::kNegativeValue);
   3110         }
   3111         break;
   3112       case FLOAT32_ELEMENTS:
   3113       case FLOAT64_ELEMENTS:
   3114       case FAST_HOLEY_DOUBLE_ELEMENTS:
   3115       case FAST_HOLEY_ELEMENTS:
   3116       case FAST_HOLEY_SMI_ELEMENTS:
   3117       case FAST_DOUBLE_ELEMENTS:
   3118       case FAST_ELEMENTS:
   3119       case FAST_SMI_ELEMENTS:
   3120       case DICTIONARY_ELEMENTS:
   3121       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
   3122       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
   3123       case FAST_STRING_WRAPPER_ELEMENTS:
   3124       case SLOW_STRING_WRAPPER_ELEMENTS:
   3125       case NO_ELEMENTS:
   3126         UNREACHABLE();
   3127         break;
   3128     }
   3129   }
   3130 }
   3131 
   3132 
   3133 MemOperand LCodeGen::PrepareKeyedArrayOperand(Register base,
   3134                                               Register elements,
   3135                                               Register key,
   3136                                               bool key_is_tagged,
   3137                                               ElementsKind elements_kind,
   3138                                               Representation representation,
   3139                                               int base_offset) {
   3140   STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
   3141   STATIC_ASSERT(kSmiTag == 0);
   3142   int element_size_shift = ElementsKindToShiftSize(elements_kind);
   3143 
   3144   // Even though the HLoad/StoreKeyed instructions force the input
   3145   // representation for the key to be an integer, the input gets replaced during
   3146   // bounds check elimination with the index argument to the bounds check, which
   3147   // can be tagged, so that case must be handled here, too.
   3148   if (key_is_tagged) {
   3149     __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
   3150     if (representation.IsInteger32()) {
   3151       DCHECK(elements_kind == FAST_SMI_ELEMENTS);
   3152       // Read or write only the smi payload in the case of fast smi arrays.
   3153       return UntagSmiMemOperand(base, base_offset);
   3154     } else {
   3155       return MemOperand(base, base_offset);
   3156     }
   3157   } else {
   3158     // Sign extend key because it could be a 32-bit negative value or contain
    3159     // garbage in the top 32 bits. The address computation is done in 64 bits.
   3160     DCHECK((element_size_shift >= 0) && (element_size_shift <= 4));
   3161     if (representation.IsInteger32()) {
   3162       DCHECK(elements_kind == FAST_SMI_ELEMENTS);
   3163       // Read or write only the smi payload in the case of fast smi arrays.
   3164       __ Add(base, elements, Operand(key, SXTW, element_size_shift));
   3165       return UntagSmiMemOperand(base, base_offset);
   3166     } else {
   3167       __ Add(base, elements, base_offset);
   3168       return MemOperand(base, key, SXTW, element_size_shift);
   3169     }
   3170   }
   3171 }
   3172 
   3173 
   3174 void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
   3175   Register elements = ToRegister(instr->elements());
   3176   DoubleRegister result = ToDoubleRegister(instr->result());
   3177   MemOperand mem_op;
   3178 
   3179   if (instr->key()->IsConstantOperand()) {
   3180     DCHECK(instr->hydrogen()->RequiresHoleCheck() ||
   3181            (instr->temp() == NULL));
   3182 
   3183     int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   3184     if (constant_key & 0xf0000000) {
   3185       Abort(kArrayIndexConstantValueTooBig);
   3186     }
   3187     int offset = instr->base_offset() + constant_key * kDoubleSize;
   3188     mem_op = MemOperand(elements, offset);
   3189   } else {
   3190     Register load_base = ToRegister(instr->temp());
   3191     Register key = ToRegister(instr->key());
   3192     bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
   3193     mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
   3194                                       instr->hydrogen()->elements_kind(),
   3195                                       instr->hydrogen()->representation(),
   3196                                       instr->base_offset());
   3197   }
   3198 
   3199   __ Ldr(result, mem_op);
   3200 
   3201   if (instr->hydrogen()->RequiresHoleCheck()) {
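             // The hole in a double array is a specific NaN bit pattern
             // (kHoleNanInt64), so compare the raw bits in an integer
             // register; an FP comparison could not distinguish NaN payloads.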
   3202     Register scratch = ToRegister(instr->temp());
   3203     __ Fmov(scratch, result);
   3204     __ Eor(scratch, scratch, kHoleNanInt64);
   3205     DeoptimizeIfZero(scratch, instr, DeoptimizeReason::kHole);
   3206   }
   3207 }
   3208 
   3209 
   3210 void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
   3211   Register elements = ToRegister(instr->elements());
   3212   Register result = ToRegister(instr->result());
   3213   MemOperand mem_op;
   3214 
   3215   Representation representation = instr->hydrogen()->representation();
   3216   if (instr->key()->IsConstantOperand()) {
   3217     DCHECK(instr->temp() == NULL);
   3218     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
   3219     int offset = instr->base_offset() +
   3220         ToInteger32(const_operand) * kPointerSize;
   3221     if (representation.IsInteger32()) {
   3222       DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
   3223       STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
   3224       STATIC_ASSERT(kSmiTag == 0);
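               // Read only the 32-bit smi payload, which is held in the upper
               // half of the tagged 64-bit word.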
   3225       mem_op = UntagSmiMemOperand(elements, offset);
   3226     } else {
   3227       mem_op = MemOperand(elements, offset);
   3228     }
   3229   } else {
   3230     Register load_base = ToRegister(instr->temp());
   3231     Register key = ToRegister(instr->key());
   3232     bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
   3233 
   3234     mem_op = PrepareKeyedArrayOperand(load_base, elements, key, key_is_tagged,
   3235                                       instr->hydrogen()->elements_kind(),
   3236                                       representation, instr->base_offset());
   3237   }
   3238 
   3239   __ Load(result, mem_op, representation);
   3240 
   3241   if (instr->hydrogen()->RequiresHoleCheck()) {
   3242     if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
   3243       DeoptimizeIfNotSmi(result, instr, DeoptimizeReason::kNotASmi);
   3244     } else {
   3245       DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
   3246                        DeoptimizeReason::kHole);
   3247     }
   3248   } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
   3249     DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
   3250     Label done;
   3251     __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
   3252     __ B(ne, &done);
   3253     if (info()->IsStub()) {
   3254       // A stub can safely convert the hole to undefined only if the array
   3255       // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
   3256       // it needs to bail out.
   3257       __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
   3258       __ Ldr(result, FieldMemOperand(result, PropertyCell::kValueOffset));
   3259       __ Cmp(result, Operand(Smi::FromInt(Isolate::kProtectorValid)));
   3260       DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
   3261     }
   3262     __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
   3263     __ Bind(&done);
   3264   }
   3265 }
   3266 
   3267 
   3268 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
   3269   HObjectAccess access = instr->hydrogen()->access();
   3270   int offset = access.offset();
   3271   Register object = ToRegister(instr->object());
   3272 
   3273   if (access.IsExternalMemory()) {
   3274     Register result = ToRegister(instr->result());
   3275     __ Load(result, MemOperand(object, offset), access.representation());
   3276     return;
   3277   }
   3278 
   3279   if (instr->hydrogen()->representation().IsDouble()) {
   3280     DCHECK(access.IsInobject());
   3281     FPRegister result = ToDoubleRegister(instr->result());
   3282     __ Ldr(result, FieldMemOperand(object, offset));
   3283     return;
   3284   }
   3285 
   3286   Register result = ToRegister(instr->result());
   3287   Register source;
   3288   if (access.IsInobject()) {
   3289     source = object;
   3290   } else {
   3291     // Load the properties array, using result as a scratch register.
   3292     __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
   3293     source = result;
   3294   }
   3295 
   3296   if (access.representation().IsSmi() &&
   3297       instr->hydrogen()->representation().IsInteger32()) {
   3298     // Read int value directly from upper half of the smi.
   3299     STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
   3300     STATIC_ASSERT(kSmiTag == 0);
   3301     __ Load(result, UntagSmiFieldMemOperand(source, offset),
   3302             Representation::Integer32());
   3303   } else {
   3304     __ Load(result, FieldMemOperand(source, offset), access.representation());
   3305   }
   3306 }
   3307 
   3308 
   3309 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
   3310   Register result = ToRegister(instr->result());
   3311   __ LoadRoot(result, instr->index());
   3312 }
   3313 
   3314 
   3315 void LCodeGen::DoMathAbs(LMathAbs* instr) {
   3316   Representation r = instr->hydrogen()->value()->representation();
   3317   if (r.IsDouble()) {
   3318     DoubleRegister input = ToDoubleRegister(instr->value());
   3319     DoubleRegister result = ToDoubleRegister(instr->result());
   3320     __ Fabs(result, input);
   3321   } else if (r.IsSmi() || r.IsInteger32()) {
   3322     Register input = r.IsSmi() ? ToRegister(instr->value())
   3323                                : ToRegister32(instr->value());
   3324     Register result = r.IsSmi() ? ToRegister(instr->result())
   3325                                 : ToRegister32(instr->result());
   3326     __ Abs(result, input);
   3327     DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
   3328   }
   3329 }
   3330 
   3331 
   3332 void LCodeGen::DoDeferredMathAbsTagged(LMathAbsTagged* instr,
   3333                                        Label* exit,
   3334                                        Label* allocation_entry) {
   3335   // Handle the tricky cases of MathAbsTagged:
   3336   //  - HeapNumber inputs.
   3337   //    - Negative inputs produce a positive result, so a new HeapNumber is
   3338   //      allocated to hold it.
   3339   //    - Positive inputs are returned as-is, since there is no need to allocate
   3340   //      a new HeapNumber for the result.
    3341   //  - The (smi) input -0x80000000 produces +0x80000000, which does not
    3342   //    fit in a smi. In this case, the inline code sets the result and
    3343   //    jumps directly to the allocation_entry label.
   3344   DCHECK(instr->context() != NULL);
   3345   DCHECK(ToRegister(instr->context()).is(cp));
   3346   Register input = ToRegister(instr->value());
   3347   Register temp1 = ToRegister(instr->temp1());
   3348   Register temp2 = ToRegister(instr->temp2());
   3349   Register result_bits = ToRegister(instr->temp3());
   3350   Register result = ToRegister(instr->result());
   3351 
   3352   Label runtime_allocation;
   3353 
   3354   // Deoptimize if the input is not a HeapNumber.
   3355   DeoptimizeIfNotHeapNumber(input, instr);
   3356 
   3357   // If the argument is positive, we can return it as-is, without any need to
   3358   // allocate a new HeapNumber for the result. We have to do this in integer
   3359   // registers (rather than with fabs) because we need to be able to distinguish
   3360   // the two zeroes.
   3361   __ Ldr(result_bits, FieldMemOperand(input, HeapNumber::kValueOffset));
   3362   __ Mov(result, input);
   3363   __ Tbz(result_bits, kXSignBit, exit);
   3364 
   3365   // Calculate abs(input) by clearing the sign bit.
   3366   __ Bic(result_bits, result_bits, kXSignMask);
   3367 
   3368   // Allocate a new HeapNumber to hold the result.
   3369   //  result_bits   The bit representation of the (double) result.
   3370   __ Bind(allocation_entry);
   3371   __ AllocateHeapNumber(result, &runtime_allocation, temp1, temp2);
   3372   // The inline (non-deferred) code will store result_bits into result.
   3373   __ B(exit);
   3374 
   3375   __ Bind(&runtime_allocation);
   3376   if (FLAG_debug_code) {
   3377     // Because result is in the pointer map, we need to make sure it has a valid
   3378     // tagged value before we call the runtime. We speculatively set it to the
   3379     // input (for abs(+x)) or to a smi (for abs(-SMI_MIN)), so it should already
   3380     // be valid.
   3381     Label result_ok;
   3382     Register input = ToRegister(instr->value());
   3383     __ JumpIfSmi(result, &result_ok);
   3384     __ Cmp(input, result);
   3385     __ Assert(eq, kUnexpectedValue);
   3386     __ Bind(&result_ok);
   3387   }
   3388 
   3389   { PushSafepointRegistersScope scope(this);
   3390     CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
   3391                             instr->context());
   3392     __ StoreToSafepointRegisterSlot(x0, result);
   3393   }
   3394   // The inline (non-deferred) code will store result_bits into result.
   3395 }
   3396 
   3397 
   3398 void LCodeGen::DoMathAbsTagged(LMathAbsTagged* instr) {
   3399   // Class for deferred case.
   3400   class DeferredMathAbsTagged: public LDeferredCode {
   3401    public:
   3402     DeferredMathAbsTagged(LCodeGen* codegen, LMathAbsTagged* instr)
   3403         : LDeferredCode(codegen), instr_(instr) { }
   3404     virtual void Generate() {
   3405       codegen()->DoDeferredMathAbsTagged(instr_, exit(),
   3406                                          allocation_entry());
   3407     }
   3408     virtual LInstruction* instr() { return instr_; }
   3409     Label* allocation_entry() { return &allocation; }
   3410    private:
   3411     LMathAbsTagged* instr_;
   3412     Label allocation;
   3413   };
   3414 
   3415   // TODO(jbramley): The early-exit mechanism would skip the new frame handling
   3416   // in GenerateDeferredCode. Tidy this up.
   3417   DCHECK(!NeedsDeferredFrame());
   3418 
   3419   DeferredMathAbsTagged* deferred =
   3420       new(zone()) DeferredMathAbsTagged(this, instr);
   3421 
   3422   DCHECK(instr->hydrogen()->value()->representation().IsTagged() ||
   3423          instr->hydrogen()->value()->representation().IsSmi());
   3424   Register input = ToRegister(instr->value());
   3425   Register result_bits = ToRegister(instr->temp3());
   3426   Register result = ToRegister(instr->result());
   3427   Label done;
   3428 
   3429   // Handle smis inline.
   3430   // We can treat smis as 64-bit integers, since the (low-order) tag bits will
   3431   // never get set by the negation. This is therefore the same as the Integer32
   3432   // case in DoMathAbs, except that it operates on 64-bit values.
   3433   STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
   3434 
   3435   __ JumpIfNotSmi(input, deferred->entry());
   3436 
   3437   __ Abs(result, input, NULL, &done);
   3438 
   3439   // The result is the magnitude (abs) of the smallest value a smi can
   3440   // represent, encoded as a double.
   3441   __ Mov(result_bits, double_to_rawbits(0x80000000));
   3442   __ B(deferred->allocation_entry());
   3443 
   3444   __ Bind(deferred->exit());
   3445   __ Str(result_bits, FieldMemOperand(result, HeapNumber::kValueOffset));
   3446 
   3447   __ Bind(&done);
   3448 }
   3449 
   3450 void LCodeGen::DoMathCos(LMathCos* instr) {
   3451   DCHECK(instr->IsMarkedAsCall());
   3452   DCHECK(ToDoubleRegister(instr->value()).is(d0));
   3453   __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1);
   3454   DCHECK(ToDoubleRegister(instr->result()).Is(d0));
   3455 }
   3456 
   3457 void LCodeGen::DoMathSin(LMathSin* instr) {
   3458   DCHECK(instr->IsMarkedAsCall());
   3459   DCHECK(ToDoubleRegister(instr->value()).is(d0));
   3460   __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
   3461   DCHECK(ToDoubleRegister(instr->result()).Is(d0));
   3462 }
   3463 
   3464 void LCodeGen::DoMathExp(LMathExp* instr) {
   3465   DCHECK(instr->IsMarkedAsCall());
   3466   DCHECK(ToDoubleRegister(instr->value()).is(d0));
   3467   __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1);
   3468   DCHECK(ToDoubleRegister(instr->result()).Is(d0));
   3469 }
   3470 
   3471 
   3472 void LCodeGen::DoMathFloorD(LMathFloorD* instr) {
   3473   DoubleRegister input = ToDoubleRegister(instr->value());
   3474   DoubleRegister result = ToDoubleRegister(instr->result());
   3475 
   3476   __ Frintm(result, input);
   3477 }
   3478 
   3479 
   3480 void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
   3481   DoubleRegister input = ToDoubleRegister(instr->value());
   3482   Register result = ToRegister(instr->result());
   3483 
   3484   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3485     DeoptimizeIfMinusZero(input, instr, DeoptimizeReason::kMinusZero);
   3486   }
   3487 
   3488   __ Fcvtms(result, input);
   3489 
   3490   // Check that the result fits into a 32-bit integer.
   3491   //  - The result did not overflow.
   3492   __ Cmp(result, Operand(result, SXTW));
   3493   //  - The input was not NaN.
   3494   __ Fccmp(input, input, NoFlag, eq);
   3495   DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
   3496 }
   3497 
   3498 
   3499 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
   3500   Register dividend = ToRegister32(instr->dividend());
   3501   Register result = ToRegister32(instr->result());
   3502   int32_t divisor = instr->divisor();
   3503 
   3504   // If the divisor is 1, return the dividend.
   3505   if (divisor == 1) {
   3506     __ Mov(result, dividend, kDiscardForSameWReg);
   3507     return;
   3508   }
   3509 
   3510   // If the divisor is positive, things are easy: There can be no deopts and we
   3511   // can simply do an arithmetic right shift.
   3512   int32_t shift = WhichPowerOf2Abs(divisor);
   3513   if (divisor > 1) {
   3514     __ Mov(result, Operand(dividend, ASR, shift));
   3515     return;
   3516   }
   3517 
   3518   // If the divisor is negative, we have to negate and handle edge cases.
   3519   __ Negs(result, dividend);
   3520   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3521     DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
   3522   }
   3523 
   3524   // Dividing by -1 is basically negation, unless we overflow.
   3525   if (divisor == -1) {
   3526     if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
   3527       DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
   3528     }
   3529     return;
   3530   }
   3531 
   3532   // If the negation could not overflow, simply shifting is OK.
   3533   if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
    3534     __ Mov(result, Operand(result, ASR, shift));
   3535     return;
   3536   }
   3537 
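           // Negs set the V flag iff the dividend was kMinInt. In that case
           // result still holds kMinInt and the shift below would produce the
           // wrong value, so select the precomputed kMinInt / divisor instead.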
   3538   __ Asr(result, result, shift);
   3539   __ Csel(result, result, kMinInt / divisor, vc);
   3540 }
   3541 
   3542 
   3543 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
   3544   Register dividend = ToRegister32(instr->dividend());
   3545   int32_t divisor = instr->divisor();
   3546   Register result = ToRegister32(instr->result());
   3547   DCHECK(!AreAliased(dividend, result));
   3548 
   3549   if (divisor == 0) {
   3550     Deoptimize(instr, DeoptimizeReason::kDivisionByZero);
   3551     return;
   3552   }
   3553 
   3554   // Check for (0 / -x) that will produce negative zero.
   3555   HMathFloorOfDiv* hdiv = instr->hydrogen();
   3556   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
   3557     DeoptimizeIfZero(dividend, instr, DeoptimizeReason::kMinusZero);
   3558   }
   3559 
   3560   // Easy case: We need no dynamic check for the dividend and the flooring
   3561   // division is the same as the truncating division.
   3562   if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
   3563       (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
   3564     __ TruncatingDiv(result, dividend, Abs(divisor));
   3565     if (divisor < 0) __ Neg(result, result);
   3566     return;
   3567   }
   3568 
   3569   // In the general case we may need to adjust before and after the truncating
   3570   // division to get a flooring division.
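           // For example, for dividend = -1 and divisor = 4, truncation would
           // give 0 but the floored result is -1: adjust the dividend to
           // -1 + 1 = 0, truncate (0 / 4 = 0), then subtract 1 to get -1.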
   3571   Register temp = ToRegister32(instr->temp());
   3572   DCHECK(!AreAliased(temp, dividend, result));
   3573   Label needs_adjustment, done;
   3574   __ Cmp(dividend, 0);
   3575   __ B(divisor > 0 ? lt : gt, &needs_adjustment);
   3576   __ TruncatingDiv(result, dividend, Abs(divisor));
   3577   if (divisor < 0) __ Neg(result, result);
   3578   __ B(&done);
   3579   __ Bind(&needs_adjustment);
   3580   __ Add(temp, dividend, Operand(divisor > 0 ? 1 : -1));
   3581   __ TruncatingDiv(result, temp, Abs(divisor));
   3582   if (divisor < 0) __ Neg(result, result);
   3583   __ Sub(result, result, Operand(1));
   3584   __ Bind(&done);
   3585 }
   3586 
   3587 
   3588 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
   3589 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
   3590   Register dividend = ToRegister32(instr->dividend());
   3591   Register divisor = ToRegister32(instr->divisor());
   3592   Register remainder = ToRegister32(instr->temp());
   3593   Register result = ToRegister32(instr->result());
   3594 
   3595   // This can't cause an exception on ARM, so we can speculatively
    3596   // execute it now.
   3597   __ Sdiv(result, dividend, divisor);
   3598 
   3599   // Check for x / 0.
   3600   DeoptimizeIfZero(divisor, instr, DeoptimizeReason::kDivisionByZero);
   3601 
   3602   // Check for (kMinInt / -1).
   3603   if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
   3604     // The V flag will be set iff dividend == kMinInt.
   3605     __ Cmp(dividend, 1);
   3606     __ Ccmp(divisor, -1, NoFlag, vs);
   3607     DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
   3608   }
   3609 
   3610   // Check for (0 / -x) that will produce negative zero.
   3611   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3612     __ Cmp(divisor, 0);
   3613     __ Ccmp(dividend, 0, ZFlag, mi);
   3614     // "divisor" can't be null because the code would have already been
   3615     // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
   3616     // In this case we need to deoptimize to produce a -0.
   3617     DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
   3618   }
   3619 
   3620   Label done;
   3621   // If both operands have the same sign then we are done.
   3622   __ Eor(remainder, dividend, divisor);
   3623   __ Tbz(remainder, kWSignBit, &done);
   3624 
   3625   // Check if the result needs to be corrected.
   3626   __ Msub(remainder, result, divisor, dividend);
   3627   __ Cbz(remainder, &done);
   3628   __ Sub(result, result, 1);
   3629 
   3630   __ Bind(&done);
   3631 }
   3632 
   3633 
   3634 void LCodeGen::DoMathLog(LMathLog* instr) {
   3635   DCHECK(instr->IsMarkedAsCall());
   3636   DCHECK(ToDoubleRegister(instr->value()).is(d0));
   3637   __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
   3638   DCHECK(ToDoubleRegister(instr->result()).Is(d0));
   3639 }
   3640 
   3641 
   3642 void LCodeGen::DoMathClz32(LMathClz32* instr) {
   3643   Register input = ToRegister32(instr->value());
   3644   Register result = ToRegister32(instr->result());
   3645   __ Clz(result, input);
   3646 }
   3647 
   3648 
   3649 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
   3650   DoubleRegister input = ToDoubleRegister(instr->value());
   3651   DoubleRegister result = ToDoubleRegister(instr->result());
   3652   Label done;
   3653 
   3654   // Math.pow(x, 0.5) differs from fsqrt(x) in the following cases:
   3655   //  Math.pow(-Infinity, 0.5) == +Infinity
   3656   //  Math.pow(-0.0, 0.5) == +0.0
   3657 
   3658   // Catch -infinity inputs first.
   3659   // TODO(jbramley): A constant infinity register would be helpful here.
   3660   __ Fmov(double_scratch(), kFP64NegativeInfinity);
   3661   __ Fcmp(double_scratch(), input);
   3662   __ Fabs(result, input);
   3663   __ B(&done, eq);
   3664 
   3665   // Add +0.0 to convert -0.0 to +0.0.
   3666   __ Fadd(double_scratch(), input, fp_zero);
   3667   __ Fsqrt(result, double_scratch());
   3668 
   3669   __ Bind(&done);
   3670 }
   3671 
   3672 
   3673 void LCodeGen::DoPower(LPower* instr) {
   3674   Representation exponent_type = instr->hydrogen()->right()->representation();
   3675   // Having marked this as a call, we can use any registers.
   3676   // Just make sure that the input/output registers are the expected ones.
   3677   Register tagged_exponent = MathPowTaggedDescriptor::exponent();
   3678   Register integer_exponent = MathPowIntegerDescriptor::exponent();
   3679   DCHECK(!instr->right()->IsDoubleRegister() ||
   3680          ToDoubleRegister(instr->right()).is(d1));
   3681   DCHECK(exponent_type.IsInteger32() || !instr->right()->IsRegister() ||
   3682          ToRegister(instr->right()).is(tagged_exponent));
   3683   DCHECK(!exponent_type.IsInteger32() ||
   3684          ToRegister(instr->right()).is(integer_exponent));
   3685   DCHECK(ToDoubleRegister(instr->left()).is(d0));
   3686   DCHECK(ToDoubleRegister(instr->result()).is(d0));
   3687 
   3688   if (exponent_type.IsSmi()) {
   3689     MathPowStub stub(isolate(), MathPowStub::TAGGED);
   3690     __ CallStub(&stub);
   3691   } else if (exponent_type.IsTagged()) {
   3692     Label no_deopt;
   3693     __ JumpIfSmi(tagged_exponent, &no_deopt);
   3694     DeoptimizeIfNotHeapNumber(tagged_exponent, instr);
   3695     __ Bind(&no_deopt);
   3696     MathPowStub stub(isolate(), MathPowStub::TAGGED);
   3697     __ CallStub(&stub);
   3698   } else if (exponent_type.IsInteger32()) {
   3699     // Ensure integer exponent has no garbage in top 32-bits, as MathPowStub
   3700     // supports large integer exponents.
   3701     __ Sxtw(integer_exponent, integer_exponent);
   3702     MathPowStub stub(isolate(), MathPowStub::INTEGER);
   3703     __ CallStub(&stub);
   3704   } else {
   3705     DCHECK(exponent_type.IsDouble());
   3706     MathPowStub stub(isolate(), MathPowStub::DOUBLE);
   3707     __ CallStub(&stub);
   3708   }
   3709 }
   3710 
   3711 
   3712 void LCodeGen::DoMathRoundD(LMathRoundD* instr) {
   3713   DoubleRegister input = ToDoubleRegister(instr->value());
   3714   DoubleRegister result = ToDoubleRegister(instr->result());
   3715   DoubleRegister scratch_d = double_scratch();
   3716 
   3717   DCHECK(!AreAliased(input, result, scratch_d));
   3718 
   3719   Label done;
   3720 
   3721   __ Frinta(result, input);
   3722   __ Fcmp(input, 0.0);
   3723   __ Fccmp(result, input, ZFlag, lt);
   3724   // The result is correct if the input was in [-0, +infinity], or was a
   3725   // negative integral value.
   3726   __ B(eq, &done);
   3727 
    3728   // Here the input is negative, non-integral, with an exponent lower than 52.
   3729   // We do not have to worry about the 0.49999999999999994 (0x3fdfffffffffffff)
   3730   // case. So we can safely add 0.5.
   3731   __ Fmov(scratch_d, 0.5);
   3732   __ Fadd(result, input, scratch_d);
   3733   __ Frintm(result, result);
   3734   // The range [-0.5, -0.0[ yielded +0.0. Force the sign to negative.
   3735   __ Fabs(result, result);
   3736   __ Fneg(result, result);
   3737 
   3738   __ Bind(&done);
   3739 }
   3740 
   3741 
   3742 void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
   3743   DoubleRegister input = ToDoubleRegister(instr->value());
   3744   DoubleRegister temp = ToDoubleRegister(instr->temp1());
   3745   DoubleRegister dot_five = double_scratch();
   3746   Register result = ToRegister(instr->result());
   3747   Label done;
   3748 
   3749   // Math.round() rounds to the nearest integer, with ties going towards
   3750   // +infinity. This does not match any IEEE-754 rounding mode.
   3751   //  - Infinities and NaNs are propagated unchanged, but cause deopts because
   3752   //    they can't be represented as integers.
   3753   //  - The sign of the result is the same as the sign of the input. This means
   3754   //    that -0.0 rounds to itself, and values -0.5 <= input < 0 also produce a
   3755   //    result of -0.0.
   3756 
   3757   // Add 0.5 and round towards -infinity.
   3758   __ Fmov(dot_five, 0.5);
   3759   __ Fadd(temp, input, dot_five);
   3760   __ Fcvtms(result, temp);
   3761 
   3762   // The result is correct if:
    3763   //  result is not 0, as the input could be NaN or in [-0.5, -0.0].
   3764   //  result is not 1, as 0.499...94 will wrongly map to 1.
   3765   //  result fits in 32 bits.
   3766   __ Cmp(result, Operand(result.W(), SXTW));
   3767   __ Ccmp(result, 1, ZFlag, eq);
   3768   __ B(hi, &done);
   3769 
   3770   // At this point, we have to handle possible inputs of NaN or numbers in the
   3771   // range [-0.5, 1.5[, or numbers larger than 32 bits.
   3772 
   3773   // Deoptimize if the result > 1, as it must be larger than 32 bits.
   3774   __ Cmp(result, 1);
   3775   DeoptimizeIf(hi, instr, DeoptimizeReason::kOverflow);
   3776 
   3777   // Deoptimize for negative inputs, which at this point are only numbers in
    3778   // the range [-0.5, -0.0].
   3779   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3780     __ Fmov(result, input);
   3781     DeoptimizeIfNegative(result, instr, DeoptimizeReason::kMinusZero);
   3782   }
   3783 
   3784   // Deoptimize if the input was NaN.
   3785   __ Fcmp(input, dot_five);
   3786   DeoptimizeIf(vs, instr, DeoptimizeReason::kNaN);
   3787 
   3788   // Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[
   3789   // if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1,
   3790   // else 0; we avoid dealing with 0.499...94 directly.
   3791   __ Cset(result, ge);
   3792   __ Bind(&done);
   3793 }
   3794 
   3795 
   3796 void LCodeGen::DoMathFround(LMathFround* instr) {
   3797   DoubleRegister input = ToDoubleRegister(instr->value());
   3798   DoubleRegister result = ToDoubleRegister(instr->result());
   3799   __ Fcvt(result.S(), input);
   3800   __ Fcvt(result, result.S());
   3801 }
   3802 
   3803 
   3804 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
   3805   DoubleRegister input = ToDoubleRegister(instr->value());
   3806   DoubleRegister result = ToDoubleRegister(instr->result());
   3807   __ Fsqrt(result, input);
   3808 }
   3809 
   3810 
   3811 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
   3812   HMathMinMax::Operation op = instr->hydrogen()->operation();
   3813   if (instr->hydrogen()->representation().IsInteger32()) {
   3814     Register result = ToRegister32(instr->result());
   3815     Register left = ToRegister32(instr->left());
   3816     Operand right = ToOperand32(instr->right());
   3817 
   3818     __ Cmp(left, right);
   3819     __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
   3820   } else if (instr->hydrogen()->representation().IsSmi()) {
   3821     Register result = ToRegister(instr->result());
   3822     Register left = ToRegister(instr->left());
   3823     Operand right = ToOperand(instr->right());
   3824 
   3825     __ Cmp(left, right);
   3826     __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
   3827   } else {
   3828     DCHECK(instr->hydrogen()->representation().IsDouble());
   3829     DoubleRegister result = ToDoubleRegister(instr->result());
   3830     DoubleRegister left = ToDoubleRegister(instr->left());
   3831     DoubleRegister right = ToDoubleRegister(instr->right());
   3832 
   3833     if (op == HMathMinMax::kMathMax) {
   3834       __ Fmax(result, left, right);
   3835     } else {
   3836       DCHECK(op == HMathMinMax::kMathMin);
   3837       __ Fmin(result, left, right);
   3838     }
   3839   }
   3840 }
   3841 
   3842 
   3843 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
   3844   Register dividend = ToRegister32(instr->dividend());
   3845   int32_t divisor = instr->divisor();
   3846   DCHECK(dividend.is(ToRegister32(instr->result())));
   3847 
   3848   // Theoretically, a variation of the branch-free code for integer division by
   3849   // a power of 2 (calculating the remainder via an additional multiplication
   3850   // (which gets simplified to an 'and') and subtraction) should be faster, and
   3851   // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
   3852   // indicate that positive dividends are heavily favored, so the branching
   3853   // version performs better.
   3854   HMod* hmod = instr->hydrogen();
   3855   int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
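           // For example, for divisor = 8 (mask = 7) and dividend = -13, the
           // negative path computes -((-dividend) & mask) = -(13 & 7) = -5,
           // matching JS semantics, where the result takes the dividend's sign.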
   3856   Label dividend_is_not_negative, done;
   3857   if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
   3858     __ Tbz(dividend, kWSignBit, &dividend_is_not_negative);
   3859     // Note that this is correct even for kMinInt operands.
   3860     __ Neg(dividend, dividend);
   3861     __ And(dividend, dividend, mask);
   3862     __ Negs(dividend, dividend);
   3863     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3864       DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
   3865     }
   3866     __ B(&done);
   3867   }
   3868 
   3869   __ bind(&dividend_is_not_negative);
   3870   __ And(dividend, dividend, mask);
   3871   __ bind(&done);
   3872 }
   3873 
   3874 
   3875 void LCodeGen::DoModByConstI(LModByConstI* instr) {
   3876   Register dividend = ToRegister32(instr->dividend());
   3877   int32_t divisor = instr->divisor();
   3878   Register result = ToRegister32(instr->result());
   3879   Register temp = ToRegister32(instr->temp());
   3880   DCHECK(!AreAliased(dividend, result, temp));
   3881 
   3882   if (divisor == 0) {
   3883     Deoptimize(instr, DeoptimizeReason::kDivisionByZero);
   3884     return;
   3885   }
   3886 
   3887   __ TruncatingDiv(result, dividend, Abs(divisor));
   3888   __ Sxtw(dividend.X(), dividend);
   3889   __ Mov(temp, Abs(divisor));
   3890   __ Smsubl(result.X(), result, temp, dividend.X());
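           // result = dividend - (dividend / abs(divisor)) * abs(divisor),
           // i.e. the remainder, computed with a 64-bit multiply-subtract so
           // that the intermediate product cannot overflow.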
   3891 
   3892   // Check for negative zero.
   3893   HMod* hmod = instr->hydrogen();
   3894   if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3895     Label remainder_not_zero;
   3896     __ Cbnz(result, &remainder_not_zero);
   3897     DeoptimizeIfNegative(dividend, instr, DeoptimizeReason::kMinusZero);
   3898     __ bind(&remainder_not_zero);
   3899   }
   3900 }
   3901 
   3902 
   3903 void LCodeGen::DoModI(LModI* instr) {
   3904   Register dividend = ToRegister32(instr->left());
   3905   Register divisor = ToRegister32(instr->right());
   3906   Register result = ToRegister32(instr->result());
   3907 
   3908   Label done;
   3909   // modulo = dividend - quotient * divisor
   3910   __ Sdiv(result, dividend, divisor);
   3911   if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
   3912     DeoptimizeIfZero(divisor, instr, DeoptimizeReason::kDivisionByZero);
   3913   }
   3914   __ Msub(result, result, divisor, dividend);
   3915   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3916     __ Cbnz(result, &done);
   3917     DeoptimizeIfNegative(dividend, instr, DeoptimizeReason::kMinusZero);
   3918   }
   3919   __ Bind(&done);
   3920 }
   3921 
   3922 
   3923 void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
   3924   DCHECK(instr->hydrogen()->representation().IsSmiOrInteger32());
   3925   bool is_smi = instr->hydrogen()->representation().IsSmi();
   3926   Register result =
   3927       is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result());
   3928   Register left =
   3929       is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left());
   3930   int32_t right = ToInteger32(instr->right());
   3931   DCHECK((right > -kMaxInt) && (right < kMaxInt));
   3932 
   3933   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   3934   bool bailout_on_minus_zero =
   3935     instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
   3936 
   3937   if (bailout_on_minus_zero) {
   3938     if (right < 0) {
   3939       // The result is -0 if right is negative and left is zero.
   3940       DeoptimizeIfZero(left, instr, DeoptimizeReason::kMinusZero);
   3941     } else if (right == 0) {
   3942       // The result is -0 if the right is zero and the left is negative.
   3943       DeoptimizeIfNegative(left, instr, DeoptimizeReason::kMinusZero);
   3944     }
   3945   }
   3946 
   3947   switch (right) {
   3948     // Cases which can detect overflow.
   3949     case -1:
   3950       if (can_overflow) {
    3951         // Only the input kMinInt (-0x80000000) can overflow here.
   3952         __ Negs(result, left);
   3953         DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
   3954       } else {
   3955         __ Neg(result, left);
   3956       }
   3957       break;
   3958     case 0:
   3959       // This case can never overflow.
   3960       __ Mov(result, 0);
   3961       break;
   3962     case 1:
   3963       // This case can never overflow.
   3964       __ Mov(result, left, kDiscardForSameWReg);
   3965       break;
   3966     case 2:
   3967       if (can_overflow) {
   3968         __ Adds(result, left, left);
   3969         DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
   3970       } else {
   3971         __ Add(result, left, left);
   3972       }
   3973       break;
   3974 
   3975     default:
   3976       // Multiplication by constant powers of two (and some related values)
   3977       // can be done efficiently with shifted operands.
   3978       int32_t right_abs = Abs(right);
   3979 
   3980       if (base::bits::IsPowerOfTwo32(right_abs)) {
   3981         int right_log2 = WhichPowerOf2(right_abs);
   3982 
   3983         if (can_overflow) {
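                   // Cls counts the redundant leading sign bits; shifting left
                   // by right_log2 overflows iff fewer than right_log2 of them
                   // are available.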
   3984           Register scratch = result;
   3985           DCHECK(!AreAliased(scratch, left));
   3986           __ Cls(scratch, left);
   3987           __ Cmp(scratch, right_log2);
   3988           DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow);
   3989         }
   3990 
   3991         if (right >= 0) {
   3992           // result = left << log2(right)
   3993           __ Lsl(result, left, right_log2);
   3994         } else {
   3995           // result = -left << log2(-right)
   3996           if (can_overflow) {
   3997             __ Negs(result, Operand(left, LSL, right_log2));
   3998             DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
   3999           } else {
   4000             __ Neg(result, Operand(left, LSL, right_log2));
   4001           }
   4002         }
   4003         return;
   4004       }
   4005 
   4006 
   4007       // For the following cases, we could perform a conservative overflow check
    4008       // with CLS as above. However, the few cycles saved are likely not worth
   4009       // the risk of deoptimizing more often than required.
   4010       DCHECK(!can_overflow);
   4011 
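               // For example, right = 5 becomes left + (left << 2), and
               // right = -7 becomes left - (left << 3).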
   4012       if (right >= 0) {
   4013         if (base::bits::IsPowerOfTwo32(right - 1)) {
   4014           // result = left + (left << log2(right - 1))
   4015           __ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1)));
   4016         } else if (base::bits::IsPowerOfTwo32(right + 1)) {
   4017           // result = -left + (left << log2(right + 1))
   4018           __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1)));
   4019           __ Neg(result, result);
   4020         } else {
   4021           UNREACHABLE();
   4022         }
   4023       } else {
   4024         if (base::bits::IsPowerOfTwo32(-right + 1)) {
   4025           // result = left - (left << log2(-right + 1))
   4026           __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(-right + 1)));
   4027         } else if (base::bits::IsPowerOfTwo32(-right - 1)) {
   4028           // result = -left - (left << log2(-right - 1))
   4029           __ Add(result, left, Operand(left, LSL, WhichPowerOf2(-right - 1)));
   4030           __ Neg(result, result);
   4031         } else {
   4032           UNREACHABLE();
   4033         }
   4034       }
   4035   }
   4036 }
   4037 
   4038 
   4039 void LCodeGen::DoMulI(LMulI* instr) {
   4040   Register result = ToRegister32(instr->result());
   4041   Register left = ToRegister32(instr->left());
   4042   Register right = ToRegister32(instr->right());
   4043 
   4044   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   4045   bool bailout_on_minus_zero =
   4046     instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
   4047 
   4048   if (bailout_on_minus_zero && !left.Is(right)) {
   4049     // If one operand is zero and the other is negative, the result is -0.
   4050     //  - Set Z (eq) if either left or right, or both, are 0.
   4051     __ Cmp(left, 0);
   4052     __ Ccmp(right, 0, ZFlag, ne);
   4053     //  - If so (eq), set N (mi) if left + right is negative.
   4054     //  - Otherwise, clear N.
   4055     __ Ccmn(left, right, NoFlag, eq);
   4056     DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero);
   4057   }
   4058 
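          // Smull produces the full 64-bit product of the 32-bit operands. If
          // sign-extending the low 32 bits does not reproduce the product, the
          // result does not fit in int32 and we deoptimize.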
   4059   if (can_overflow) {
   4060     __ Smull(result.X(), left, right);
   4061     __ Cmp(result.X(), Operand(result, SXTW));
   4062     DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
   4063   } else {
   4064     __ Mul(result, left, right);
   4065   }
   4066 }
   4067 
   4068 
   4069 void LCodeGen::DoMulS(LMulS* instr) {
   4070   Register result = ToRegister(instr->result());
   4071   Register left = ToRegister(instr->left());
   4072   Register right = ToRegister(instr->right());
   4073 
   4074   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   4075   bool bailout_on_minus_zero =
   4076     instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
   4077 
   4078   if (bailout_on_minus_zero && !left.Is(right)) {
   4079     // If one operand is zero and the other is negative, the result is -0.
   4080     //  - Set Z (eq) if either left or right, or both, are 0.
   4081     __ Cmp(left, 0);
   4082     __ Ccmp(right, 0, ZFlag, ne);
   4083     //  - If so (eq), set N (mi) if left + right is negative.
   4084     //  - Otherwise, clear N.
   4085     __ Ccmn(left, right, NoFlag, eq);
   4086     DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero);
   4087   }
   4088 
   4089   STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
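          // Each operand is a tagged smi (payload << 32), so the mathematical
          // product sits in the high 64 bits of the 128-bit result, which is
          // exactly what Smulh returns. The compare checks that this fits in
          // 32 bits; SmiTag (a flag-preserving shift) then retags it before
          // the deoptimization check consumes the flags.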
   4090   if (can_overflow) {
   4091     __ Smulh(result, left, right);
   4092     __ Cmp(result, Operand(result.W(), SXTW));
   4093     __ SmiTag(result);
   4094     DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
   4095   } else {
   4096     if (AreAliased(result, left, right)) {
   4097       // All three registers are the same: half untag the input and then
   4098       // multiply, giving a tagged result.
   4099       STATIC_ASSERT((kSmiShift % 2) == 0);
   4100       __ Asr(result, left, kSmiShift / 2);
   4101       __ Mul(result, result, result);
   4102     } else if (result.Is(left) && !left.Is(right)) {
   4103       // Registers result and left alias, right is distinct: untag left into
   4104       // result, and then multiply by right, giving a tagged result.
   4105       __ SmiUntag(result, left);
   4106       __ Mul(result, result, right);
   4107     } else {
   4108       DCHECK(!left.Is(result));
   4109       // Registers result and right alias, left is distinct, or all registers
   4110       // are distinct: untag right into result, and then multiply by left,
   4111       // giving a tagged result.
   4112       __ SmiUntag(result, right);
   4113       __ Mul(result, left, result);
   4114     }
   4115   }
   4116 }
   4117 
   4118 
   4119 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
   4120   // TODO(3095996): Get rid of this. For now, we need to make the
   4121   // result register contain a valid pointer because it is already
   4122   // contained in the register pointer map.
   4123   Register result = ToRegister(instr->result());
   4124   __ Mov(result, 0);
   4125 
   4126   PushSafepointRegistersScope scope(this);
   4127   // Reset the context register.
   4128   if (!result.is(cp)) {
   4129     __ Mov(cp, 0);
   4130   }
   4131   __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   4132   RecordSafepointWithRegisters(
   4133       instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   4134   __ StoreToSafepointRegisterSlot(x0, result);
   4135 }
   4136 
   4137 
   4138 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
   4139   class DeferredNumberTagD: public LDeferredCode {
   4140    public:
   4141     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
   4142         : LDeferredCode(codegen), instr_(instr) { }
   4143     virtual void Generate() { codegen()->DoDeferredNumberTagD(instr_); }
   4144     virtual LInstruction* instr() { return instr_; }
   4145    private:
   4146     LNumberTagD* instr_;
   4147   };
   4148 
   4149   DoubleRegister input = ToDoubleRegister(instr->value());
   4150   Register result = ToRegister(instr->result());
   4151   Register temp1 = ToRegister(instr->temp1());
   4152   Register temp2 = ToRegister(instr->temp2());
   4153 
   4154   DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
   4155   if (FLAG_inline_new) {
   4156     __ AllocateHeapNumber(result, deferred->entry(), temp1, temp2);
   4157   } else {
   4158     __ B(deferred->entry());
   4159   }
   4160 
   4161   __ Bind(deferred->exit());
   4162   __ Str(input, FieldMemOperand(result, HeapNumber::kValueOffset));
   4163 }
   4164 
   4165 
   4166 void LCodeGen::DoDeferredNumberTagU(LInstruction* instr,
   4167                                     LOperand* value,
   4168                                     LOperand* temp1,
   4169                                     LOperand* temp2) {
   4170   Label slow, convert_and_store;
   4171   Register src = ToRegister32(value);
   4172   Register dst = ToRegister(instr->result());
   4173   Register scratch1 = ToRegister(temp1);
   4174 
   4175   if (FLAG_inline_new) {
   4176     Register scratch2 = ToRegister(temp2);
   4177     __ AllocateHeapNumber(dst, &slow, scratch1, scratch2);
   4178     __ B(&convert_and_store);
   4179   }
   4180 
   4181   // Slow case: call the runtime system to do the number allocation.
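          // This point is also reached by fall-through when FLAG_inline_new is off.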
   4182   __ Bind(&slow);
   4183   // TODO(3095996): Put a valid pointer value in the stack slot where the result
   4184   // register is stored, as this register is in the pointer map, but contains an
   4185   // integer value.
   4186   __ Mov(dst, 0);
   4187   {
   4188     // Preserve the value of all registers.
   4189     PushSafepointRegistersScope scope(this);
   4190     // Reset the context register.
   4191     if (!dst.is(cp)) {
   4192       __ Mov(cp, 0);
   4193     }
   4194     __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   4195     RecordSafepointWithRegisters(
   4196       instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   4197     __ StoreToSafepointRegisterSlot(x0, dst);
   4198   }
   4199 
   4200   // Convert number to floating point and store in the newly allocated heap
   4201   // number.
   4202   __ Bind(&convert_and_store);
   4203   DoubleRegister dbl_scratch = double_scratch();
   4204   __ Ucvtf(dbl_scratch, src);
   4205   __ Str(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
   4206 }
   4207 
   4208 
   4209 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
   4210   class DeferredNumberTagU: public LDeferredCode {
   4211    public:
   4212     DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
   4213         : LDeferredCode(codegen), instr_(instr) { }
   4214     virtual void Generate() {
   4215       codegen()->DoDeferredNumberTagU(instr_,
   4216                                       instr_->value(),
   4217                                       instr_->temp1(),
   4218                                       instr_->temp2());
   4219     }
   4220     virtual LInstruction* instr() { return instr_; }
   4221    private:
   4222     LNumberTagU* instr_;
   4223   };
   4224 
   4225   Register value = ToRegister32(instr->value());
   4226   Register result = ToRegister(instr->result());
   4227 
   4228   DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
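          // The comparison is unsigned (hi), so any uint32 value above
          // Smi::kMaxValue, including those with the sign bit set, takes the
          // deferred heap-number path.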
   4229   __ Cmp(value, Smi::kMaxValue);
   4230   __ B(hi, deferred->entry());
   4231   __ SmiTag(result, value.X());
   4232   __ Bind(deferred->exit());
   4233 }
   4234 
   4235 
   4236 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   4237   Register input = ToRegister(instr->value());
   4238   Register scratch = ToRegister(instr->temp());
   4239   DoubleRegister result = ToDoubleRegister(instr->result());
   4240   bool can_convert_undefined_to_nan = instr->truncating();
   4241 
   4242   Label done, load_smi;
   4243 
   4244   // Work out what untag mode we're working with.
   4245   HValue* value = instr->hydrogen()->value();
   4246   NumberUntagDMode mode = value->representation().IsSmi()
   4247       ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
   4248 
   4249   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
   4250     __ JumpIfSmi(input, &load_smi);
   4251 
   4252     Label convert_undefined;
   4253 
   4254     // Heap number map check.
   4255     if (can_convert_undefined_to_nan) {
   4256       __ JumpIfNotHeapNumber(input, &convert_undefined);
   4257     } else {
   4258       DeoptimizeIfNotHeapNumber(input, instr);
   4259     }
   4260 
   4261     // Load heap number.
   4262     __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
   4263     if (instr->hydrogen()->deoptimize_on_minus_zero()) {
   4264       DeoptimizeIfMinusZero(result, instr, DeoptimizeReason::kMinusZero);
   4265     }
   4266     __ B(&done);
   4267 
   4268     if (can_convert_undefined_to_nan) {
   4269       __ Bind(&convert_undefined);
   4270       DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
   4271                           DeoptimizeReason::kNotAHeapNumberUndefined);
   4272 
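              // ToNumber(undefined) is NaN, so load the canonical NaN value.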
   4273       __ LoadRoot(scratch, Heap::kNanValueRootIndex);
   4274       __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
   4275       __ B(&done);
   4276     }
   4277 
   4278   } else {
   4279     DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
   4280     // Fall through to load_smi.
   4281   }
   4282 
   4283   // Smi to double register conversion.
   4284   __ Bind(&load_smi);
   4285   __ SmiUntagToDouble(result, input);
   4286 
   4287   __ Bind(&done);
   4288 }
   4289 
   4290 
   4291 void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
   4292   // This is a pseudo-instruction that ensures that the environment here is
   4293   // properly registered for deoptimization and records the assembler's PC
   4294   // offset.
   4295   LEnvironment* environment = instr->environment();
   4296 
   4297   // If the environment were already registered, we would have no way of
   4298   // backpatching it with the spill slot operands.
   4299   DCHECK(!environment->HasBeenRegistered());
   4300   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
   4301 
   4302   GenerateOsrPrologue();
   4303 }
   4304 
   4305 
   4306 void LCodeGen::DoParameter(LParameter* instr) {
   4307   // Nothing to do.
   4308 }
   4309 
   4310 
   4311 void LCodeGen::DoPreparePushArguments(LPreparePushArguments* instr) {
   4312   __ PushPreamble(instr->argc(), kPointerSize);
   4313 }
   4314 
   4315 
   4316 void LCodeGen::DoPushArguments(LPushArguments* instr) {
   4317   MacroAssembler::PushPopQueue args(masm());
   4318 
   4319   for (int i = 0; i < instr->ArgumentCount(); ++i) {
   4320     LOperand* arg = instr->argument(i);
   4321     if (arg->IsDoubleRegister() || arg->IsDoubleStackSlot()) {
   4322       Abort(kDoPushArgumentNotImplementedForDoubleType);
   4323       return;
   4324     }
   4325     args.Queue(ToRegister(arg));
   4326   }
   4327 
   4328   // The preamble was done by LPreparePushArguments.
   4329   args.PushQueued(MacroAssembler::PushPopQueue::SKIP_PREAMBLE);
   4330 
   4331   RecordPushedArgumentsDelta(instr->ArgumentCount());
   4332 }
   4333 
   4334 
   4335 void LCodeGen::DoReturn(LReturn* instr) {
   4336   if (FLAG_trace && info()->IsOptimizing()) {
   4337     // Push the return value on the stack as the parameter.
   4338     // Runtime::TraceExit returns its parameter in x0.  We're leaving the code
   4339     // managed by the register allocator and tearing down the frame, so it's
   4340     // safe to write to the context register.
   4341     __ Push(x0);
   4342     __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   4343     __ CallRuntime(Runtime::kTraceExit);
   4344   }
   4345 
   4346   if (info()->saves_caller_doubles()) {
   4347     RestoreCallerDoubles();
   4348   }
   4349 
   4350   if (NeedsEagerFrame()) {
   4351     Register stack_pointer = masm()->StackPointer();
   4352     __ Mov(stack_pointer, fp);
   4353     __ Pop(fp, lr);
   4354   }
   4355 
   4356   if (instr->has_constant_parameter_count()) {
   4357     int parameter_count = ToInteger32(instr->constant_parameter_count());
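            // Drop the parameters plus the receiver.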
   4358     __ Drop(parameter_count + 1);
   4359   } else {
   4360     DCHECK(info()->IsStub());  // Functions would need to drop one more value.
   4361     Register parameter_count = ToRegister(instr->parameter_count());
   4362     __ DropBySMI(parameter_count);
   4363   }
   4364   __ Ret();
   4365 }
   4366 
   4367 
   4368 MemOperand LCodeGen::BuildSeqStringOperand(Register string,
   4369                                            Register temp,
   4370                                            LOperand* index,
   4371                                            String::Encoding encoding) {
   4372   if (index->IsConstantOperand()) {
   4373     int offset = ToInteger32(LConstantOperand::cast(index));
   4374     if (encoding == String::TWO_BYTE_ENCODING) {
   4375       offset *= kUC16Size;
   4376     }
   4377     STATIC_ASSERT(kCharSize == 1);
   4378     return FieldMemOperand(string, SeqString::kHeaderSize + offset);
   4379   }
   4380 
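          // Non-constant index: address characters relative to the untagged
          // string data, scaling the sign-extended 32-bit index by the
          // character size.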
   4381   __ Add(temp, string, SeqString::kHeaderSize - kHeapObjectTag);
   4382   if (encoding == String::ONE_BYTE_ENCODING) {
   4383     return MemOperand(temp, ToRegister32(index), SXTW);
   4384   } else {
   4385     STATIC_ASSERT(kUC16Size == 2);
   4386     return MemOperand(temp, ToRegister32(index), SXTW, 1);
   4387   }
   4388 }
   4389 
   4390 
   4391 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
   4392   String::Encoding encoding = instr->hydrogen()->encoding();
   4393   Register string = ToRegister(instr->string());
   4394   Register result = ToRegister(instr->result());
   4395   Register temp = ToRegister(instr->temp());
   4396 
   4397   if (FLAG_debug_code) {
   4398     // Even though this lithium instruction comes with a temp register, we
   4399     // can't use it here because we want to use "AtStart" constraints on the
   4400     // inputs and the debug code here needs a scratch register.
   4401     UseScratchRegisterScope temps(masm());
   4402     Register dbg_temp = temps.AcquireX();
   4403 
   4404     __ Ldr(dbg_temp, FieldMemOperand(string, HeapObject::kMapOffset));
   4405     __ Ldrb(dbg_temp, FieldMemOperand(dbg_temp, Map::kInstanceTypeOffset));
   4406 
   4407     __ And(dbg_temp, dbg_temp,
   4408            Operand(kStringRepresentationMask | kStringEncodingMask));
   4409     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
   4410     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
   4411     __ Cmp(dbg_temp, Operand(encoding == String::ONE_BYTE_ENCODING
   4412                              ? one_byte_seq_type : two_byte_seq_type));
   4413     __ Check(eq, kUnexpectedStringType);
   4414   }
   4415 
   4416   MemOperand operand =
   4417       BuildSeqStringOperand(string, temp, instr->index(), encoding);
   4418   if (encoding == String::ONE_BYTE_ENCODING) {
   4419     __ Ldrb(result, operand);
   4420   } else {
   4421     __ Ldrh(result, operand);
   4422   }
   4423 }
   4424 
   4425 
   4426 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
   4427   String::Encoding encoding = instr->hydrogen()->encoding();
   4428   Register string = ToRegister(instr->string());
   4429   Register value = ToRegister(instr->value());
   4430   Register temp = ToRegister(instr->temp());
   4431 
   4432   if (FLAG_debug_code) {
   4433     DCHECK(ToRegister(instr->context()).is(cp));
   4434     Register index = ToRegister(instr->index());
   4435     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
   4436     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
   4437     int encoding_mask =
   4438         instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
   4439         ? one_byte_seq_type : two_byte_seq_type;
   4440     __ EmitSeqStringSetCharCheck(string, index, kIndexIsInteger32, temp,
   4441                                  encoding_mask);
   4442   }
   4443   MemOperand operand =
   4444       BuildSeqStringOperand(string, temp, instr->index(), encoding);
   4445   if (encoding == String::ONE_BYTE_ENCODING) {
   4446     __ Strb(value, operand);
   4447   } else {
   4448     __ Strh(value, operand);
   4449   }
   4450 }
   4451 
   4452 
   4453 void LCodeGen::DoSmiTag(LSmiTag* instr) {
   4454   HChange* hchange = instr->hydrogen();
   4455   Register input = ToRegister(instr->value());
   4456   Register output = ToRegister(instr->result());
   4457   if (hchange->CheckFlag(HValue::kCanOverflow) &&
   4458       hchange->value()->CheckFlag(HValue::kUint32)) {
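            // A uint32 value with the sign bit set does not fit in the signed
            // 32-bit smi payload.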
   4459     DeoptimizeIfNegative(input.W(), instr, DeoptimizeReason::kOverflow);
   4460   }
   4461   __ SmiTag(output, input);
   4462 }
   4463 
   4464 
   4465 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
   4466   Register input = ToRegister(instr->value());
   4467   Register result = ToRegister(instr->result());
   4468   Label done, untag;
   4469 
   4470   if (instr->needs_check()) {
   4471     DeoptimizeIfNotSmi(input, instr, DeoptimizeReason::kNotASmi);
   4472   }
   4473 
   4474   __ Bind(&untag);
   4475   __ SmiUntag(result, input);
   4476   __ Bind(&done);
   4477 }
   4478 
   4479 
   4480 void LCodeGen::DoShiftI(LShiftI* instr) {
   4481   LOperand* right_op = instr->right();
   4482   Register left = ToRegister32(instr->left());
   4483   Register result = ToRegister32(instr->result());
   4484 
   4485   if (right_op->IsRegister()) {
   4486     Register right = ToRegister32(instr->right());
   4487     switch (instr->op()) {
   4488       case Token::ROR: __ Ror(result, left, right); break;
   4489       case Token::SAR: __ Asr(result, left, right); break;
   4490       case Token::SHL: __ Lsl(result, left, right); break;
   4491       case Token::SHR:
   4492         __ Lsr(result, left, right);
   4493         if (instr->can_deopt()) {
   4494           // If `left >>> right` >= 0x80000000, the result is not representable
   4495           // in a signed 32-bit smi.
   4496           DeoptimizeIfNegative(result, instr, DeoptimizeReason::kNegativeValue);
   4497         }
   4498         break;
   4499       default: UNREACHABLE();
   4500     }
   4501   } else {
   4502     DCHECK(right_op->IsConstantOperand());
   4503     int shift_count = JSShiftAmountFromLConstant(right_op);
   4504     if (shift_count == 0) {
   4505       if ((instr->op() == Token::SHR) && instr->can_deopt()) {
   4506         DeoptimizeIfNegative(left, instr, DeoptimizeReason::kNegativeValue);
   4507       }
   4508       __ Mov(result, left, kDiscardForSameWReg);
   4509     } else {
   4510       switch (instr->op()) {
   4511         case Token::ROR: __ Ror(result, left, shift_count); break;
   4512         case Token::SAR: __ Asr(result, left, shift_count); break;
   4513         case Token::SHL: __ Lsl(result, left, shift_count); break;
   4514         case Token::SHR: __ Lsr(result, left, shift_count); break;
   4515         default: UNREACHABLE();
   4516       }
   4517     }
   4518   }
   4519 }
   4520 
   4521 
   4522 void LCodeGen::DoShiftS(LShiftS* instr) {
   4523   LOperand* right_op = instr->right();
   4524   Register left = ToRegister(instr->left());
   4525   Register result = ToRegister(instr->result());
   4526 
   4527   if (right_op->IsRegister()) {
   4528     Register right = ToRegister(instr->right());
   4529 
   4530     // JavaScript shifts only look at the bottom 5 bits of the 'right' operand.
   4531     // Since we're handling smis in X registers, we have to extract these bits
   4532     // explicitly.
   4533     __ Ubfx(result, right, kSmiShift, 5);
   4534 
   4535     switch (instr->op()) {
   4536       case Token::ROR: {
   4537         // This is the only case that needs a scratch register. To keep things
   4538         // simple for the other cases, borrow a MacroAssembler scratch register.
   4539         UseScratchRegisterScope temps(masm());
   4540         Register temp = temps.AcquireW();
   4541         __ SmiUntag(temp, left);
   4542         __ Ror(result.W(), temp.W(), result.W());
   4543         __ SmiTag(result);
   4544         break;
   4545       }
   4546       case Token::SAR:
   4547         __ Asr(result, left, result);
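                // The arithmetic shift dragged payload bits into the low half
                // of the register; clear them to leave a valid smi.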
   4548         __ Bic(result, result, kSmiShiftMask);
   4549         break;
   4550       case Token::SHL:
   4551         __ Lsl(result, left, result);
   4552         break;
   4553       case Token::SHR:
   4554         __ Lsr(result, left, result);
   4555         __ Bic(result, result, kSmiShiftMask);
   4556         if (instr->can_deopt()) {
   4557           // If `left >>> right` >= 0x80000000, the result is not representable
   4558           // in a signed 32-bit smi.
   4559           DeoptimizeIfNegative(result, instr, DeoptimizeReason::kNegativeValue);
   4560         }
   4561         break;
   4562       default: UNREACHABLE();
   4563     }
   4564   } else {
   4565     DCHECK(right_op->IsConstantOperand());
   4566     int shift_count = JSShiftAmountFromLConstant(right_op);
   4567     if (shift_count == 0) {
   4568       if ((instr->op() == Token::SHR) && instr->can_deopt()) {
   4569         DeoptimizeIfNegative(left, instr, DeoptimizeReason::kNegativeValue);
   4570       }
   4571       __ Mov(result, left);
   4572     } else {
   4573       switch (instr->op()) {
   4574         case Token::ROR:
   4575           __ SmiUntag(result, left);
   4576           __ Ror(result.W(), result.W(), shift_count);
   4577           __ SmiTag(result);
   4578           break;
   4579         case Token::SAR:
   4580           __ Asr(result, left, shift_count);
   4581           __ Bic(result, result, kSmiShiftMask);
   4582           break;
   4583         case Token::SHL:
   4584           __ Lsl(result, left, shift_count);
   4585           break;
   4586         case Token::SHR:
   4587           __ Lsr(result, left, shift_count);
   4588           __ Bic(result, result, kSmiShiftMask);
   4589           break;
   4590         default: UNREACHABLE();
   4591       }
   4592     }
   4593   }
   4594 }
   4595 
   4596 
   4597 void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
   4598   __ Debug("LDebugBreak", 0, BREAK);
   4599 }
   4600 
   4601 
   4602 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
   4603   DCHECK(ToRegister(instr->context()).is(cp));
   4604   Register scratch1 = x5;
   4605   Register scratch2 = x6;
   4606   DCHECK(instr->IsMarkedAsCall());
   4607 
   4608   // TODO(all): if Mov could handle objects in new space then it could be used
   4609   // here.
   4610   __ LoadHeapObject(scratch1, instr->hydrogen()->declarations());
   4611   __ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
   4612   __ Push(scratch1, scratch2);
   4613   __ LoadHeapObject(scratch1, instr->hydrogen()->feedback_vector());
   4614   __ Push(scratch1);
   4615   CallRuntime(Runtime::kDeclareGlobals, instr);
   4616 }
   4617 
   4618 
   4619 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
   4620   PushSafepointRegistersScope scope(this);
   4621   LoadContextFromDeferred(instr->context());
   4622   __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
   4623   RecordSafepointWithLazyDeopt(
   4624       instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
   4625   DCHECK(instr->HasEnvironment());
   4626   LEnvironment* env = instr->environment();
   4627   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
   4628 }
   4629 
   4630 
   4631 void LCodeGen::DoStackCheck(LStackCheck* instr) {
   4632   class DeferredStackCheck: public LDeferredCode {
   4633    public:
   4634     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
   4635         : LDeferredCode(codegen), instr_(instr) { }
   4636     virtual void Generate() { codegen()->DoDeferredStackCheck(instr_); }
   4637     virtual LInstruction* instr() { return instr_; }
   4638    private:
   4639     LStackCheck* instr_;
   4640   };
   4641 
   4642   DCHECK(instr->HasEnvironment());
   4643   LEnvironment* env = instr->environment();
   4644   // There is no LLazyBailout instruction for stack-checks. We have to
   4645   // prepare for lazy deoptimization explicitly here.
   4646   if (instr->hydrogen()->is_function_entry()) {
   4647     // Perform stack overflow check.
   4648     Label done;
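            // The stack grows downwards, so being higher than or the same as
            // the limit (hs, unsigned) means there is headroom and the builtin
            // call can be skipped.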
   4649     __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
   4650     __ B(hs, &done);
   4651 
   4652     PredictableCodeSizeScope predictable(masm_,
   4653                                          Assembler::kCallSizeWithRelocation);
   4654     DCHECK(instr->context()->IsRegister());
   4655     DCHECK(ToRegister(instr->context()).is(cp));
   4656     CallCode(isolate()->builtins()->StackCheck(),
   4657              RelocInfo::CODE_TARGET,
   4658              instr);
   4659     __ Bind(&done);
   4660   } else {
   4661     DCHECK(instr->hydrogen()->is_backwards_branch());
   4662     // Perform stack overflow check if this goto needs it before jumping.
   4663     DeferredStackCheck* deferred_stack_check =
   4664         new(zone()) DeferredStackCheck(this, instr);
   4665     __ CompareRoot(masm()->StackPointer(), Heap::kStackLimitRootIndex);
   4666     __ B(lo, deferred_stack_check->entry());
   4667 
   4668     EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
   4669     __ Bind(instr->done_label());
   4670     deferred_stack_check->SetExit(instr->done_label());
   4671     RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
   4672     // Don't record a deoptimization index for the safepoint here.
   4673     // This will be done explicitly when emitting the call and the safepoint in
   4674     // the deferred code.
   4675   }
   4676 }
   4677 
   4678 
   4679 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
   4680   Register function = ToRegister(instr->function());
   4681   Register code_object = ToRegister(instr->code_object());
   4682   Register temp = ToRegister(instr->temp());
   4683   __ Add(temp, code_object, Code::kHeaderSize - kHeapObjectTag);
   4684   __ Str(temp, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
   4685 }
   4686 
   4687 
   4688 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
   4689   Register context = ToRegister(instr->context());
   4690   Register value = ToRegister(instr->value());
   4691   Register scratch = ToRegister(instr->temp());
   4692   MemOperand target = ContextMemOperand(context, instr->slot_index());
   4693 
   4694   Label skip_assignment;
   4695 
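          // The hole marks an uninitialized binding: depending on the hydrogen
          // instruction we either deoptimize or skip the assignment entirely.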
   4696   if (instr->hydrogen()->RequiresHoleCheck()) {
   4697     __ Ldr(scratch, target);
   4698     if (instr->hydrogen()->DeoptimizesOnHole()) {
   4699       DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr,
   4700                        DeoptimizeReason::kHole);
   4701     } else {
   4702       __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
   4703     }
   4704   }
   4705 
   4706   __ Str(value, target);
   4707   if (instr->hydrogen()->NeedsWriteBarrier()) {
   4708     SmiCheck check_needed =
   4709         instr->hydrogen()->value()->type().IsHeapObject()
   4710             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   4711     __ RecordWriteContextSlot(context, static_cast<int>(target.offset()), value,
   4712                               scratch, GetLinkRegisterState(), kSaveFPRegs,
   4713                               EMIT_REMEMBERED_SET, check_needed);
   4714   }
   4715   __ Bind(&skip_assignment);
   4716 }
   4717 
   4718 
   4719 void LCodeGen::DoStoreKeyedExternal(LStoreKeyedExternal* instr) {
   4720   Register ext_ptr = ToRegister(instr->elements());
   4721   Register key = no_reg;
   4722   Register scratch;
   4723   ElementsKind elements_kind = instr->elements_kind();
   4724 
   4725   bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
   4726   bool key_is_constant = instr->key()->IsConstantOperand();
   4727   int constant_key = 0;
   4728   if (key_is_constant) {
   4729     DCHECK(instr->temp() == NULL);
   4730     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   4731     if (constant_key & 0xf0000000) {
   4732       Abort(kArrayIndexConstantValueTooBig);
   4733     }
   4734   } else {
   4735     key = ToRegister(instr->key());
   4736     scratch = ToRegister(instr->temp());
   4737   }
   4738 
   4739   MemOperand dst =
   4740     PrepareKeyedExternalArrayOperand(key, ext_ptr, scratch, key_is_smi,
   4741                                      key_is_constant, constant_key,
   4742                                      elements_kind,
   4743                                      instr->base_offset());
   4744 
   4745   if (elements_kind == FLOAT32_ELEMENTS) {
   4746     DoubleRegister value = ToDoubleRegister(instr->value());
   4747     DoubleRegister dbl_scratch = double_scratch();
   4748     __ Fcvt(dbl_scratch.S(), value);
   4749     __ Str(dbl_scratch.S(), dst);
   4750   } else if (elements_kind == FLOAT64_ELEMENTS) {
   4751     DoubleRegister value = ToDoubleRegister(instr->value());
   4752     __ Str(value, dst);
   4753   } else {
   4754     Register value = ToRegister(instr->value());
   4755 
   4756     switch (elements_kind) {
   4757       case UINT8_ELEMENTS:
   4758       case UINT8_CLAMPED_ELEMENTS:
   4759       case INT8_ELEMENTS:
   4760         __ Strb(value, dst);
   4761         break;
   4762       case INT16_ELEMENTS:
   4763       case UINT16_ELEMENTS:
   4764         __ Strh(value, dst);
   4765         break;
   4766       case INT32_ELEMENTS:
   4767       case UINT32_ELEMENTS:
   4768         __ Str(value.W(), dst);
   4769         break;
   4770       case FLOAT32_ELEMENTS:
   4771       case FLOAT64_ELEMENTS:
   4772       case FAST_DOUBLE_ELEMENTS:
   4773       case FAST_ELEMENTS:
   4774       case FAST_SMI_ELEMENTS:
   4775       case FAST_HOLEY_DOUBLE_ELEMENTS:
   4776       case FAST_HOLEY_ELEMENTS:
   4777       case FAST_HOLEY_SMI_ELEMENTS:
   4778       case DICTIONARY_ELEMENTS:
   4779       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
   4780       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
   4781       case FAST_STRING_WRAPPER_ELEMENTS:
   4782       case SLOW_STRING_WRAPPER_ELEMENTS:
   4783       case NO_ELEMENTS:
   4784         UNREACHABLE();
   4785         break;
   4786     }
   4787   }
   4788 }
   4789 
   4790 
   4791 void LCodeGen::DoStoreKeyedFixedDouble(LStoreKeyedFixedDouble* instr) {
   4792   Register elements = ToRegister(instr->elements());
   4793   DoubleRegister value = ToDoubleRegister(instr->value());
   4794   MemOperand mem_op;
   4795 
   4796   if (instr->key()->IsConstantOperand()) {
   4797     int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   4798     if (constant_key & 0xf0000000) {
   4799       Abort(kArrayIndexConstantValueTooBig);
   4800     }
   4801     int offset = instr->base_offset() + constant_key * kDoubleSize;
   4802     mem_op = MemOperand(elements, offset);
   4803   } else {
   4804     Register store_base = ToRegister(instr->temp());
   4805     Register key = ToRegister(instr->key());
   4806     bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
   4807     mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
   4808                                       instr->hydrogen()->elements_kind(),
   4809                                       instr->hydrogen()->representation(),
   4810                                       instr->base_offset());
   4811   }
   4812 
   4813   if (instr->NeedsCanonicalization()) {
   4814     __ CanonicalizeNaN(double_scratch(), value);
   4815     __ Str(double_scratch(), mem_op);
   4816   } else {
   4817     __ Str(value, mem_op);
   4818   }
   4819 }
   4820 
   4821 
   4822 void LCodeGen::DoStoreKeyedFixed(LStoreKeyedFixed* instr) {
   4823   Register value = ToRegister(instr->value());
   4824   Register elements = ToRegister(instr->elements());
   4825   Register scratch = no_reg;
   4826   Register store_base = no_reg;
   4827   Register key = no_reg;
   4828   MemOperand mem_op;
   4829 
   4830   if (!instr->key()->IsConstantOperand() ||
   4831       instr->hydrogen()->NeedsWriteBarrier()) {
   4832     scratch = ToRegister(instr->temp());
   4833   }
   4834 
   4835   Representation representation = instr->hydrogen()->value()->representation();
   4836   if (instr->key()->IsConstantOperand()) {
   4837     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
   4838     int offset = instr->base_offset() +
   4839         ToInteger32(const_operand) * kPointerSize;
   4840     store_base = elements;
   4841     if (representation.IsInteger32()) {
   4842       DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
   4843       DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
   4844       STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
   4845       STATIC_ASSERT(kSmiTag == 0);
   4846       mem_op = UntagSmiMemOperand(store_base, offset);
   4847     } else {
   4848       mem_op = MemOperand(store_base, offset);
   4849     }
   4850   } else {
   4851     store_base = scratch;
   4852     key = ToRegister(instr->key());
   4853     bool key_is_tagged = instr->hydrogen()->key()->representation().IsSmi();
   4854 
   4855     mem_op = PrepareKeyedArrayOperand(store_base, elements, key, key_is_tagged,
   4856                                       instr->hydrogen()->elements_kind(),
   4857                                       representation, instr->base_offset());
   4858   }
   4859 
   4860   __ Store(value, mem_op, representation);
   4861 
   4862   if (instr->hydrogen()->NeedsWriteBarrier()) {
   4863     DCHECK(representation.IsTagged());
   4864     // This assignment may cause element_addr to alias store_base.
   4865     Register element_addr = scratch;
   4866     SmiCheck check_needed =
   4867         instr->hydrogen()->value()->type().IsHeapObject()
   4868             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
   4869     // Compute the address of the modified element into the scratch register.
   4870     __ Add(element_addr, mem_op.base(), mem_op.OffsetAsOperand());
   4871     __ RecordWrite(elements, element_addr, value, GetLinkRegisterState(),
   4872                    kSaveFPRegs, EMIT_REMEMBERED_SET, check_needed,
   4873                    instr->hydrogen()->PointersToHereCheckForValue());
   4874   }
   4875 }
   4876 
   4877 
   4878 void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
   4879   class DeferredMaybeGrowElements final : public LDeferredCode {
   4880    public:
   4881     DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
   4882         : LDeferredCode(codegen), instr_(instr) {}
   4883     void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
   4884     LInstruction* instr() override { return instr_; }
   4885 
   4886    private:
   4887     LMaybeGrowElements* instr_;
   4888   };
   4889 
   4890   Register result = x0;
   4891   DeferredMaybeGrowElements* deferred =
   4892       new (zone()) DeferredMaybeGrowElements(this, instr);
   4893   LOperand* key = instr->key();
   4894   LOperand* current_capacity = instr->current_capacity();
   4895 
   4896   DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
   4897   DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
   4898   DCHECK(key->IsConstantOperand() || key->IsRegister());
   4899   DCHECK(current_capacity->IsConstantOperand() ||
   4900          current_capacity->IsRegister());
   4901 
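          // Take the deferred grow path whenever key >= current capacity; the
          // cases below only differ in which operands are compile-time
          // constants.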
   4902   if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
   4903     int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
   4904     int32_t constant_capacity =
   4905         ToInteger32(LConstantOperand::cast(current_capacity));
   4906     if (constant_key >= constant_capacity) {
   4907       // Deferred case.
   4908       __ B(deferred->entry());
   4909     }
   4910   } else if (key->IsConstantOperand()) {
   4911     int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
   4912     __ Cmp(ToRegister(current_capacity), Operand(constant_key));
   4913     __ B(le, deferred->entry());
   4914   } else if (current_capacity->IsConstantOperand()) {
   4915     int32_t constant_capacity =
   4916         ToInteger32(LConstantOperand::cast(current_capacity));
   4917     __ Cmp(ToRegister(key), Operand(constant_capacity));
   4918     __ B(ge, deferred->entry());
   4919   } else {
   4920     __ Cmp(ToRegister(key), ToRegister(current_capacity));
   4921     __ B(ge, deferred->entry());
   4922   }
   4923 
   4924   __ Mov(result, ToRegister(instr->elements()));
   4925 
   4926   __ Bind(deferred->exit());
   4927 }
   4928 
   4929 
   4930 void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
   4931   // TODO(3095996): Get rid of this. For now, we need to make the
   4932   // result register contain a valid pointer because it is already
   4933   // contained in the register pointer map.
   4934   Register result = x0;
   4935   __ Mov(result, 0);
   4936 
   4937   // We have to call a stub.
   4938   {
   4939     PushSafepointRegistersScope scope(this);
   4940     __ Move(result, ToRegister(instr->object()));
   4941 
   4942     LOperand* key = instr->key();
   4943     if (key->IsConstantOperand()) {
   4944       __ Mov(x3, Operand(ToSmi(LConstantOperand::cast(key))));
   4945     } else {
   4946       __ Mov(x3, ToRegister(key));
   4947       __ SmiTag(x3);
   4948     }
   4949 
   4950     GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
   4951     __ CallStub(&stub);
   4952     RecordSafepointWithLazyDeopt(
   4953         instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
   4954     __ StoreToSafepointRegisterSlot(result, result);
   4955   }
   4956 
   4957   // Deopt on smi, which means the elements array changed to dictionary mode.
   4958   DeoptimizeIfSmi(result, instr, DeoptimizeReason::kSmi);
   4959 }
   4960 
   4961 
   4962 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
   4963   Representation representation = instr->representation();
   4964 
   4965   Register object = ToRegister(instr->object());
   4966   HObjectAccess access = instr->hydrogen()->access();
   4967   int offset = access.offset();
   4968 
   4969   if (access.IsExternalMemory()) {
   4970     DCHECK(!instr->hydrogen()->has_transition());
   4971     DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
   4972     Register value = ToRegister(instr->value());
   4973     __ Store(value, MemOperand(object, offset), representation);
   4974     return;
   4975   }
   4976 
   4977   __ AssertNotSmi(object);
   4978 
   4979   if (!FLAG_unbox_double_fields && representation.IsDouble()) {
   4980     DCHECK(access.IsInobject());
   4981     DCHECK(!instr->hydrogen()->has_transition());
   4982     DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
   4983     FPRegister value = ToDoubleRegister(instr->value());
   4984     __ Str(value, FieldMemOperand(object, offset));
   4985     return;
   4986   }
   4987 
   4988   DCHECK(!representation.IsSmi() ||
   4989          !instr->value()->IsConstantOperand() ||
   4990          IsInteger32Constant(LConstantOperand::cast(instr->value())));
   4991 
   4992   if (instr->hydrogen()->has_transition()) {
   4993     Handle<Map> transition = instr->hydrogen()->transition_map();
   4994     AddDeprecationDependency(transition);
   4995     // Store the new map value.
   4996     Register new_map_value = ToRegister(instr->temp0());
   4997     __ Mov(new_map_value, Operand(transition));
   4998     __ Str(new_map_value, FieldMemOperand(object, HeapObject::kMapOffset));
   4999     if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
   5000       // Update the write barrier for the map field.
   5001       __ RecordWriteForMap(object,
   5002                            new_map_value,
   5003                            ToRegister(instr->temp1()),
   5004                            GetLinkRegisterState(),
   5005                            kSaveFPRegs);
   5006     }
   5007   }
   5008 
   5009   // Do the store.
   5010   Register destination;
   5011   if (access.IsInobject()) {
   5012     destination = object;
   5013   } else {
   5014     Register temp0 = ToRegister(instr->temp0());
   5015     __ Ldr(temp0, FieldMemOperand(object, JSObject::kPropertiesOffset));
   5016     destination = temp0;
   5017   }
   5018 
   5019   if (FLAG_unbox_double_fields && representation.IsDouble()) {
   5020     DCHECK(access.IsInobject());
   5021     FPRegister value = ToDoubleRegister(instr->value());
   5022     __ Str(value, FieldMemOperand(object, offset));
   5023   } else if (representation.IsSmi() &&
   5024              instr->hydrogen()->value()->representation().IsInteger32()) {
   5025     DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
   5026 #ifdef DEBUG
   5027     Register temp0 = ToRegister(instr->temp0());
   5028     __ Ldr(temp0, FieldMemOperand(destination, offset));
   5029     __ AssertSmi(temp0);
   5030     // If destination aliased temp0, restore it to the address calculated
   5031     // earlier.
   5032     if (destination.Is(temp0)) {
   5033       DCHECK(!access.IsInobject());
   5034       __ Ldr(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
   5035     }
   5036 #endif
   5037     STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
   5038     STATIC_ASSERT(kSmiTag == 0);
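            // The field already holds a smi, so its low word is zero; writing
            // the raw int32 to the payload (high) word alone yields a
            // correctly tagged smi.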
   5039     Register value = ToRegister(instr->value());
   5040     __ Store(value, UntagSmiFieldMemOperand(destination, offset),
   5041              Representation::Integer32());
   5042   } else {
   5043     Register value = ToRegister(instr->value());
   5044     __ Store(value, FieldMemOperand(destination, offset), representation);
   5045   }
   5046   if (instr->hydrogen()->NeedsWriteBarrier()) {
   5047     Register value = ToRegister(instr->value());
   5048     __ RecordWriteField(destination,
   5049                         offset,
   5050                         value,                        // Clobbered.
   5051                         ToRegister(instr->temp1()),   // Clobbered.
   5052                         GetLinkRegisterState(),
   5053                         kSaveFPRegs,
   5054                         EMIT_REMEMBERED_SET,
   5055                         instr->hydrogen()->SmiCheckForWriteBarrier(),
   5056                         instr->hydrogen()->PointersToHereCheckForValue());
   5057   }
   5058 }
   5059 
   5060 
   5061 void LCodeGen::DoStringAdd(LStringAdd* instr) {
   5062   DCHECK(ToRegister(instr->context()).is(cp));
   5063   DCHECK(ToRegister(instr->left()).Is(x1));
   5064   DCHECK(ToRegister(instr->right()).Is(x0));
   5065   StringAddStub stub(isolate(),
   5066                      instr->hydrogen()->flags(),
   5067                      instr->hydrogen()->pretenure_flag());
   5068   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   5069 }
   5070 
   5071 
   5072 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
   5073   class DeferredStringCharCodeAt: public LDeferredCode {
   5074    public:
   5075     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
   5076         : LDeferredCode(codegen), instr_(instr) { }
   5077     virtual void Generate() { codegen()->DoDeferredStringCharCodeAt(instr_); }
   5078     virtual LInstruction* instr() { return instr_; }
   5079    private:
   5080     LStringCharCodeAt* instr_;
   5081   };
   5082 
   5083   DeferredStringCharCodeAt* deferred =
   5084       new(zone()) DeferredStringCharCodeAt(this, instr);
   5085 
   5086   StringCharLoadGenerator::Generate(masm(),
   5087                                     ToRegister(instr->string()),
   5088                                     ToRegister32(instr->index()),
   5089                                     ToRegister(instr->result()),
   5090                                     deferred->entry());
   5091   __ Bind(deferred->exit());
   5092 }
   5093 
   5094 
   5095 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
   5096   Register string = ToRegister(instr->string());
   5097   Register result = ToRegister(instr->result());
   5098 
   5099   // TODO(3095996): Get rid of this. For now, we need to make the
   5100   // result register contain a valid pointer because it is already
   5101   // contained in the register pointer map.
   5102   __ Mov(result, 0);
   5103 
   5104   PushSafepointRegistersScope scope(this);
   5105   __ Push(string);
   5106   // Push the index as a smi. This is safe because of the checks in
   5107   // DoStringCharCodeAt above.
   5108   Register index = ToRegister(instr->index());
   5109   __ SmiTagAndPush(index);
   5110 
   5111   CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
   5112                           instr->context());
   5113   __ AssertSmi(x0);
   5114   __ SmiUntag(x0);
   5115   __ StoreToSafepointRegisterSlot(x0, result);
   5116 }
   5117 
   5118 
   5119 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
   5120   class DeferredStringCharFromCode: public LDeferredCode {
   5121    public:
   5122     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
   5123         : LDeferredCode(codegen), instr_(instr) { }
   5124     virtual void Generate() { codegen()->DoDeferredStringCharFromCode(instr_); }
   5125     virtual LInstruction* instr() { return instr_; }
   5126    private:
   5127     LStringCharFromCode* instr_;
   5128   };
   5129 
   5130   DeferredStringCharFromCode* deferred =
   5131       new(zone()) DeferredStringCharFromCode(this, instr);
   5132 
   5133   DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
   5134   Register char_code = ToRegister32(instr->char_code());
   5135   Register result = ToRegister(instr->result());
   5136 
   5137   __ Cmp(char_code, String::kMaxOneByteCharCode);
   5138   __ B(hi, deferred->entry());
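          // Fast path: look the code up in the single-character string cache.
          // An undefined entry means the string is not cached, so fall back to
          // the deferred runtime path.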
   5139   __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
   5140   __ Add(result, result, FixedArray::kHeaderSize - kHeapObjectTag);
   5141   __ Ldr(result, MemOperand(result, char_code, SXTW, kPointerSizeLog2));
   5142   __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
   5143   __ B(eq, deferred->entry());
   5144   __ Bind(deferred->exit());
   5145 }
   5146 
   5147 
   5148 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
   5149   Register char_code = ToRegister(instr->char_code());
   5150   Register result = ToRegister(instr->result());
   5151 
   5152   // TODO(3095996): Get rid of this. For now, we need to make the
   5153   // result register contain a valid pointer because it is already
   5154   // contained in the register pointer map.
   5155   __ Mov(result, 0);
   5156 
   5157   PushSafepointRegistersScope scope(this);
   5158   __ SmiTagAndPush(char_code);
   5159   CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
   5160                           instr->context());
   5161   __ StoreToSafepointRegisterSlot(x0, result);
   5162 }
   5163 
   5164 
   5165 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
   5166   DCHECK(ToRegister(instr->context()).is(cp));
   5167   DCHECK(ToRegister(instr->left()).is(x1));
   5168   DCHECK(ToRegister(instr->right()).is(x0));
   5169 
   5170   Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
   5171   CallCode(code, RelocInfo::CODE_TARGET, instr);
   5172   __ CompareRoot(x0, Heap::kTrueValueRootIndex);
   5173   EmitBranch(instr, eq);
   5174 }
   5175 
   5176 
   5177 void LCodeGen::DoSubI(LSubI* instr) {
   5178   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   5179   Register result = ToRegister32(instr->result());
   5180   Register left = ToRegister32(instr->left());
   5181   Operand right = ToShiftedRightOperand32(instr->right(), instr);
   5182 
   5183   if (can_overflow) {
   5184     __ Subs(result, left, right);
   5185     DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
   5186   } else {
   5187     __ Sub(result, left, right);
   5188   }
   5189 }
   5190 
   5191 
   5192 void LCodeGen::DoSubS(LSubS* instr) {
   5193   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   5194   Register result = ToRegister(instr->result());
   5195   Register left = ToRegister(instr->left());
   5196   Operand right = ToOperand(instr->right());
   5197   if (can_overflow) {
   5198     __ Subs(result, left, right);
   5199     DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
   5200   } else {
   5201     __ Sub(result, left, right);
   5202   }
   5203 }
   5204 
   5205 
   5206 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
   5207                                    LOperand* value,
   5208                                    LOperand* temp1,
   5209                                    LOperand* temp2) {
   5210   Register input = ToRegister(value);
   5211   Register scratch1 = ToRegister(temp1);
   5212   DoubleRegister dbl_scratch1 = double_scratch();
   5213 
   5214   Label done;
   5215 
   5216   if (instr->truncating()) {
   5217     UseScratchRegisterScope temps(masm());
   5218     Register output = ToRegister(instr->result());
   5219     Register input_map = temps.AcquireX();
   5220     Register input_instance_type = input_map;
   5221     Label truncate;
   5222     __ CompareObjectType(input, input_map, input_instance_type,
   5223                          HEAP_NUMBER_TYPE);
   5224     __ B(eq, &truncate);
   5225     __ Cmp(input_instance_type, ODDBALL_TYPE);
   5226     DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball);
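            // Oddballs are accepted above because they cache their numeric
            // value at the same offset as a heap number's value, so
            // TruncateHeapNumberToI can read either.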
   5227     __ Bind(&truncate);
   5228     __ TruncateHeapNumberToI(output, input);
   5229   } else {
   5230     Register output = ToRegister32(instr->result());
   5231     DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
   5232 
   5233     DeoptimizeIfNotHeapNumber(input, instr);
   5234 
   5235     // A heap number: load the value and convert it to int32 with a
   5236     // non-truncating conversion. If the result is out of range, deoptimize.
   5237     __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
   5238     __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
   5239     DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
   5240 
   5241     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   5242       __ Cmp(output, 0);
   5243       __ B(ne, &done);
   5244       __ Fmov(scratch1, dbl_scratch1);
   5245       DeoptimizeIfNegative(scratch1, instr, DeoptimizeReason::kMinusZero);
   5246     }
   5247   }
   5248   __ Bind(&done);
   5249 }
   5250 
   5251 
   5252 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
   5253   class DeferredTaggedToI: public LDeferredCode {
   5254    public:
   5255     DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
   5256         : LDeferredCode(codegen), instr_(instr) { }
   5257     virtual void Generate() {
   5258       codegen()->DoDeferredTaggedToI(instr_, instr_->value(), instr_->temp1(),
   5259                                      instr_->temp2());
   5260     }
   5261 
   5262     virtual LInstruction* instr() { return instr_; }
   5263    private:
   5264     LTaggedToI* instr_;
   5265   };
   5266 
   5267   Register input = ToRegister(instr->value());
   5268   Register output = ToRegister(instr->result());
   5269 
   5270   if (instr->hydrogen()->value()->representation().IsSmi()) {
   5271     __ SmiUntag(output, input);
   5272   } else {
   5273     DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
   5274 
   5275     __ JumpIfNotSmi(input, deferred->entry());
   5276     __ SmiUntag(output, input);
   5277     __ Bind(deferred->exit());
   5278   }
   5279 }
   5280 
   5281 
   5282 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   5283   Register result = ToRegister(instr->result());
   5284   __ Ldr(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   5285 }
   5286 
   5287 
   5288 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
   5289   Register object = ToRegister(instr->object());
   5290 
   5291   Handle<Map> from_map = instr->original_map();
   5292   Handle<Map> to_map = instr->transitioned_map();
   5293   ElementsKind from_kind = instr->from_kind();
   5294   ElementsKind to_kind = instr->to_kind();
   5295 
   5296   Label not_applicable;
   5297 
   5298   if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
   5299     Register temp1 = ToRegister(instr->temp1());
   5300     Register new_map = ToRegister(instr->temp2());
   5301     __ CheckMap(object, temp1, from_map, &not_applicable, DONT_DO_SMI_CHECK);
   5302     __ Mov(new_map, Operand(to_map));
   5303     __ Str(new_map, FieldMemOperand(object, HeapObject::kMapOffset));
   5304     // Write barrier.
   5305     __ RecordWriteForMap(object, new_map, temp1, GetLinkRegisterState(),
   5306                          kDontSaveFPRegs);
   5307   } else {
   5308     {
   5309       UseScratchRegisterScope temps(masm());
   5310       // Use the temp register only in a restricted scope - the codegen checks
   5311       // that we do not use any register across a call.
   5312       __ CheckMap(object, temps.AcquireX(), from_map, &not_applicable,
   5313                   DONT_DO_SMI_CHECK);
   5314     }
   5315     DCHECK(object.is(x0));
   5316     DCHECK(ToRegister(instr->context()).is(cp));
   5317     PushSafepointRegistersScope scope(this);
   5318     __ Mov(x1, Operand(to_map));
   5319     TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
   5320     __ CallStub(&stub);
   5321     RecordSafepointWithRegisters(
   5322         instr->pointer_map(), 0, Safepoint::kLazyDeopt);
   5323   }
   5324   __ Bind(&not_applicable);
   5325 }
   5326 
   5327 
void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
  Register object = ToRegister(instr->object());
  Register temp1 = ToRegister(instr->temp1());
  Register temp2 = ToRegister(instr->temp2());

  Label no_memento_found;
  __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
  DeoptimizeIf(eq, instr, DeoptimizeReason::kMementoFound);
  __ Bind(&no_memento_found);
}


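// Truncate the double to an int32 (ECMAScript ToInt32-style truncation) and
// optionally Smi-tag the 32-bit result.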
void LCodeGen::DoTruncateDoubleToIntOrSmi(LTruncateDoubleToIntOrSmi* instr) {
  DoubleRegister input = ToDoubleRegister(instr->value());
  Register result = ToRegister(instr->result());
  __ TruncateDoubleToI(result, input);
  if (instr->tag_result()) {
    __ SmiTag(result, result);
  }
}


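// typeof for a Smi is always "number", so that case is answered inline;
// every other value is dispatched to the Typeof stub.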
void LCodeGen::DoTypeof(LTypeof* instr) {
  DCHECK(ToRegister(instr->value()).is(x3));
  DCHECK(ToRegister(instr->result()).is(x0));
  Label end, do_call;
  Register value_register = ToRegister(instr->value());
  __ JumpIfNotSmi(value_register, &do_call);
  __ Mov(x0, Immediate(isolate()->factory()->number_string()));
  __ B(&end);
  __ Bind(&do_call);
  Callable callable = CodeFactory::Typeof(isolate());
  CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
  __ Bind(&end);
}


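// Emit a specialized check for each possible type literal instead of
// materializing the typeof result as a string and comparing it.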
void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Handle<String> type_name = instr->type_literal();
  Label* true_label = instr->TrueLabel(chunk_);
  Label* false_label = instr->FalseLabel(chunk_);
  Register value = ToRegister(instr->value());

  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(value, true_label);

    int true_block = instr->TrueDestination(chunk_);
    int false_block = instr->FalseDestination(chunk_);
    int next_block = GetNextEmittedBlock();

    if (true_block == false_block) {
      EmitGoto(true_block);
    } else if (true_block == next_block) {
      __ JumpIfNotHeapNumber(value, chunk_->GetAssemblyLabel(false_block));
    } else {
      __ JumpIfHeapNumber(value, chunk_->GetAssemblyLabel(true_block));
      if (false_block != next_block) {
        __ B(chunk_->GetAssemblyLabel(false_block));
      }
    }

  } else if (String::Equals(type_name, factory->string_string())) {
    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ CompareObjectType(value, map, scratch, FIRST_NONSTRING_TYPE);
    EmitBranch(instr, lt);

  } else if (String::Equals(type_name, factory->symbol_string())) {
    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ CompareObjectType(value, map, scratch, SYMBOL_TYPE);
    EmitBranch(instr, eq);

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ JumpIfRoot(value, Heap::kTrueValueRootIndex, true_label);
    __ CompareRoot(value, Heap::kFalseValueRootIndex);
    EmitBranch(instr, eq);

  } else if (String::Equals(type_name, factory->undefined_string())) {
    DCHECK(instr->temp1() != NULL);
    Register scratch = ToRegister(instr->temp1());

    __ JumpIfRoot(value, Heap::kNullValueRootIndex, false_label);
    __ JumpIfSmi(value, false_label);
    // Check for undetectable objects and jump to the true branch in this case.
    __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
    __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, ne, scratch, 1 << Map::kIsUndetectable);

  } else if (String::Equals(type_name, factory->function_string())) {
    DCHECK(instr->temp1() != NULL);
    Register scratch = ToRegister(instr->temp1());

    __ JumpIfSmi(value, false_label);
    __ Ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
    __ Ldrb(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
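    // typeof is "function" only for maps that are callable and not
    // undetectable: mask out both bits and require exactly kIsCallable.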
    __ And(scratch, scratch,
           (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));
    EmitCompareAndBranch(instr, eq, scratch, 1 << Map::kIsCallable);

  } else if (String::Equals(type_name, factory->object_string())) {
    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
    Register map = ToRegister(instr->temp1());
    Register scratch = ToRegister(instr->temp2());

    __ JumpIfSmi(value, false_label);
    __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
    __ JumpIfObjectType(value, map, scratch, FIRST_JS_RECEIVER_TYPE,
                        false_label, lt);
    // Check for callable or undetectable objects => false.
    __ Ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
    EmitTestAndBranch(instr, eq, scratch,
                      (1 << Map::kIsCallable) | (1 << Map::kIsUndetectable));

  } else {
    __ B(false_label);
  }
}


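// Ucvtf on a W register performs an unsigned integer-to-double conversion, so
// the uint32 input needs no zero-extension first.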
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
  __ Ucvtf(ToDoubleRegister(instr->result()), ToRegister32(instr->value()));
}


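// Deoptimize if the object's map is no longer the expected one (emitted for
// for-in iteration, where the map must stay stable across the loop).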
void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  Register temp = ToRegister(instr->temp());
  __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Cmp(map, temp);
  DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
}


void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
  Register receiver = ToRegister(instr->receiver());
  Register function = ToRegister(instr->function());
  Register result = ToRegister(instr->result());

  // If the receiver is null or undefined, we have to pass the global object as
  // a receiver to normal functions. Values have to be passed unchanged to
  // builtins and strict-mode functions.
  Label global_object, done, copy_receiver;

  if (!instr->hydrogen()->known_function()) {
    __ Ldr(result, FieldMemOperand(function,
                                   JSFunction::kSharedFunctionInfoOffset));

    // CompilerHints is an int32 field. See objects.h.
    __ Ldr(result.W(),
           FieldMemOperand(result, SharedFunctionInfo::kCompilerHintsOffset));

    // Do not transform the receiver to object for strict mode functions.
    __ Tbnz(result, SharedFunctionInfo::kStrictModeFunction, &copy_receiver);

    // Do not transform the receiver to object for builtins.
    __ Tbnz(result, SharedFunctionInfo::kNative, &copy_receiver);
  }

  // Normal function. Replace undefined or null with global receiver.
  __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &global_object);
  __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);

  // Deoptimize if the receiver is not a JS object.
  DeoptimizeIfSmi(receiver, instr, DeoptimizeReason::kSmi);
  __ CompareObjectType(receiver, result, result, FIRST_JS_RECEIVER_TYPE);
  __ B(ge, &copy_receiver);
  Deoptimize(instr, DeoptimizeReason::kNotAJavaScriptObject);

  __ Bind(&global_object);
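  // The receiver is null or undefined: substitute the global proxy, fetched
  // from the function's native context.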
  __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
  __ Ldr(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
  __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
  __ B(&done);

  __ Bind(&copy_receiver);
  __ Mov(result, receiver);
  __ Bind(&done);
}


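// Out-of-line call to the runtime to load a mutable HeapNumber field. The
// runtime result arrives in x0 and is stored back into the safepoint slot of
// the designated result register.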
void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result,
                                           Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object);
  __ Push(index);
  __ Mov(cp, 0);
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(
      instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(x0, result);
}


void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen,
                              LLoadFieldByIndex* instr,
                              Register result,
                              Register object,
                              Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {
    }
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };
  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());

  __ AssertSmi(index);

  DeferredLoadMutableDouble* deferred = new(zone()) DeferredLoadMutableDouble(
      this, instr, result, object, index);

  Label out_of_object, done;

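  // The low bit of the untagged index flags a mutable double field, which is
  // loaded and boxed in the deferred code. On the fall-through path the flag
  // bit is clear, so the arithmetic shift simply halves the Smi payload to
  // recover the field index; a negative index denotes an out-of-object
  // property.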
  __ TestAndBranchIfAnySet(
      index, reinterpret_cast<uint64_t>(Smi::FromInt(1)), deferred->entry());
  __ Mov(index, Operand(index, ASR, 1));

  __ Cmp(index, Smi::kZero);
  __ B(lt, &out_of_object);

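  // In-object property: scale the untagged index by the pointer size and load
  // relative to the start of the object.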
  STATIC_ASSERT(kPointerSizeLog2 > kSmiTagSize);
  __ Add(result, object, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ Ldr(result, FieldMemOperand(result, JSObject::kHeaderSize));

  __ B(&done);

  __ Bind(&out_of_object);
  __ Ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // An out-of-object property index is encoded as -(property_index + 1).
  __ Sub(result, result, Operand::UntagSmiAndScale(index, kPointerSizeLog2));
  __ Ldr(result, FieldMemOperand(result,
                                 FixedArray::kHeaderSize - kPointerSize));
  __ Bind(deferred->exit());
  __ Bind(&done);
}

}  // namespace internal
}  // namespace v8