// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/compiler/code-generator.h"

#include "src/arm64/frames-arm64.h"
#include "src/arm64/macro-assembler-arm64.h"
#include "src/ast/scopes.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"

namespace v8 {
namespace internal {
namespace compiler {

#define __ masm()->


// Adds Arm64-specific methods to convert InstructionOperands.
class Arm64OperandConverter final : public InstructionOperandConverter {
 public:
  Arm64OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  DoubleRegister InputFloat32Register(size_t index) {
    return InputDoubleRegister(index).S();
  }

  DoubleRegister InputFloat64Register(size_t index) {
    return InputDoubleRegister(index);
  }

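  // The *OrZero accessors below accept either a register input or an
  // immediate that must be bit-identical to zero (enforced by the DCHECKs),
  // in which case the wzr/xzr zero register is substituted so no register
  // has to be allocated for the constant.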
  CPURegister InputFloat32OrZeroRegister(size_t index) {
    if (instr_->InputAt(index)->IsImmediate()) {
      DCHECK(bit_cast<int32_t>(InputFloat32(index)) == 0);
      return wzr;
    }
    DCHECK(instr_->InputAt(index)->IsFPRegister());
    return InputDoubleRegister(index).S();
  }

  CPURegister InputFloat64OrZeroRegister(size_t index) {
    if (instr_->InputAt(index)->IsImmediate()) {
      DCHECK(bit_cast<int64_t>(InputDouble(index)) == 0);
      return xzr;
    }
    DCHECK(instr_->InputAt(index)->IsFPRegister());
    return InputDoubleRegister(index);
  }

  size_t OutputCount() { return instr_->OutputCount(); }

  DoubleRegister OutputFloat32Register() { return OutputDoubleRegister().S(); }

  DoubleRegister OutputFloat64Register() { return OutputDoubleRegister(); }

  Register InputRegister32(size_t index) {
    return ToRegister(instr_->InputAt(index)).W();
  }

  Register InputOrZeroRegister32(size_t index) {
    DCHECK(instr_->InputAt(index)->IsRegister() ||
           (instr_->InputAt(index)->IsImmediate() && (InputInt32(index) == 0)));
    if (instr_->InputAt(index)->IsImmediate()) {
      return wzr;
    }
    return InputRegister32(index);
  }

  Register InputRegister64(size_t index) { return InputRegister(index); }

  Register InputOrZeroRegister64(size_t index) {
    DCHECK(instr_->InputAt(index)->IsRegister() ||
           (instr_->InputAt(index)->IsImmediate() && (InputInt64(index) == 0)));
    if (instr_->InputAt(index)->IsImmediate()) {
      return xzr;
    }
    return InputRegister64(index);
  }

  Operand InputImmediate(size_t index) {
    return ToImmediate(instr_->InputAt(index));
  }

  Operand InputOperand(size_t index) {
    return ToOperand(instr_->InputAt(index));
  }

  Operand InputOperand64(size_t index) { return InputOperand(index); }

  Operand InputOperand32(size_t index) {
    return ToOperand32(instr_->InputAt(index));
  }

  Register OutputRegister64() { return OutputRegister(); }

  Register OutputRegister32() { return ToRegister(instr_->Output()).W(); }

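  // Decodes the addressing-mode bits of the opcode into an ARM64 "operand2"
  // (a register with an optional shift or extend). For the shifted forms the
  // shift amount is taken from the next instruction input; the memory modes
  // (MRI/MRR) are invalid here.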
  Operand InputOperand2_32(size_t index) {
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        return InputOperand32(index);
      case kMode_Operand2_R_LSL_I:
        return Operand(InputRegister32(index), LSL, InputInt5(index + 1));
      case kMode_Operand2_R_LSR_I:
        return Operand(InputRegister32(index), LSR, InputInt5(index + 1));
      case kMode_Operand2_R_ASR_I:
        return Operand(InputRegister32(index), ASR, InputInt5(index + 1));
      case kMode_Operand2_R_ROR_I:
        return Operand(InputRegister32(index), ROR, InputInt5(index + 1));
      case kMode_Operand2_R_UXTB:
        return Operand(InputRegister32(index), UXTB);
      case kMode_Operand2_R_UXTH:
        return Operand(InputRegister32(index), UXTH);
      case kMode_Operand2_R_SXTB:
        return Operand(InputRegister32(index), SXTB);
      case kMode_Operand2_R_SXTH:
        return Operand(InputRegister32(index), SXTH);
      case kMode_MRI:
      case kMode_MRR:
        break;
    }
    UNREACHABLE();
    return Operand(-1);
  }

  Operand InputOperand2_64(size_t index) {
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        return InputOperand64(index);
      case kMode_Operand2_R_LSL_I:
        return Operand(InputRegister64(index), LSL, InputInt6(index + 1));
      case kMode_Operand2_R_LSR_I:
        return Operand(InputRegister64(index), LSR, InputInt6(index + 1));
      case kMode_Operand2_R_ASR_I:
        return Operand(InputRegister64(index), ASR, InputInt6(index + 1));
      case kMode_Operand2_R_ROR_I:
        return Operand(InputRegister64(index), ROR, InputInt6(index + 1));
      case kMode_Operand2_R_UXTB:
        return Operand(InputRegister64(index), UXTB);
      case kMode_Operand2_R_UXTH:
        return Operand(InputRegister64(index), UXTH);
      case kMode_Operand2_R_SXTB:
        return Operand(InputRegister64(index), SXTB);
      case kMode_Operand2_R_SXTH:
        return Operand(InputRegister64(index), SXTH);
      case kMode_MRI:
      case kMode_MRR:
        break;
    }
    UNREACHABLE();
    return Operand(-1);
  }

  MemOperand MemoryOperand(size_t* first_index) {
    const size_t index = *first_index;
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
      case kMode_Operand2_R_LSR_I:
      case kMode_Operand2_R_ASR_I:
      case kMode_Operand2_R_ROR_I:
      case kMode_Operand2_R_UXTB:
      case kMode_Operand2_R_UXTH:
      case kMode_Operand2_R_SXTB:
      case kMode_Operand2_R_SXTH:
        break;
      case kMode_Operand2_R_LSL_I:
        *first_index += 3;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
                          LSL, InputInt32(index + 2));
      case kMode_MRI:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
    }
    UNREACHABLE();
    return MemOperand(no_reg);
  }

  MemOperand MemoryOperand(size_t first_index = 0) {
    return MemoryOperand(&first_index);
  }

  Operand ToOperand(InstructionOperand* op) {
    if (op->IsRegister()) {
      return Operand(ToRegister(op));
    }
    return ToImmediate(op);
  }

  Operand ToOperand32(InstructionOperand* op) {
    if (op->IsRegister()) {
      return Operand(ToRegister(op).W());
    }
    return ToImmediate(op);
  }

  Operand ToImmediate(InstructionOperand* operand) {
    Constant constant = ToConstant(operand);
    switch (constant.type()) {
      case Constant::kInt32:
        if (constant.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
          return Operand(constant.ToInt32(), constant.rmode());
        } else {
          return Operand(constant.ToInt32());
        }
      case Constant::kInt64:
        if (constant.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
            constant.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
          return Operand(constant.ToInt64(), constant.rmode());
        } else {
          DCHECK(constant.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
          return Operand(constant.ToInt64());
        }
      case Constant::kFloat32:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kExternalReference:
        return Operand(constant.ToExternalReference());
      case Constant::kHeapObject:
        return Operand(constant.ToHeapObject());
      case Constant::kRpoNumber:
        UNREACHABLE();  // TODO(dcarney): RPO immediates on arm64.
        break;
    }
    UNREACHABLE();
    return Operand(-1);
  }

  MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
    DCHECK_NOT_NULL(op);
    DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
    return SlotToMemOperand(AllocatedOperand::cast(op)->index(), masm);
  }

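  // IsImmLSUnscaled/IsImmLSScaled test whether the SP-relative offset fits
  // a load/store immediate encoding (9-bit signed unscaled, or unsigned
  // scaled); only then is the FP-relative slot rewritten as SP-relative.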
  MemOperand SlotToMemOperand(int slot, MacroAssembler* masm) const {
    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
    if (offset.from_frame_pointer()) {
      int from_sp = offset.offset() + frame_access_state()->GetSPToFPOffset();
      // Convert FP-offsets to SP-offsets if it results in better code.
      if (Assembler::IsImmLSUnscaled(from_sp) ||
          Assembler::IsImmLSScaled(from_sp, LSDoubleWord)) {
        offset = FrameOffset::FromStackPointer(from_sp);
      }
    }
    return MemOperand(offset.from_stack_pointer() ? masm->StackPointer() : fp,
                      offset.offset());
  }
};


namespace {

class OutOfLineLoadNaN32 final : public OutOfLineCode {
 public:
  OutOfLineLoadNaN32(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    __ Fmov(result_, std::numeric_limits<float>::quiet_NaN());
  }

 private:
  DoubleRegister const result_;
};


class OutOfLineLoadNaN64 final : public OutOfLineCode {
 public:
  OutOfLineLoadNaN64(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    __ Fmov(result_, std::numeric_limits<double>::quiet_NaN());
  }

 private:
  DoubleRegister const result_;
};


class OutOfLineLoadZero final : public OutOfLineCode {
 public:
  OutOfLineLoadZero(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final { __ Mov(result_, 0); }

 private:
  Register const result_;
};


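// Out-of-line slow path of the write barrier: skips Smi values when the
// mode allows it, skips pages that are not interesting to the GC, and
// otherwise calls the RecordWriteStub (saving/restoring lr when the frame
// was elided, since the stub call clobbers it).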
class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand index,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode)
      : OutOfLineCode(gen),
        object_(object),
        index_(index),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
        must_save_lr_(!gen->frame_access_state()->has_frame()) {}

  void Generate() final {
    if (mode_ > RecordWriteMode::kValueIsPointer) {
      __ JumpIfSmi(value_, exit());
    }
    __ CheckPageFlagClear(value_, scratch0_,
                          MemoryChunk::kPointersToHereAreInterestingMask,
                          exit());
    RememberedSetAction const remembered_set_action =
        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
                                             : OMIT_REMEMBERED_SET;
    SaveFPRegsMode const save_fp_mode =
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
    if (must_save_lr_) {
      // We need to save and restore lr if the frame was elided.
      __ Push(lr);
    }
    RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
                         remembered_set_action, save_fp_mode);
    __ Add(scratch1_, object_, index_);
    __ CallStub(&stub);
    if (must_save_lr_) {
      __ Pop(lr);
    }
  }

 private:
  Register const object_;
  Operand const index_;
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
  bool must_save_lr_;
};


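// Maps the machine-independent FlagsCondition onto an ARM64 condition code.
// The float mappings rely on fcmp flag semantics: an unordered (NaN)
// comparison sets C and V, so e.g. "lo" only fires for an ordered
// less-than while "lt" also fires for unordered inputs.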
Condition FlagsConditionToCondition(FlagsCondition condition) {
  switch (condition) {
    case kEqual:
      return eq;
    case kNotEqual:
      return ne;
    case kSignedLessThan:
      return lt;
    case kSignedGreaterThanOrEqual:
      return ge;
    case kSignedLessThanOrEqual:
      return le;
    case kSignedGreaterThan:
      return gt;
    case kUnsignedLessThan:
      return lo;
    case kUnsignedGreaterThanOrEqual:
      return hs;
    case kUnsignedLessThanOrEqual:
      return ls;
    case kUnsignedGreaterThan:
      return hi;
    case kFloatLessThanOrUnordered:
      return lt;
    case kFloatGreaterThanOrEqual:
      return ge;
    case kFloatLessThanOrEqual:
      return ls;
    case kFloatGreaterThanOrUnordered:
      return hi;
    case kFloatLessThan:
      return lo;
    case kFloatGreaterThanOrEqualOrUnordered:
      return hs;
    case kFloatLessThanOrEqualOrUnordered:
      return le;
    case kFloatGreaterThan:
      return gt;
    case kOverflow:
      return vs;
    case kNotOverflow:
      return vc;
    case kUnorderedEqual:
    case kUnorderedNotEqual:
      break;
  }
  UNREACHABLE();
  return nv;
}

}  // namespace

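// Branches to {out_of_bounds} unless 0 <= offset < length. For a
// power-of-two immediate length, offset >= length exactly when any bit at
// or above log2(length) is set, so a single Tst against the inverted mask
// replaces the Cmp/B(hs) pair.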
#define ASSEMBLE_BOUNDS_CHECK(offset, length, out_of_bounds)   \
  do {                                                         \
    if (length.IsImmediate() &&                                \
        base::bits::IsPowerOfTwo64(length.ImmediateValue())) { \
      __ Tst(offset, ~(length.ImmediateValue() - 1));          \
      __ B(ne, out_of_bounds);                                 \
    } else {                                                   \
      __ Cmp(offset, length);                                  \
      __ B(hs, out_of_bounds);                                 \
    }                                                          \
  } while (0)

#define ASSEMBLE_CHECKED_LOAD_FLOAT(width)                         \
  do {                                                             \
    auto result = i.OutputFloat##width##Register();                \
    auto buffer = i.InputRegister(0);                              \
    auto offset = i.InputRegister32(1);                            \
    auto length = i.InputOperand32(2);                             \
    auto ool = new (zone()) OutOfLineLoadNaN##width(this, result); \
    ASSEMBLE_BOUNDS_CHECK(offset, length, ool->entry());           \
    __ Ldr(result, MemOperand(buffer, offset, UXTW));              \
    __ Bind(ool->exit());                                          \
  } while (0)

#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)             \
  do {                                                       \
    auto result = i.OutputRegister32();                      \
    auto buffer = i.InputRegister(0);                        \
    auto offset = i.InputRegister32(1);                      \
    auto length = i.InputOperand32(2);                       \
    auto ool = new (zone()) OutOfLineLoadZero(this, result); \
    ASSEMBLE_BOUNDS_CHECK(offset, length, ool->entry());     \
    __ asm_instr(result, MemOperand(buffer, offset, UXTW));  \
    __ Bind(ool->exit());                                    \
  } while (0)

#define ASSEMBLE_CHECKED_LOAD_INTEGER_64(asm_instr)          \
  do {                                                       \
    auto result = i.OutputRegister();                        \
    auto buffer = i.InputRegister(0);                        \
    auto offset = i.InputRegister32(1);                      \
    auto length = i.InputOperand32(2);                       \
    auto ool = new (zone()) OutOfLineLoadZero(this, result); \
    ASSEMBLE_BOUNDS_CHECK(offset, length, ool->entry());     \
    __ asm_instr(result, MemOperand(buffer, offset, UXTW));  \
    __ Bind(ool->exit());                                    \
  } while (0)

#define ASSEMBLE_CHECKED_STORE_FLOAT(width)              \
  do {                                                   \
    auto buffer = i.InputRegister(0);                    \
    auto offset = i.InputRegister32(1);                  \
    auto length = i.InputOperand32(2);                   \
    auto value = i.InputFloat##width##OrZeroRegister(3); \
    Label done;                                          \
    ASSEMBLE_BOUNDS_CHECK(offset, length, &done);        \
    __ Str(value, MemOperand(buffer, offset, UXTW));     \
    __ Bind(&done);                                      \
  } while (0)

#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)          \
  do {                                                     \
    auto buffer = i.InputRegister(0);                      \
    auto offset = i.InputRegister32(1);                    \
    auto length = i.InputOperand32(2);                     \
    auto value = i.InputOrZeroRegister32(3);               \
    Label done;                                            \
    ASSEMBLE_BOUNDS_CHECK(offset, length, &done);          \
    __ asm_instr(value, MemOperand(buffer, offset, UXTW)); \
    __ Bind(&done);                                        \
  } while (0)

#define ASSEMBLE_CHECKED_STORE_INTEGER_64(asm_instr)       \
  do {                                                     \
    auto buffer = i.InputRegister(0);                      \
    auto offset = i.InputRegister32(1);                    \
    auto length = i.InputOperand32(2);                     \
    auto value = i.InputOrZeroRegister64(3);               \
    Label done;                                            \
    ASSEMBLE_BOUNDS_CHECK(offset, length, &done);          \
    __ asm_instr(value, MemOperand(buffer, offset, UXTW)); \
    __ Bind(&done);                                        \
  } while (0)

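// Register shift amounts use the variable-shift instruction directly;
// immediate amounts are reduced modulo the register width, which matches
// the masking the variable-shift instructions apply themselves.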
#define ASSEMBLE_SHIFT(asm_instr, width)                                    \
  do {                                                                      \
    if (instr->InputAt(1)->IsRegister()) {                                  \
      __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0),    \
                   i.InputRegister##width(1));                              \
    } else {                                                                \
      uint32_t imm =                                                        \
          static_cast<uint32_t>(i.InputOperand##width(1).ImmediateValue()); \
      __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0),    \
                   imm % (width));                                          \
    }                                                                       \
  } while (0)

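// These implement sequentially consistent atomic loads/stores with plain
// accesses bracketed by full Dmb barriers rather than with acquire/release
// (ldar/stlr) instructions.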
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr)                       \
  do {                                                                \
    __ asm_instr(i.OutputRegister(),                                  \
                 MemOperand(i.InputRegister(0), i.InputRegister(1))); \
    __ Dmb(InnerShareable, BarrierAll);                               \
  } while (0)

#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr)                      \
  do {                                                                \
    __ Dmb(InnerShareable, BarrierAll);                               \
    __ asm_instr(i.InputRegister(2),                                  \
                 MemOperand(i.InputRegister(0), i.InputRegister(1))); \
    __ Dmb(InnerShareable, BarrierAll);                               \
  } while (0)

#define ASSEMBLE_IEEE754_BINOP(name)                                          \
  do {                                                                        \
    FrameScope scope(masm(), StackFrame::MANUAL);                             \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
                     0, 2);                                                   \
  } while (0)

#define ASSEMBLE_IEEE754_UNOP(name)                                           \
  do {                                                                        \
    FrameScope scope(masm(), StackFrame::MANUAL);                             \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()), \
                     0, 1);                                                   \
  } while (0)

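// ARM64 code generation here juggles two stack pointers: csp, the
// architectural stack pointer which must stay 16-byte aligned, and jssp,
// the JavaScript stack pointer. Which one a frame uses depends on its call
// descriptor (see UseNativeStack and the kRestoreJSSP/kRestoreCSP flags
// below), so frame teardown must restore the right one.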
void CodeGenerator::AssembleDeconstructFrame() {
  const CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->IsCFunctionCall() || descriptor->UseNativeStack()) {
    __ Mov(csp, fp);
  } else {
    __ Mov(jssp, fp);
  }
  __ Pop(fp, lr);
}

void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
  if (sp_slot_delta > 0) {
    __ Drop(sp_slot_delta);
  }
  frame_access_state()->SetFrameAccessToDefault();
}


void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
  if (sp_slot_delta < 0) {
    __ Claim(-sp_slot_delta);
    frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
  }
  if (frame_access_state()->has_frame()) {
    __ Ldr(lr, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
    __ Ldr(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
  }
  frame_access_state()->SetFrameAccessToSP();
}

void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
                                                     Register scratch1,
                                                     Register scratch2,
                                                     Register scratch3) {
  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
  Label done;

  // Check if current frame is an arguments adaptor frame.
  __ Ldr(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ Cmp(scratch1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
  __ B(ne, &done);

  // Load arguments count from current arguments adaptor frame (note, it
  // does not include receiver).
  Register caller_args_count_reg = scratch1;
  __ Ldr(caller_args_count_reg,
         MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(caller_args_count_reg);

  ParameterCount callee_args_count(args_reg);
  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
                        scratch3);
  __ bind(&done);
}

// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
    Instruction* instr) {
  Arm64OperandConverter i(this, instr);
  InstructionCode opcode = instr->opcode();
  ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
  switch (arch_opcode) {
    case kArchCallCodeObject: {
      EnsureSpaceForLazyDeopt();
      if (instr->InputAt(0)->IsImmediate()) {
        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      } else {
        Register target = i.InputRegister(0);
        __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
        __ Call(target);
      }
      RecordCallPosition(instr);
      // TODO(titzer): this is ugly. JSSP should be a caller-save register
      // in this case, but it is not possible to express in the register
      // allocator.
      CallDescriptor::Flags flags(MiscField::decode(opcode));
      if (flags & CallDescriptor::kRestoreJSSP) {
        __ Ldr(jssp, MemOperand(csp));
        __ Mov(csp, jssp);
      }
      if (flags & CallDescriptor::kRestoreCSP) {
        __ Mov(csp, jssp);
        __ AssertCspAligned();
      }
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallCodeObjectFromJSFunction:
    case kArchTailCallCodeObject: {
      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
      AssembleDeconstructActivationRecord(stack_param_delta);
      if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                         i.TempRegister(0), i.TempRegister(1),
                                         i.TempRegister(2));
      }
      if (instr->InputAt(0)->IsImmediate()) {
        __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
                RelocInfo::CODE_TARGET);
      } else {
        Register target = i.InputRegister(0);
        __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
        __ Jump(target);
      }
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallAddress: {
      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
      AssembleDeconstructActivationRecord(stack_param_delta);
      CHECK(!instr->InputAt(0)->IsImmediate());
      __ Jump(i.InputRegister(0));
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchCallJSFunction: {
      EnsureSpaceForLazyDeopt();
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        UseScratchRegisterScope scope(masm());
        Register temp = scope.AcquireX();
        __ Ldr(temp, FieldMemOperand(func, JSFunction::kContextOffset));
        __ cmp(cp, temp);
        __ Assert(eq, kWrongFunctionContext);
      }
      __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Call(x10);
      RecordCallPosition(instr);
      // TODO(titzer): this is ugly. JSSP should be a caller-save register
      // in this case, but it is not possible to express in the register
      // allocator.
      CallDescriptor::Flags flags(MiscField::decode(opcode));
      if (flags & CallDescriptor::kRestoreJSSP) {
        __ Ldr(jssp, MemOperand(csp));
        __ Mov(csp, jssp);
      }
      if (flags & CallDescriptor::kRestoreCSP) {
        __ Mov(csp, jssp);
        __ AssertCspAligned();
      }
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchTailCallJSFunctionFromJSFunction:
    case kArchTailCallJSFunction: {
      Register func = i.InputRegister(0);
      if (FLAG_debug_code) {
        // Check the function's context matches the context argument.
        UseScratchRegisterScope scope(masm());
        Register temp = scope.AcquireX();
        __ Ldr(temp, FieldMemOperand(func, JSFunction::kContextOffset));
        __ cmp(cp, temp);
        __ Assert(eq, kWrongFunctionContext);
      }
      int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
      AssembleDeconstructActivationRecord(stack_param_delta);
      if (arch_opcode == kArchTailCallJSFunctionFromJSFunction) {
        AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                         i.TempRegister(0), i.TempRegister(1),
                                         i.TempRegister(2));
      }
      __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
      __ Jump(x10);
      frame_access_state()->ClearSPDelta();
      break;
    }
    case kArchPrepareCallCFunction:
      // We don't need kArchPrepareCallCFunction on arm64 as the instruction
      // selector already performs a Claim to reserve space on the stack and
      // guarantees correct alignment of the stack pointer.
      UNREACHABLE();
      break;
    case kArchPrepareTailCall:
      AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
      break;
    case kArchCallCFunction: {
      int const num_parameters = MiscField::decode(instr->opcode());
      if (instr->InputAt(0)->IsImmediate()) {
        ExternalReference ref = i.InputExternalReference(0);
        __ CallCFunction(ref, num_parameters, 0);
      } else {
        Register func = i.InputRegister(0);
        __ CallCFunction(func, num_parameters, 0);
      }
      // CallCFunction only supports register arguments so we never need to call
      // frame()->ClearOutgoingParameterSlots() here.
      DCHECK(frame_access_state()->sp_delta() == 0);
      break;
    }
    case kArchJmp:
      AssembleArchJump(i.InputRpo(0));
      break;
    case kArchTableSwitch:
      AssembleArchTableSwitch(instr);
      break;
    case kArchLookupSwitch:
      AssembleArchLookupSwitch(instr);
      break;
    case kArchDebugBreak:
      __ Debug("kArchDebugBreak", 0, BREAK);
      break;
    case kArchComment: {
      Address comment_string = i.InputExternalReference(0).address();
      __ RecordComment(reinterpret_cast<const char*>(comment_string));
      break;
    }
    case kArchNop:
    case kArchThrowTerminator:
      // Don't emit code for nops.
      break;
    case kArchDeoptimize: {
      int deopt_state_id =
          BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
      Deoptimizer::BailoutType bailout_type =
          Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
      CodeGenResult result =
          AssembleDeoptimizerCall(deopt_state_id, bailout_type);
      if (result != kSuccess) return result;
      break;
    }
    case kArchRet:
      AssembleReturn();
      break;
    case kArchStackPointer:
      __ mov(i.OutputRegister(), masm()->StackPointer());
      break;
    case kArchFramePointer:
      __ mov(i.OutputRegister(), fp);
      break;
    case kArchParentFramePointer:
      if (frame_access_state()->has_frame()) {
        __ ldr(i.OutputRegister(), MemOperand(fp, 0));
      } else {
        __ mov(i.OutputRegister(), fp);
      }
      break;
    case kArchTruncateDoubleToI:
      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
    case kArchStoreWithWriteBarrier: {
      RecordWriteMode mode =
          static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
      AddressingMode addressing_mode =
          AddressingModeField::decode(instr->opcode());
      Register object = i.InputRegister(0);
      Operand index(0);
      if (addressing_mode == kMode_MRI) {
        index = Operand(i.InputInt64(1));
      } else {
        DCHECK_EQ(addressing_mode, kMode_MRR);
        index = Operand(i.InputRegister(1));
      }
      Register value = i.InputRegister(2);
      Register scratch0 = i.TempRegister(0);
      Register scratch1 = i.TempRegister(1);
      auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
                                                   scratch0, scratch1, mode);
      __ Str(value, MemOperand(object, index));
      __ CheckPageFlagSet(object, scratch0,
                          MemoryChunk::kPointersFromHereAreInterestingMask,
                          ool->entry());
      __ Bind(ool->exit());
      break;
    }
    case kArchStackSlot: {
      FrameOffset offset =
          frame_access_state()->GetFrameOffset(i.InputInt32(0));
      Register base;
      if (offset.from_stack_pointer()) {
        base = __ StackPointer();
      } else {
        base = fp;
      }
      __ Add(i.OutputRegister(0), base, Operand(offset.offset()));
      break;
    }
    case kIeee754Float64Atan:
      ASSEMBLE_IEEE754_UNOP(atan);
      break;
    case kIeee754Float64Atan2:
      ASSEMBLE_IEEE754_BINOP(atan2);
      break;
    case kIeee754Float64Cos:
      ASSEMBLE_IEEE754_UNOP(cos);
      break;
    case kIeee754Float64Cbrt:
      ASSEMBLE_IEEE754_UNOP(cbrt);
      break;
    case kIeee754Float64Exp:
      ASSEMBLE_IEEE754_UNOP(exp);
      break;
    case kIeee754Float64Expm1:
      ASSEMBLE_IEEE754_UNOP(expm1);
      break;
    case kIeee754Float64Atanh:
      ASSEMBLE_IEEE754_UNOP(atanh);
      break;
    case kIeee754Float64Log:
      ASSEMBLE_IEEE754_UNOP(log);
      break;
    case kIeee754Float64Log1p:
      ASSEMBLE_IEEE754_UNOP(log1p);
      break;
    case kIeee754Float64Log2:
      ASSEMBLE_IEEE754_UNOP(log2);
      break;
    case kIeee754Float64Log10:
      ASSEMBLE_IEEE754_UNOP(log10);
      break;
    case kIeee754Float64Sin:
      ASSEMBLE_IEEE754_UNOP(sin);
      break;
    case kIeee754Float64Tan:
      ASSEMBLE_IEEE754_UNOP(tan);
      break;
    case kArm64Float32RoundDown:
      __ Frintm(i.OutputFloat32Register(), i.InputFloat32Register(0));
      break;
    case kArm64Float64RoundDown:
      __ Frintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float32RoundUp:
      __ Frintp(i.OutputFloat32Register(), i.InputFloat32Register(0));
      break;
    case kArm64Float64RoundUp:
      __ Frintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64RoundTiesAway:
      __ Frinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float32RoundTruncate:
      __ Frintz(i.OutputFloat32Register(), i.InputFloat32Register(0));
      break;
    case kArm64Float64RoundTruncate:
      __ Frintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float32RoundTiesEven:
      __ Frintn(i.OutputFloat32Register(), i.InputFloat32Register(0));
      break;
    case kArm64Float64RoundTiesEven:
      __ Frintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Add:
      if (FlagsModeField::decode(opcode) != kFlags_none) {
        __ Adds(i.OutputRegister(), i.InputOrZeroRegister64(0),
                i.InputOperand2_64(1));
      } else {
        __ Add(i.OutputRegister(), i.InputOrZeroRegister64(0),
               i.InputOperand2_64(1));
      }
      break;
    case kArm64Add32:
      if (FlagsModeField::decode(opcode) != kFlags_none) {
        __ Adds(i.OutputRegister32(), i.InputOrZeroRegister32(0),
                i.InputOperand2_32(1));
      } else {
        __ Add(i.OutputRegister32(), i.InputOrZeroRegister32(0),
               i.InputOperand2_32(1));
      }
      break;
    case kArm64And:
      __ And(i.OutputRegister(), i.InputOrZeroRegister64(0),
             i.InputOperand2_64(1));
      break;
    case kArm64And32:
      __ And(i.OutputRegister32(), i.InputOrZeroRegister32(0),
             i.InputOperand2_32(1));
      break;
    case kArm64Bic:
      __ Bic(i.OutputRegister(), i.InputOrZeroRegister64(0),
             i.InputOperand2_64(1));
      break;
    case kArm64Bic32:
      __ Bic(i.OutputRegister32(), i.InputOrZeroRegister32(0),
             i.InputOperand2_32(1));
      break;
    case kArm64Mul:
      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Mul32:
      __ Mul(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Smull:
      __ Smull(i.OutputRegister(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Umull:
      __ Umull(i.OutputRegister(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Madd:
      __ Madd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
              i.InputRegister(2));
      break;
    case kArm64Madd32:
      __ Madd(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1),
              i.InputRegister32(2));
      break;
    case kArm64Msub:
      __ Msub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
              i.InputRegister(2));
      break;
    case kArm64Msub32:
      __ Msub(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1),
              i.InputRegister32(2));
      break;
    case kArm64Mneg:
      __ Mneg(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Mneg32:
      __ Mneg(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Idiv:
      __ Sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Idiv32:
      __ Sdiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Udiv:
      __ Udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
      break;
    case kArm64Udiv32:
      __ Udiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
      break;
    case kArm64Imod: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Sdiv(temp, i.InputRegister(0), i.InputRegister(1));
      __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
      break;
    }
    case kArm64Imod32: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireW();
      __ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1));
      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
              i.InputRegister32(0));
      break;
    }
    case kArm64Umod: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Udiv(temp, i.InputRegister(0), i.InputRegister(1));
      __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
      break;
    }
    case kArm64Umod32: {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireW();
      __ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1));
      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
              i.InputRegister32(0));
      break;
    }
    case kArm64Not:
      __ Mvn(i.OutputRegister(), i.InputOperand(0));
      break;
    case kArm64Not32:
      __ Mvn(i.OutputRegister32(), i.InputOperand32(0));
      break;
    case kArm64Or:
      __ Orr(i.OutputRegister(), i.InputOrZeroRegister64(0),
             i.InputOperand2_64(1));
      break;
    case kArm64Or32:
      __ Orr(i.OutputRegister32(), i.InputOrZeroRegister32(0),
             i.InputOperand2_32(1));
      break;
    case kArm64Orn:
      __ Orn(i.OutputRegister(), i.InputOrZeroRegister64(0),
             i.InputOperand2_64(1));
      break;
    case kArm64Orn32:
      __ Orn(i.OutputRegister32(), i.InputOrZeroRegister32(0),
             i.InputOperand2_32(1));
      break;
    case kArm64Eor:
      __ Eor(i.OutputRegister(), i.InputOrZeroRegister64(0),
             i.InputOperand2_64(1));
      break;
    case kArm64Eor32:
      __ Eor(i.OutputRegister32(), i.InputOrZeroRegister32(0),
             i.InputOperand2_32(1));
      break;
    case kArm64Eon:
      __ Eon(i.OutputRegister(), i.InputOrZeroRegister64(0),
             i.InputOperand2_64(1));
      break;
    case kArm64Eon32:
      __ Eon(i.OutputRegister32(), i.InputOrZeroRegister32(0),
             i.InputOperand2_32(1));
      break;
    case kArm64Sub:
      if (FlagsModeField::decode(opcode) != kFlags_none) {
        __ Subs(i.OutputRegister(), i.InputOrZeroRegister64(0),
                i.InputOperand2_64(1));
      } else {
        __ Sub(i.OutputRegister(), i.InputOrZeroRegister64(0),
               i.InputOperand2_64(1));
      }
      break;
    case kArm64Sub32:
      if (FlagsModeField::decode(opcode) != kFlags_none) {
        __ Subs(i.OutputRegister32(), i.InputOrZeroRegister32(0),
                i.InputOperand2_32(1));
      } else {
        __ Sub(i.OutputRegister32(), i.InputOrZeroRegister32(0),
               i.InputOperand2_32(1));
      }
      break;
    case kArm64Lsl:
      ASSEMBLE_SHIFT(Lsl, 64);
      break;
    case kArm64Lsl32:
      ASSEMBLE_SHIFT(Lsl, 32);
      break;
    case kArm64Lsr:
      ASSEMBLE_SHIFT(Lsr, 64);
      break;
    case kArm64Lsr32:
      ASSEMBLE_SHIFT(Lsr, 32);
      break;
    case kArm64Asr:
      ASSEMBLE_SHIFT(Asr, 64);
      break;
    case kArm64Asr32:
      ASSEMBLE_SHIFT(Asr, 32);
      break;
    case kArm64Ror:
      ASSEMBLE_SHIFT(Ror, 64);
      break;
    case kArm64Ror32:
      ASSEMBLE_SHIFT(Ror, 32);
      break;
    case kArm64Mov32:
      __ Mov(i.OutputRegister32(), i.InputRegister32(0));
      break;
    case kArm64Sxtb32:
      __ Sxtb(i.OutputRegister32(), i.InputRegister32(0));
      break;
    case kArm64Sxth32:
      __ Sxth(i.OutputRegister32(), i.InputRegister32(0));
      break;
    case kArm64Sxtw:
      __ Sxtw(i.OutputRegister(), i.InputRegister32(0));
      break;
    case kArm64Sbfx32:
      __ Sbfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt5(1),
              i.InputInt5(2));
      break;
    case kArm64Ubfx:
      __ Ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt6(1),
              i.InputInt6(2));
      break;
    case kArm64Ubfx32:
      __ Ubfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt5(1),
              i.InputInt5(2));
      break;
    case kArm64Ubfiz32:
      __ Ubfiz(i.OutputRegister32(), i.InputRegister32(0), i.InputInt5(1),
               i.InputInt5(2));
      break;
    case kArm64Bfi:
      __ Bfi(i.OutputRegister(), i.InputRegister(1), i.InputInt6(2),
             i.InputInt6(3));
      break;
    case kArm64TestAndBranch32:
    case kArm64TestAndBranch:
      // Pseudo instructions turned into tbz/tbnz in AssembleArchBranch.
      break;
    case kArm64CompareAndBranch32:
    case kArm64CompareAndBranch:
      // Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
      break;
    case kArm64ClaimCSP: {
      int count = RoundUp(i.InputInt32(0), 2);
      Register prev = __ StackPointer();
      if (prev.Is(jssp)) {
        // TODO(titzer): make this a macro-assembler method.
        // Align the CSP and store the previous JSSP on the stack.
        UseScratchRegisterScope scope(masm());
        Register tmp = scope.AcquireX();

        int sp_alignment = __ ActivationFrameAlignment();
        __ Sub(tmp, jssp, kPointerSize);
        __ And(tmp, tmp, Operand(~static_cast<uint64_t>(sp_alignment - 1)));
        __ Mov(csp, tmp);
        __ Str(jssp, MemOperand(csp));
        if (count > 0) {
          __ SetStackPointer(csp);
          __ Claim(count);
          __ SetStackPointer(prev);
        }
      } else {
        __ AssertCspAligned();
        if (count > 0) {
          __ Claim(count);
          frame_access_state()->IncreaseSPDelta(count);
        }
      }
      break;
    }
    case kArm64ClaimJSSP: {
      int count = i.InputInt32(0);
      if (csp.Is(__ StackPointer())) {
        // No JSSP is set up. Compute it from the CSP.
        __ AssertCspAligned();
        if (count > 0) {
          int even = RoundUp(count, 2);
          __ Sub(jssp, csp, count * kPointerSize);
          __ Sub(csp, csp, even * kPointerSize);  // Must always be aligned.
          frame_access_state()->IncreaseSPDelta(even);
        } else {
          __ Mov(jssp, csp);
        }
      } else {
        // JSSP is the current stack pointer, just use regular Claim().
        __ Claim(count);
        frame_access_state()->IncreaseSPDelta(count);
      }
      break;
    }
    case kArm64PokeCSP:  // fall through
    case kArm64PokeJSSP: {
      Register prev = __ StackPointer();
      __ SetStackPointer(arch_opcode == kArm64PokeCSP ? csp : jssp);
      Operand operand(i.InputInt32(1) * kPointerSize);
      if (instr->InputAt(0)->IsFPRegister()) {
        __ Poke(i.InputFloat64Register(0), operand);
      } else {
        __ Poke(i.InputRegister(0), operand);
      }
      __ SetStackPointer(prev);
      break;
    }
    case kArm64PokePair: {
      int slot = i.InputInt32(2) - 1;
      if (instr->InputAt(0)->IsFPRegister()) {
        __ PokePair(i.InputFloat64Register(1), i.InputFloat64Register(0),
                    slot * kPointerSize);
      } else {
        __ PokePair(i.InputRegister(1), i.InputRegister(0),
                    slot * kPointerSize);
      }
      break;
    }
    case kArm64Clz:
      __ Clz(i.OutputRegister64(), i.InputRegister64(0));
      break;
    case kArm64Clz32:
      __ Clz(i.OutputRegister32(), i.InputRegister32(0));
      break;
    case kArm64Rbit:
      __ Rbit(i.OutputRegister64(), i.InputRegister64(0));
      break;
    case kArm64Rbit32:
      __ Rbit(i.OutputRegister32(), i.InputRegister32(0));
      break;
    case kArm64Cmp:
      __ Cmp(i.InputOrZeroRegister64(0), i.InputOperand(1));
      break;
    case kArm64Cmp32:
      __ Cmp(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
      break;
    case kArm64Cmn:
      __ Cmn(i.InputOrZeroRegister64(0), i.InputOperand(1));
      break;
    case kArm64Cmn32:
      __ Cmn(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
      break;
    case kArm64Tst:
      __ Tst(i.InputRegister(0), i.InputOperand(1));
      break;
    case kArm64Tst32:
      __ Tst(i.InputRegister32(0), i.InputOperand32(1));
      break;
    case kArm64Float32Cmp:
      if (instr->InputAt(1)->IsFPRegister()) {
        __ Fcmp(i.InputFloat32Register(0), i.InputFloat32Register(1));
      } else {
        DCHECK(instr->InputAt(1)->IsImmediate());
        // 0.0 is the only immediate supported by fcmp instructions.
        DCHECK(i.InputFloat32(1) == 0.0f);
        __ Fcmp(i.InputFloat32Register(0), i.InputFloat32(1));
      }
      break;
    case kArm64Float32Add:
      __ Fadd(i.OutputFloat32Register(), i.InputFloat32Register(0),
              i.InputFloat32Register(1));
      break;
    case kArm64Float32Sub:
      __ Fsub(i.OutputFloat32Register(), i.InputFloat32Register(0),
              i.InputFloat32Register(1));
      break;
    case kArm64Float32Mul:
      __ Fmul(i.OutputFloat32Register(), i.InputFloat32Register(0),
              i.InputFloat32Register(1));
      break;
    case kArm64Float32Div:
      __ Fdiv(i.OutputFloat32Register(), i.InputFloat32Register(0),
              i.InputFloat32Register(1));
      break;
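    // For the float Min/Max cases below there is no explicit NaN check:
    // "lo" is only taken for an ordered strictly-less-than result, so when
    // either input is NaN the fcmp reports unordered and Fcsel picks the
    // second (right-hand) operand. (Assumed fcmp/fcsel flag behavior.)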
   1234     case kArm64Float32Max:
   1235       // (b < a) ? a : b
   1236       __ Fcmp(i.InputFloat32Register(1), i.InputFloat32Register(0));
   1237       __ Fcsel(i.OutputFloat32Register(), i.InputFloat32Register(0),
   1238                i.InputFloat32Register(1), lo);
   1239       break;
   1240     case kArm64Float32Min:
   1241       // (a < b) ? a : b
   1242       __ Fcmp(i.InputFloat32Register(0), i.InputFloat32Register(1));
   1243       __ Fcsel(i.OutputFloat32Register(), i.InputFloat32Register(0),
   1244                i.InputFloat32Register(1), lo);
   1245       break;
   1246     case kArm64Float32Abs:
   1247       __ Fabs(i.OutputFloat32Register(), i.InputFloat32Register(0));
   1248       break;
   1249     case kArm64Float32Neg:
   1250       __ Fneg(i.OutputFloat32Register(), i.InputFloat32Register(0));
   1251       break;
   1252     case kArm64Float32Sqrt:
   1253       __ Fsqrt(i.OutputFloat32Register(), i.InputFloat32Register(0));
   1254       break;
   1255     case kArm64Float64Cmp:
   1256       if (instr->InputAt(1)->IsFPRegister()) {
   1257         __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
   1258       } else {
   1259         DCHECK(instr->InputAt(1)->IsImmediate());
   1260         // 0.0 is the only immediate supported by fcmp instructions.
   1261         DCHECK(i.InputDouble(1) == 0.0);
   1262         __ Fcmp(i.InputDoubleRegister(0), i.InputDouble(1));
   1263       }
   1264       break;
   1265     case kArm64Float64Add:
   1266       __ Fadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
   1267               i.InputDoubleRegister(1));
   1268       break;
   1269     case kArm64Float64Sub:
   1270       __ Fsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
   1271               i.InputDoubleRegister(1));
   1272       break;
   1273     case kArm64Float64Mul:
   1274       __ Fmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
   1275               i.InputDoubleRegister(1));
   1276       break;
   1277     case kArm64Float64Div:
   1278       __ Fdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
   1279               i.InputDoubleRegister(1));
   1280       break;
    case kArm64Float64Mod: {
      // TODO(dcarney): implement directly. See note in lithium-codegen-arm64.cc
      FrameScope scope(masm(), StackFrame::MANUAL);
      DCHECK(d0.is(i.InputDoubleRegister(0)));
      DCHECK(d1.is(i.InputDoubleRegister(1)));
      DCHECK(d0.is(i.OutputDoubleRegister()));
      // TODO(dcarney): make sure this saves all relevant registers.
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      break;
    }
    case kArm64Float64Max:
      // (b < a) ? a : b
      __ Fcmp(i.InputDoubleRegister(1), i.InputDoubleRegister(0));
      __ Fcsel(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1), lo);
      break;
    case kArm64Float64Min:
      // (a < b) ? a : b
      __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
      __ Fcsel(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
               i.InputDoubleRegister(1), lo);
      break;
    case kArm64Float64Abs:
      __ Fabs(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64Neg:
      __ Fneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float64Sqrt:
      __ Fsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64Float32ToFloat64:
      __ Fcvt(i.OutputDoubleRegister(), i.InputDoubleRegister(0).S());
      break;
    case kArm64Float64ToFloat32:
      __ Fcvt(i.OutputDoubleRegister().S(), i.InputDoubleRegister(0));
      break;
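    // The float-to-integer conversions below rely on Fcvtzs/Fcvtzu rounding
    // toward zero and saturating on overflow (with NaN converting to 0), so
    // the single-output forms need no extra range checks.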
    case kArm64Float32ToInt32:
      __ Fcvtzs(i.OutputRegister32(), i.InputFloat32Register(0));
      break;
    case kArm64Float64ToInt32:
      __ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
      break;
    case kArm64Float32ToUint32:
      __ Fcvtzu(i.OutputRegister32(), i.InputFloat32Register(0));
      break;
    case kArm64Float64ToUint32:
      __ Fcvtzu(i.OutputRegister32(), i.InputDoubleRegister(0));
      break;
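    // The 64-bit conversions optionally produce a second "success" output.
    // Sketch of the check used below: Cmp(out, 1) overflows (sets V) only
    // for out == INT64_MIN, and Ccmp(out, -1, ...) overflows only for
    // out == INT64_MAX, so V ends up set exactly when Fcvtzs saturated;
    // Fccmp(in, in, ...) additionally sets V for a NaN input. If V is clear
    // we branch out with success == 1; otherwise success is granted only
    // when the input was exactly INT64_MIN, the one value whose saturated
    // result is still correct.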
    case kArm64Float32ToInt64:
      __ Fcvtzs(i.OutputRegister64(), i.InputFloat32Register(0));
      if (i.OutputCount() > 1) {
        __ Mov(i.OutputRegister(1), 1);
        Label done;
        __ Cmp(i.OutputRegister(0), 1);
        __ Ccmp(i.OutputRegister(0), -1, VFlag, vc);
        __ Fccmp(i.InputFloat32Register(0), i.InputFloat32Register(0), VFlag,
                 vc);
        __ B(vc, &done);
        __ Fcmp(i.InputFloat32Register(0), static_cast<float>(INT64_MIN));
        __ Cset(i.OutputRegister(1), eq);
        __ Bind(&done);
      }
      break;
    case kArm64Float64ToInt64:
      __ Fcvtzs(i.OutputRegister(0), i.InputDoubleRegister(0));
      if (i.OutputCount() > 1) {
        __ Mov(i.OutputRegister(1), 1);
        Label done;
        __ Cmp(i.OutputRegister(0), 1);
        __ Ccmp(i.OutputRegister(0), -1, VFlag, vc);
        __ Fccmp(i.InputDoubleRegister(0), i.InputDoubleRegister(0), VFlag, vc);
        __ B(vc, &done);
        __ Fcmp(i.InputDoubleRegister(0), static_cast<double>(INT64_MIN));
        __ Cset(i.OutputRegister(1), eq);
        __ Bind(&done);
      }
      break;
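    // The unsigned variants use a simpler success check: the input must
    // compare greater than -1.0 (which also rules out NaN, since unordered
    // fails 'gt'), and the result must not be the saturation value of all
    // ones. Ccmp substitutes ZFlag on the failing path, so the final
    // Cset(ne) reads as "in range and not saturated".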
    case kArm64Float32ToUint64:
      __ Fcvtzu(i.OutputRegister64(), i.InputFloat32Register(0));
      if (i.OutputCount() > 1) {
        __ Fcmp(i.InputFloat32Register(0), -1.0);
        __ Ccmp(i.OutputRegister(0), -1, ZFlag, gt);
        __ Cset(i.OutputRegister(1), ne);
      }
      break;
    case kArm64Float64ToUint64:
      __ Fcvtzu(i.OutputRegister64(), i.InputDoubleRegister(0));
      if (i.OutputCount() > 1) {
        __ Fcmp(i.InputDoubleRegister(0), -1.0);
        __ Ccmp(i.OutputRegister(0), -1, ZFlag, gt);
        __ Cset(i.OutputRegister(1), ne);
      }
      break;
    case kArm64Int32ToFloat32:
      __ Scvtf(i.OutputFloat32Register(), i.InputRegister32(0));
      break;
    case kArm64Int32ToFloat64:
      __ Scvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
      break;
    case kArm64Int64ToFloat32:
      __ Scvtf(i.OutputDoubleRegister().S(), i.InputRegister64(0));
      break;
    case kArm64Int64ToFloat64:
      __ Scvtf(i.OutputDoubleRegister(), i.InputRegister64(0));
      break;
    case kArm64Uint32ToFloat32:
      __ Ucvtf(i.OutputFloat32Register(), i.InputRegister32(0));
      break;
    case kArm64Uint32ToFloat64:
      __ Ucvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
      break;
    case kArm64Uint64ToFloat32:
      __ Ucvtf(i.OutputDoubleRegister().S(), i.InputRegister64(0));
      break;
    case kArm64Uint64ToFloat64:
      __ Ucvtf(i.OutputDoubleRegister(), i.InputRegister64(0));
      break;
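    // The word extract/insert cases below shuffle raw bit patterns between
    // general and FP registers with Fmov: reading through the S view of a
    // D register yields its low 32 bits, and Bfi(tmp, reg, lsb, 32) splices
    // a new 32-bit half into the scratch register without disturbing the
    // other half.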
    case kArm64Float64ExtractLowWord32:
      __ Fmov(i.OutputRegister32(), i.InputFloat32Register(0));
      break;
    case kArm64Float64ExtractHighWord32:
      // TODO(arm64): This should use MOV (to general) when NEON is supported.
      __ Fmov(i.OutputRegister(), i.InputFloat64Register(0));
      __ Lsr(i.OutputRegister(), i.OutputRegister(), 32);
      break;
    case kArm64Float64InsertLowWord32: {
      // TODO(arm64): This should use MOV (from general) when NEON is supported.
      UseScratchRegisterScope scope(masm());
      Register tmp = scope.AcquireX();
      __ Fmov(tmp, i.InputFloat64Register(0));
      __ Bfi(tmp, i.InputRegister(1), 0, 32);
      __ Fmov(i.OutputFloat64Register(), tmp);
      break;
    }
    case kArm64Float64InsertHighWord32: {
      // TODO(arm64): This should use MOV (from general) when NEON is supported.
      UseScratchRegisterScope scope(masm());
      Register tmp = scope.AcquireX();
      __ Fmov(tmp.W(), i.InputFloat32Register(0));
      __ Bfi(tmp, i.InputRegister(1), 32, 32);
      __ Fmov(i.OutputFloat64Register(), tmp);
      break;
    }
    case kArm64Float64MoveU64:
      __ Fmov(i.OutputFloat64Register(), i.InputRegister(0));
      break;
    case kArm64Float64SilenceNaN:
      __ CanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
      break;
    case kArm64U64MoveFloat64:
      __ Fmov(i.OutputRegister(), i.InputDoubleRegister(0));
      break;
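    // Plain loads and stores follow. The InputOrZeroRegister* and
    // InputFloat*OrZeroRegister helpers let a zero store arrive as an
    // immediate: rather than materializing 0 in a scratch register, the
    // store simply reads the architectural zero register (wzr/xzr).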
    case kArm64Ldrb:
      __ Ldrb(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Ldrsb:
      __ Ldrsb(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Strb:
      __ Strb(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
      break;
    case kArm64Ldrh:
      __ Ldrh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Ldrsh:
      __ Ldrsh(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Strh:
      __ Strh(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
      break;
    case kArm64LdrW:
      __ Ldr(i.OutputRegister32(), i.MemoryOperand());
      break;
    case kArm64StrW:
      __ Str(i.InputOrZeroRegister32(0), i.MemoryOperand(1));
      break;
    case kArm64Ldr:
      __ Ldr(i.OutputRegister(), i.MemoryOperand());
      break;
    case kArm64Str:
      __ Str(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
      break;
    case kArm64LdrS:
      __ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand());
      break;
    case kArm64StrS:
      __ Str(i.InputFloat32OrZeroRegister(0), i.MemoryOperand(1));
      break;
    case kArm64LdrD:
      __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
      break;
    case kArm64StrD:
      __ Str(i.InputFloat64OrZeroRegister(0), i.MemoryOperand(1));
      break;
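    // The checked accesses expand through the ASSEMBLE_CHECKED_* macros
    // defined earlier in this file: each emits an unsigned bounds check that
    // branches to an out-of-line path (producing zero for loads, skipping
    // stores) before performing the access named by the macro argument.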
    case kCheckedLoadInt8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsb);
      break;
    case kCheckedLoadUint8:
      ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrb);
      break;
    case kCheckedLoadInt16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsh);
      break;
    case kCheckedLoadUint16:
      ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrh);
      break;
    case kCheckedLoadWord32:
      ASSEMBLE_CHECKED_LOAD_INTEGER(Ldr);
      break;
    case kCheckedLoadWord64:
      ASSEMBLE_CHECKED_LOAD_INTEGER_64(Ldr);
      break;
    case kCheckedLoadFloat32:
      ASSEMBLE_CHECKED_LOAD_FLOAT(32);
      break;
    case kCheckedLoadFloat64:
      ASSEMBLE_CHECKED_LOAD_FLOAT(64);
      break;
    case kCheckedStoreWord8:
      ASSEMBLE_CHECKED_STORE_INTEGER(Strb);
      break;
    case kCheckedStoreWord16:
      ASSEMBLE_CHECKED_STORE_INTEGER(Strh);
      break;
    case kCheckedStoreWord32:
      ASSEMBLE_CHECKED_STORE_INTEGER(Str);
      break;
    case kCheckedStoreWord64:
      ASSEMBLE_CHECKED_STORE_INTEGER_64(Str);
      break;
    case kCheckedStoreFloat32:
      ASSEMBLE_CHECKED_STORE_FLOAT(32);
      break;
    case kCheckedStoreFloat64:
      ASSEMBLE_CHECKED_STORE_FLOAT(64);
      break;
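    // Atomics are mapped to plain accesses bracketed by full
    // Dmb(InnerShareable, BarrierAll) barriers: load-then-barrier for the
    // loads, barrier-store-barrier for the stores. ARMv8's ldar/stlr
    // acquire/release instructions would be a lighter-weight alternative to
    // this conservative scheme.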
    case kAtomicLoadInt8:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldrsb);
      break;
    case kAtomicLoadUint8:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldrb);
      break;
    case kAtomicLoadInt16:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldrsh);
      break;
    case kAtomicLoadUint16:
      ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldrh);
      break;
    case kAtomicLoadWord32:
      __ Ldr(i.OutputRegister32(),
             MemOperand(i.InputRegister(0), i.InputRegister(1)));
      __ Dmb(InnerShareable, BarrierAll);
      break;
    case kAtomicStoreWord8:
      ASSEMBLE_ATOMIC_STORE_INTEGER(Strb);
      break;
    case kAtomicStoreWord16:
      ASSEMBLE_ATOMIC_STORE_INTEGER(Strh);
      break;
    case kAtomicStoreWord32:
      __ Dmb(InnerShareable, BarrierAll);
      __ Str(i.InputRegister32(2),
             MemOperand(i.InputRegister(0), i.InputRegister(1)));
      __ Dmb(InnerShareable, BarrierAll);
      break;
  }
  return kSuccess;
}  // NOLINT(readability/fn_size)


// Assemble branches after this instruction.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  Arm64OperandConverter i(this, instr);
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  FlagsCondition condition = branch->condition;
  ArchOpcode opcode = instr->arch_opcode();

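  // Four opcodes fuse the flag-setting operation into the branch itself via
  // cbz/cbnz (compare and branch on zero) and tbz/tbnz (test a single bit
  // and branch), saving an instruction; everything else falls back to a
  // conditional B on the flags left by the preceding instruction.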
  if (opcode == kArm64CompareAndBranch32) {
    switch (condition) {
      case kEqual:
        __ Cbz(i.InputRegister32(0), tlabel);
        break;
      case kNotEqual:
        __ Cbnz(i.InputRegister32(0), tlabel);
        break;
      default:
        UNREACHABLE();
    }
  } else if (opcode == kArm64CompareAndBranch) {
    switch (condition) {
      case kEqual:
        __ Cbz(i.InputRegister64(0), tlabel);
        break;
      case kNotEqual:
        __ Cbnz(i.InputRegister64(0), tlabel);
        break;
      default:
        UNREACHABLE();
    }
  } else if (opcode == kArm64TestAndBranch32) {
    switch (condition) {
      case kEqual:
        __ Tbz(i.InputRegister32(0), i.InputInt5(1), tlabel);
        break;
      case kNotEqual:
        __ Tbnz(i.InputRegister32(0), i.InputInt5(1), tlabel);
        break;
      default:
        UNREACHABLE();
    }
  } else if (opcode == kArm64TestAndBranch) {
    switch (condition) {
      case kEqual:
        __ Tbz(i.InputRegister64(0), i.InputInt6(1), tlabel);
        break;
      case kNotEqual:
        __ Tbnz(i.InputRegister64(0), i.InputInt6(1), tlabel);
        break;
      default:
        UNREACHABLE();
    }
  } else {
    Condition cc = FlagsConditionToCondition(condition);
    __ B(cc, tlabel);
  }
  if (!branch->fallthru) __ B(flabel);  // no fallthru to flabel.
}


void CodeGenerator::AssembleArchJump(RpoNumber target) {
  if (!IsNextInAssemblyOrder(target)) __ B(GetLabel(target));
}


// Assemble boolean materializations after this instruction.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  Arm64OperandConverter i(this, instr);

  // Materialize a full 64-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  DCHECK_NE(0u, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);
  Condition cc = FlagsConditionToCondition(condition);
  __ Cset(reg, cc);
}


void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
  Arm64OperandConverter i(this, instr);
  Register input = i.InputRegister32(0);
  for (size_t index = 2; index < instr->InputCount(); index += 2) {
    __ Cmp(input, i.InputInt32(index + 0));
    __ B(eq, GetLabel(i.InputRpo(index + 1)));
  }
  AssembleArchJump(i.InputRpo(1));
}


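// Table switches are lowered to an inline jump table: a bounds check that
// falls back to the default target, then an indirect branch into a table of
// B instructions emitted immediately afterwards. Each entry is one 4-byte
// instruction, hence the UXTW #2 scaling, and pool emission is blocked so
// the table stays contiguous. The emitted sequence is roughly:
//
//   cmp  w_input, #case_count
//   b.hs default
//   adr  x_tmp, table
//   add  x_tmp, x_tmp, w_input, uxtw #2
//   br   x_tmp
//  table:
//   b    case_0
//   b    case_1
//   ...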
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  Arm64OperandConverter i(this, instr);
  UseScratchRegisterScope scope(masm());
  Register input = i.InputRegister32(0);
  Register temp = scope.AcquireX();
  size_t const case_count = instr->InputCount() - 2;
  Label table;
  __ Cmp(input, case_count);
  __ B(hs, GetLabel(i.InputRpo(1)));
  __ Adr(temp, &table);
  __ Add(temp, temp, Operand(input, UXTW, 2));
  __ Br(temp);
  __ StartBlockPools();
  __ Bind(&table);
  for (size_t index = 0; index < case_count; ++index) {
    __ B(GetLabel(i.InputRpo(index + 2)));
  }
  __ EndBlockPools();
}

CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
    int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
      isolate(), deoptimization_id, bailout_type);
  if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
  return kSuccess;
}

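// Arm64 V8 maintains two stack pointers: csp, the architectural stack
// pointer, which must be 16-byte aligned whenever it is used to access
// memory, and jssp, an ordinary register serving as the JavaScript stack
// pointer. C-function and native-stack frames run on csp; everything else
// runs on jssp.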
void CodeGenerator::FinishFrame(Frame* frame) {
  frame->AlignFrame(16);
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();

  if (descriptor->UseNativeStack() || descriptor->IsCFunctionCall()) {
    __ SetStackPointer(csp);
  } else {
    __ SetStackPointer(jssp);
  }

  // Save FP registers.
  CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
                                   descriptor->CalleeSavedFPRegisters());
  int saved_count = saves_fp.Count();
  if (saved_count != 0) {
    DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedFP().list());
    frame->AllocateSavedCalleeRegisterSlots(saved_count *
                                            (kDoubleSize / kPointerSize));
  }

  CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
                                descriptor->CalleeSavedRegisters());
  saved_count = saves.Count();
  if (saved_count != 0) {
    frame->AllocateSavedCalleeRegisterSlots(saved_count);
  }
}

void CodeGenerator::AssembleConstructFrame() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (descriptor->UseNativeStack()) {
    __ AssertCspAligned();
  }

  if (frame_access_state()->has_frame()) {
    if (descriptor->IsJSFunctionCall()) {
      DCHECK(!descriptor->UseNativeStack());
      __ Prologue(this->info()->GeneratePreagedPrologue());
    } else {
      if (descriptor->IsCFunctionCall()) {
        __ Push(lr, fp);
        __ Mov(fp, masm_.StackPointer());
        __ Claim(frame()->GetSpillSlotCount());
      } else {
        __ StubPrologue(info()->GetOutputStackFrameType(),
                        frame()->GetTotalFrameSlotCount());
      }
    }
  }

  int shrink_slots = frame()->GetSpillSlotCount();

  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly from
    // the unoptimized frame. Thus, all that needs to be done is to allocate the
    // remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
  }

  if (descriptor->IsJSFunctionCall()) {
    __ Claim(shrink_slots);
  }

  // Save FP registers.
  CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
                                   descriptor->CalleeSavedFPRegisters());
  int saved_count = saves_fp.Count();
  if (saved_count != 0) {
    DCHECK(saves_fp.list() == CPURegList::GetCalleeSavedFP().list());
    __ PushCPURegList(saves_fp);
  }
  // Save registers.
  // TODO(palfia): TF save list is not in sync with
  // CPURegList::GetCalleeSaved(): x30 is missing.
  // DCHECK(saves.list() == CPURegList::GetCalleeSaved().list());
  CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
                                descriptor->CalleeSavedRegisters());
  saved_count = saves.Count();
  if (saved_count != 0) {
    __ PushCPURegList(saves);
  }
}


void CodeGenerator::AssembleReturn() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();

  // Restore registers.
  CPURegList saves = CPURegList(CPURegister::kRegister, kXRegSizeInBits,
                                descriptor->CalleeSavedRegisters());
  if (saves.Count() != 0) {
    __ PopCPURegList(saves);
  }

  // Restore fp registers.
  CPURegList saves_fp = CPURegList(CPURegister::kFPRegister, kDRegSizeInBits,
                                   descriptor->CalleeSavedFPRegisters());
  if (saves_fp.Count() != 0) {
    __ PopCPURegList(saves_fp);
  }

  int pop_count = static_cast<int>(descriptor->StackParameterCount());
  if (descriptor->IsCFunctionCall()) {
    AssembleDeconstructFrame();
  } else if (frame_access_state()->has_frame()) {
    // Canonicalize JSFunction return sites for now.
    if (return_label_.is_bound()) {
      __ B(&return_label_);
      return;
    } else {
      __ Bind(&return_label_);
      AssembleDeconstructFrame();
      if (descriptor->UseNativeStack()) {
        pop_count += (pop_count & 1);  // align
      }
    }
  } else if (descriptor->UseNativeStack()) {
    pop_count += (pop_count & 1);  // align
  }
  __ Drop(pop_count);

  if (descriptor->UseNativeStack()) {
    __ AssertCspAligned();
  }
  __ Ret();
}


void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  Arm64OperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ Mov(g.ToRegister(destination), src);
    } else {
      __ Str(src, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsStackSlot()) {
    MemOperand src = g.ToMemOperand(source, masm());
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    if (destination->IsRegister()) {
      __ Ldr(g.ToRegister(destination), src);
    } else {
      UseScratchRegisterScope scope(masm());
      Register temp = scope.AcquireX();
      __ Ldr(temp, src);
      __ Str(temp, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(ConstantOperand::cast(source));
    if (destination->IsRegister() || destination->IsStackSlot()) {
      UseScratchRegisterScope scope(masm());
      Register dst = destination->IsRegister() ? g.ToRegister(destination)
                                               : scope.AcquireX();
      if (src.type() == Constant::kHeapObject) {
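        // Heap-object constants are materialized from the cheapest available
        // source: reloaded from a spill slot in the current frame, loaded
        // off the root list, or, failing both, embedded in the code via
        // LoadObject.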
        Handle<HeapObject> src_object = src.ToHeapObject();
        Heap::RootListIndex index;
        int slot;
        if (IsMaterializableFromFrame(src_object, &slot)) {
          __ Ldr(dst, g.SlotToMemOperand(slot, masm()));
        } else if (IsMaterializableFromRoot(src_object, &index)) {
          __ LoadRoot(dst, index);
        } else {
          __ LoadObject(dst, src_object);
        }
      } else {
        __ Mov(dst, g.ToImmediate(source));
      }
      if (destination->IsStackSlot()) {
        __ Str(dst, g.ToMemOperand(destination, masm()));
      }
    } else if (src.type() == Constant::kFloat32) {
      if (destination->IsFPRegister()) {
        FPRegister dst = g.ToDoubleRegister(destination).S();
        __ Fmov(dst, src.ToFloat32());
      } else {
        DCHECK(destination->IsFPStackSlot());
        UseScratchRegisterScope scope(masm());
        FPRegister temp = scope.AcquireS();
        __ Fmov(temp, src.ToFloat32());
        __ Str(temp, g.ToMemOperand(destination, masm()));
      }
    } else {
      DCHECK_EQ(Constant::kFloat64, src.type());
      if (destination->IsFPRegister()) {
        FPRegister dst = g.ToDoubleRegister(destination);
        __ Fmov(dst, src.ToFloat64());
      } else {
        DCHECK(destination->IsFPStackSlot());
        UseScratchRegisterScope scope(masm());
        FPRegister temp = scope.AcquireD();
        __ Fmov(temp, src.ToFloat64());
        __ Str(temp, g.ToMemOperand(destination, masm()));
      }
    }
  } else if (source->IsFPRegister()) {
    FPRegister src = g.ToDoubleRegister(source);
    if (destination->IsFPRegister()) {
      FPRegister dst = g.ToDoubleRegister(destination);
      __ Fmov(dst, src);
    } else {
      DCHECK(destination->IsFPStackSlot());
      __ Str(src, g.ToMemOperand(destination, masm()));
    }
  } else if (source->IsFPStackSlot()) {
    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
    MemOperand src = g.ToMemOperand(source, masm());
    if (destination->IsFPRegister()) {
      __ Ldr(g.ToDoubleRegister(destination), src);
    } else {
      UseScratchRegisterScope scope(masm());
      FPRegister temp = scope.AcquireD();
      __ Ldr(temp, src);
      __ Str(temp, g.ToMemOperand(destination, masm()));
    }
  } else {
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  Arm64OperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    UseScratchRegisterScope scope(masm());
    Register temp = scope.AcquireX();
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ Mov(temp, src);
      __ Mov(src, dst);
      __ Mov(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination, masm());
      __ Mov(temp, src);
      __ Ldr(src, dst);
      __ Str(temp, dst);
    }
  } else if (source->IsStackSlot() || source->IsFPStackSlot()) {
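    // Slot-to-slot swaps go through two D-register temporaries whether the
    // slots hold integer or FP values; only the 64-bit bit pattern matters,
    // and using FP scratch registers leaves the integer scratch pool
    // untouched.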
    UseScratchRegisterScope scope(masm());
    DoubleRegister temp_0 = scope.AcquireD();
    DoubleRegister temp_1 = scope.AcquireD();
    MemOperand src = g.ToMemOperand(source, masm());
    MemOperand dst = g.ToMemOperand(destination, masm());
    __ Ldr(temp_0, src);
    __ Ldr(temp_1, dst);
    __ Str(temp_0, dst);
    __ Str(temp_1, src);
  } else if (source->IsFPRegister()) {
    UseScratchRegisterScope scope(masm());
    FPRegister temp = scope.AcquireD();
    FPRegister src = g.ToDoubleRegister(source);
    if (destination->IsFPRegister()) {
      FPRegister dst = g.ToDoubleRegister(destination);
      __ Fmov(temp, src);
      __ Fmov(src, dst);
      __ Fmov(dst, temp);
    } else {
      DCHECK(destination->IsFPStackSlot());
      MemOperand dst = g.ToMemOperand(destination, masm());
      __ Fmov(temp, src);
      __ Ldr(src, dst);
      __ Str(temp, dst);
    }
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}


void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
  // On 64-bit ARM we emit the jump tables inline.
  UNREACHABLE();
}


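// Lazy deoptimization later patches a call sequence over the code following
// a safepoint, so consecutive lazy-deopt points must lie at least
// patch_size() bytes apart. The nop loop below pads out any shortfall; the
// InstructionAccurateScope checks that exactly that many instructions are
// emitted and keeps constant pools from landing in the gap.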
void CodeGenerator::EnsureSpaceForLazyDeopt() {
  if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
    return;
  }

  int space_needed = Deoptimizer::patch_size();
  // Ensure that we have enough space after the previous lazy-bailout
  // instruction for patching the code here.
  intptr_t current_pc = masm()->pc_offset();

  if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
    intptr_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
    DCHECK((padding_size % kInstructionSize) == 0);
    InstructionAccurateScope instruction_accurate(
        masm(), padding_size / kInstructionSize);

    while (padding_size > 0) {
      __ nop();
      padding_size -= kInstructionSize;
    }
  }
}

#undef __

}  // namespace compiler
}  // namespace internal
}  // namespace v8