// Copyright 2013 the V8 project authors. All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#if V8_TARGET_ARCH_ARM64

#define ARM64_DEFINE_REG_STATICS
#include "src/arm64/assembler-arm64.h"

#include "src/arm64/assembler-arm64-inl.h"
#include "src/arm64/frames-arm64.h"
#include "src/base/bits.h"
#include "src/base/cpu.h"
#include "src/register-configuration.h"

namespace v8 {
namespace internal {


// -----------------------------------------------------------------------------
// CpuFeatures implementation.

void CpuFeatures::ProbeImpl(bool cross_compile) {
  // AArch64 has no configuration options, so no further probing is required.
  supported_ = 0;

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

  // Probe for runtime features.
  base::CPU cpu;
  if (cpu.implementer() == base::CPU::NVIDIA &&
      cpu.variant() == base::CPU::NVIDIA_DENVER &&
      cpu.part() <= base::CPU::NVIDIA_DENVER_V10) {
    supported_ |= 1u << COHERENT_CACHE;
  }
}


void CpuFeatures::PrintTarget() { }


void CpuFeatures::PrintFeatures() {
  printf("COHERENT_CACHE=%d\n", CpuFeatures::IsSupported(COHERENT_CACHE));
}


// -----------------------------------------------------------------------------
// CPURegList utilities.

CPURegister CPURegList::PopLowestIndex() {
  DCHECK(IsValid());
  if (IsEmpty()) {
    return NoCPUReg;
  }
  int index = CountTrailingZeros(list_, kRegListSizeInBits);
  DCHECK((1ULL << index) & list_);
  Remove(index);
  return CPURegister::Create(index, size_, type_);
}


CPURegister CPURegList::PopHighestIndex() {
  DCHECK(IsValid());
  if (IsEmpty()) {
    return NoCPUReg;
  }
  int index = CountLeadingZeros(list_, kRegListSizeInBits);
  index = kRegListSizeInBits - 1 - index;
  DCHECK((1ULL << index) & list_);
  Remove(index);
  return CPURegister::Create(index, size_, type_);
}
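
// A minimal usage sketch of the Pop* helpers above (illustrative only):
//
//   CPURegList list(x3, x7, x1);
//   list.PopLowestIndex();   // Returns x1; the list now holds {x3, x7}.
//   list.PopHighestIndex();  // Returns x7; the list now holds {x3}.
//
// Registers come out ordered by register code, not by insertion order.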


void CPURegList::RemoveCalleeSaved() {
  if (type() == CPURegister::kRegister) {
    Remove(GetCalleeSaved(RegisterSizeInBits()));
  } else if (type() == CPURegister::kFPRegister) {
    Remove(GetCalleeSavedFP(RegisterSizeInBits()));
  } else {
    DCHECK(type() == CPURegister::kNoRegister);
    DCHECK(IsEmpty());
    // The list must already be empty, so do nothing.
  }
}


CPURegList CPURegList::GetCalleeSaved(int size) {
  return CPURegList(CPURegister::kRegister, size, 19, 29);
}


CPURegList CPURegList::GetCalleeSavedFP(int size) {
  return CPURegList(CPURegister::kFPRegister, size, 8, 15);
}


CPURegList CPURegList::GetCallerSaved(int size) {
  // Registers x0-x18 and lr (x30) are caller-saved.
  CPURegList list = CPURegList(CPURegister::kRegister, size, 0, 18);
  list.Combine(lr);
  return list;
}


CPURegList CPURegList::GetCallerSavedFP(int size) {
  // Registers d0-d7 and d16-d31 are caller-saved.
  CPURegList list = CPURegList(CPURegister::kFPRegister, size, 0, 7);
  list.Combine(CPURegList(CPURegister::kFPRegister, size, 16, 31));
  return list;
}


// This function defines the list of registers which are associated with a
// safepoint slot. Safepoint register slots are saved contiguously on the stack.
// MacroAssembler::SafepointRegisterStackIndex handles mapping from register
// code to index in the safepoint register slots. Any change here can affect
// this mapping.
CPURegList CPURegList::GetSafepointSavedRegisters() {
  CPURegList list = CPURegList::GetCalleeSaved();
  list.Combine(
      CPURegList(CPURegister::kRegister, kXRegSizeInBits, kJSCallerSaved));

  // Note that unfortunately we can't use symbolic names for registers and have
  // to directly use register codes. This is because this function is used to
  // initialize some static variables and we can't rely on register variables
  // to be initialized due to static initialization order issues in C++.

  // Drop ip0 and ip1 (i.e. x16 and x17), as they should not be expected to be
  // preserved outside of the macro assembler.
  list.Remove(16);
  list.Remove(17);

  // Add x18 to the safepoint list, as although it's not in kJSCallerSaved, it
  // is a caller-saved register according to the procedure call standard.
  list.Combine(18);

  // Drop jssp as the stack pointer doesn't need to be included.
  list.Remove(28);

  // Add the link register (x30) to the safepoint list.
  list.Combine(30);

  return list;
}


// -----------------------------------------------------------------------------
// Implementation of RelocInfo

const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE;


bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially coded. Being
  // specially coded on ARM64 means that it is a movz/movk sequence. We don't
  // generate those for relocatable pointers.
  return false;
}


bool RelocInfo::IsInConstantPool() {
  Instruction* instr = reinterpret_cast<Instruction*>(pc_);
  return instr->IsLdrLiteralX();
}


Register GetAllocatableRegisterThatIsNotOneOf(Register reg1, Register reg2,
                                              Register reg3, Register reg4) {
  CPURegList regs(reg1, reg2, reg3, reg4);
  const RegisterConfiguration* config =
      RegisterConfiguration::ArchDefault(RegisterConfiguration::CRANKSHAFT);
  for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
    int code = config->GetAllocatableDoubleCode(i);
    Register candidate = Register::from_code(code);
    if (regs.IncludesAliasOf(candidate)) continue;
    return candidate;
  }
  UNREACHABLE();
  return NoReg;
}


bool AreAliased(const CPURegister& reg1, const CPURegister& reg2,
                const CPURegister& reg3, const CPURegister& reg4,
                const CPURegister& reg5, const CPURegister& reg6,
                const CPURegister& reg7, const CPURegister& reg8) {
  int number_of_valid_regs = 0;
  int number_of_valid_fpregs = 0;

  RegList unique_regs = 0;
  RegList unique_fpregs = 0;

  const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};

  for (unsigned i = 0; i < arraysize(regs); i++) {
    if (regs[i].IsRegister()) {
      number_of_valid_regs++;
      unique_regs |= regs[i].Bit();
    } else if (regs[i].IsFPRegister()) {
      number_of_valid_fpregs++;
      unique_fpregs |= regs[i].Bit();
    } else {
      DCHECK(!regs[i].IsValid());
    }
  }

  int number_of_unique_regs =
    CountSetBits(unique_regs, sizeof(unique_regs) * kBitsPerByte);
  int number_of_unique_fpregs =
    CountSetBits(unique_fpregs, sizeof(unique_fpregs) * kBitsPerByte);

  DCHECK(number_of_valid_regs >= number_of_unique_regs);
  DCHECK(number_of_valid_fpregs >= number_of_unique_fpregs);

  return (number_of_valid_regs != number_of_unique_regs) ||
         (number_of_valid_fpregs != number_of_unique_fpregs);
}
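
// A quick sketch of how AreAliased() behaves (illustrative only):
//
//   AreAliased(x0, x1, x2);  // false: all distinct.
//   AreAliased(x0, x1, x0);  // true: x0 appears twice.
//   AreAliased(x0, w0);      // true: w0 is the low half of x0, so both set
//                            //       the same bit in the RegList.
//   AreAliased(x0, d0);      // false: core and FP registers are tracked in
//                            //        separate bit sets.
//
// Invalid (NoReg) arguments are simply skipped, which is what allows the
// eight-argument form to be called with fewer registers.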


bool AreSameSizeAndType(const CPURegister& reg1, const CPURegister& reg2,
                        const CPURegister& reg3, const CPURegister& reg4,
                        const CPURegister& reg5, const CPURegister& reg6,
                        const CPURegister& reg7, const CPURegister& reg8) {
  DCHECK(reg1.IsValid());
  bool match = true;
  match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1);
  match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1);
  match &= !reg4.IsValid() || reg4.IsSameSizeAndType(reg1);
  match &= !reg5.IsValid() || reg5.IsSameSizeAndType(reg1);
  match &= !reg6.IsValid() || reg6.IsSameSizeAndType(reg1);
  match &= !reg7.IsValid() || reg7.IsSameSizeAndType(reg1);
  match &= !reg8.IsValid() || reg8.IsSameSizeAndType(reg1);
  return match;
}


void Immediate::InitializeHandle(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;

  // Verify that all Objects referred to by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    value_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    STATIC_ASSERT(sizeof(intptr_t) == sizeof(int64_t));
    value_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE64;
  }
}


bool Operand::NeedsRelocation(const Assembler* assembler) const {
  RelocInfo::Mode rmode = immediate_.rmode();

  if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
    return assembler->serializer_enabled();
  }

  return !RelocInfo::IsNone(rmode);
}


// Constant Pool.
void ConstPool::RecordEntry(intptr_t data,
                            RelocInfo::Mode mode) {
  DCHECK(mode != RelocInfo::COMMENT &&
         mode != RelocInfo::POSITION &&
         mode != RelocInfo::STATEMENT_POSITION &&
         mode != RelocInfo::CONST_POOL &&
         mode != RelocInfo::VENEER_POOL &&
         mode != RelocInfo::CODE_AGE_SEQUENCE &&
         mode != RelocInfo::DEOPT_REASON);
  uint64_t raw_data = static_cast<uint64_t>(data);
  int offset = assm_->pc_offset();
  if (IsEmpty()) {
    first_use_ = offset;
  }

  std::pair<uint64_t, int> entry = std::make_pair(raw_data, offset);
  if (CanBeShared(mode)) {
    shared_entries_.insert(entry);
    if (shared_entries_.count(entry.first) == 1) {
      shared_entries_count++;
    }
  } else {
    unique_entries_.push_back(entry);
  }

  if (EntryCount() > Assembler::kApproxMaxPoolEntryCount) {
    // Request constant pool emission after the next instruction.
    assm_->SetNextConstPoolCheckIn(1);
  }
}
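
// A sketch of the lifecycle of a pool entry (illustrative only, assuming the
// literal-load overload ldr(rt, Immediate(...)) declared in
// assembler-arm64.h): a load such as
//
//   ldr(x0, Immediate(0x1234567890abcdefUL));
//
// is emitted as 'ldr x0, [pc, #0]' and its data/offset pair is recorded here.
// The placeholder offset is patched to point at the pooled constant when
// EmitEntries() runs below.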


int ConstPool::DistanceToFirstUse() {
  DCHECK(first_use_ >= 0);
  return assm_->pc_offset() - first_use_;
}


int ConstPool::MaxPcOffset() {
  // There are no pending entries in the pool, so we can never be out of
  // range.
  if (IsEmpty()) return kMaxInt;

  // Entries are not necessarily emitted in the order they are added, so in
  // the worst case the first constant pool use will be accessing the last
  // entry.
  return first_use_ + kMaxLoadLiteralRange - WorstCaseSize();
}


int ConstPool::WorstCaseSize() {
  if (IsEmpty()) return 0;

  // Max size prologue:
  //   b   over
  //   ldr xzr, #pool_size
  //   blr xzr
  //   nop
  // All entries are 64-bit for now.
  return 4 * kInstructionSize + EntryCount() * kPointerSize;
}
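
// Worked example for WorstCaseSize() (illustrative only): with 10 pending
// entries, the worst case is
//
//   4 * kInstructionSize + 10 * kPointerSize = 4 * 4 + 10 * 8 = 96 bytes,
//
// i.e. a full four-instruction prologue plus one 64-bit slot per entry.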


int ConstPool::SizeIfEmittedAtCurrentPc(bool require_jump) {
  if (IsEmpty()) return 0;

  // Prologue is:
  //   b   over  ;; if require_jump
  //   ldr xzr, #pool_size
  //   blr xzr
  //   nop       ;; if not 64-bit aligned
  int prologue_size = require_jump ? kInstructionSize : 0;
  prologue_size += 2 * kInstructionSize;
  prologue_size += IsAligned(assm_->pc_offset() + prologue_size, 8) ?
                   0 : kInstructionSize;

  // All entries are 64-bit for now.
  return prologue_size + EntryCount() * kPointerSize;
}


void ConstPool::Emit(bool require_jump) {
  DCHECK(!assm_->is_const_pool_blocked());
  // Prevent recursive pool emission and protect from veneer pools.
  Assembler::BlockPoolsScope block_pools(assm_);

  int size = SizeIfEmittedAtCurrentPc(require_jump);
  Label size_check;
  assm_->bind(&size_check);

  assm_->RecordConstPool(size);
  // Emit the constant pool. It is preceded by an optional branch if
  // require_jump and a header which will:
  //  1) Encode the size of the constant pool, for use by the disassembler.
  //  2) Terminate the program, to try to prevent execution from accidentally
  //     flowing into the constant pool.
  //  3) Align the pool entries to 64 bits.
  // The header is therefore made of up to three arm64 instructions:
  //   ldr xzr, #<size of the constant pool in 32-bit words>
  //   blr xzr
  //   nop
  //
  // If executed, the header will likely segfault and lr will point to the
  // instruction following the offending blr.
  // TODO(all): Make the alignment part less fragile. Code is currently
  // allocated as a byte array, so there is no guarantee that the alignment
  // will be preserved on compaction. It currently works because allocation
  // appears to be 64-bit aligned.

  // Emit the branch if required.
  Label after_pool;
  if (require_jump) {
    assm_->b(&after_pool);
  }

  // Emit the header.
  assm_->RecordComment("[ Constant Pool");
  EmitMarker();
  EmitGuard();
  assm_->Align(8);

  // Emit constant pool entries.
  // TODO(all): currently each relocated constant is 64 bits, consider adding
  // support for 32-bit entries.
  EmitEntries();
  assm_->RecordComment("]");

  if (after_pool.is_linked()) {
    assm_->bind(&after_pool);
  }

  DCHECK(assm_->SizeOfCodeGeneratedSince(&size_check) ==
         static_cast<unsigned>(size));
}


void ConstPool::Clear() {
  shared_entries_.clear();
  shared_entries_count = 0;
  unique_entries_.clear();
  first_use_ = -1;
}


bool ConstPool::CanBeShared(RelocInfo::Mode mode) {
  // Constant pool currently does not support 32-bit entries.
  DCHECK(mode != RelocInfo::NONE32);

  return RelocInfo::IsNone(mode) ||
         (!assm_->serializer_enabled() && (mode >= RelocInfo::CELL));
}


void ConstPool::EmitMarker() {
  // A constant pool size is expressed in number of 32-bit words.
  // Currently all entries are 64-bit.
  // + 1 is for the crash guard.
  // + 0/1 for alignment.
  int word_count = EntryCount() * 2 + 1 +
                   (IsAligned(assm_->pc_offset(), 8) ? 0 : 1);
  assm_->Emit(LDR_x_lit                          |
              Assembler::ImmLLiteral(word_count) |
              Assembler::Rt(xzr));
}
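
// Worked example for the marker (illustrative only): with EntryCount() == 3
// and a pc that is not 64-bit aligned,
//
//   word_count = 3 * 2 + 1 + 1 = 8,
//
// i.e. six words of 64-bit entries, one word for the 'blr xzr' guard and one
// word for the alignment nop.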


MemOperand::PairResult MemOperand::AreConsistentForPair(
    const MemOperand& operandA,
    const MemOperand& operandB,
    int access_size_log2) {
  DCHECK(access_size_log2 >= 0);
  DCHECK(access_size_log2 <= 3);
  // Step one: check that they share the same base, that the mode is Offset
  // and that the offset is a multiple of access size.
  if (!operandA.base().Is(operandB.base()) ||
      (operandA.addrmode() != Offset) ||
      (operandB.addrmode() != Offset) ||
      ((operandA.offset() & ((1 << access_size_log2) - 1)) != 0)) {
    return kNotPair;
  }
  // Step two: check that the offsets are contiguous and that the range
  // is OK for ldp/stp.
  if ((operandB.offset() == operandA.offset() + (1 << access_size_log2)) &&
      is_int7(operandA.offset() >> access_size_log2)) {
    return kPairAB;
  }
  if ((operandA.offset() == operandB.offset() + (1 << access_size_log2)) &&
      is_int7(operandB.offset() >> access_size_log2)) {
    return kPairBA;
  }
  return kNotPair;
}
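
// A quick sketch of AreConsistentForPair() (illustrative only), with
// access_size_log2 == 3 (64-bit accesses):
//
//   MemOperand a(x0, 8);
//   MemOperand b(x0, 16);
//   AreConsistentForPair(a, b, 3);  // kPairAB: 'ldp xt, xt2, [x0, #8]'.
//   AreConsistentForPair(b, a, 3);  // kPairBA: same slots, swapped operands.
//   MemOperand c(x0, 24);
//   AreConsistentForPair(a, c, 3);  // kNotPair: offsets are not contiguous.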


void ConstPool::EmitGuard() {
#ifdef DEBUG
  Instruction* instr = reinterpret_cast<Instruction*>(assm_->pc());
  DCHECK(instr->preceding()->IsLdrLiteralX() &&
         instr->preceding()->Rt() == xzr.code());
#endif
  assm_->EmitPoolGuard();
}


void ConstPool::EmitEntries() {
  DCHECK(IsAligned(assm_->pc_offset(), 8));

  typedef std::multimap<uint64_t, int>::const_iterator SharedEntriesIterator;
  SharedEntriesIterator value_it;
  // Iterate through the keys (constant pool values).
  for (value_it = shared_entries_.begin();
       value_it != shared_entries_.end();
       value_it = shared_entries_.upper_bound(value_it->first)) {
    std::pair<SharedEntriesIterator, SharedEntriesIterator> range;
    uint64_t data = value_it->first;
    range = shared_entries_.equal_range(data);
    SharedEntriesIterator offset_it;
    // Iterate through the offsets of a given key.
    for (offset_it = range.first; offset_it != range.second; offset_it++) {
      Instruction* instr = assm_->InstructionAt(offset_it->second);

      // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
      DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
      instr->SetImmPCOffsetTarget(assm_->isolate(), assm_->pc());
    }
    assm_->dc64(data);
  }
  shared_entries_.clear();
  shared_entries_count = 0;

  // Emit unique entries.
  std::vector<std::pair<uint64_t, int> >::const_iterator unique_it;
  for (unique_it = unique_entries_.begin();
       unique_it != unique_entries_.end();
       unique_it++) {
    Instruction* instr = assm_->InstructionAt(unique_it->second);

    // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
    DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
    instr->SetImmPCOffsetTarget(assm_->isolate(), assm_->pc());
    assm_->dc64(unique_it->first);
  }
  unique_entries_.clear();
  first_use_ = -1;
}


// Assembler
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      constpool_(this),
      recorded_ast_id_(TypeFeedbackId::None()),
      unresolved_branches_(),
      positions_recorder_(this) {
  const_pool_blocked_nesting_ = 0;
  veneer_pool_blocked_nesting_ = 0;
  Reset();
}


Assembler::~Assembler() {
  DCHECK(constpool_.IsEmpty());
  DCHECK(const_pool_blocked_nesting_ == 0);
  DCHECK(veneer_pool_blocked_nesting_ == 0);
}


void Assembler::Reset() {
#ifdef DEBUG
  DCHECK((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
  DCHECK(const_pool_blocked_nesting_ == 0);
  DCHECK(veneer_pool_blocked_nesting_ == 0);
  DCHECK(unresolved_branches_.empty());
  memset(buffer_, 0, pc_ - buffer_);
#endif
  pc_ = buffer_;
  reloc_info_writer.Reposition(reinterpret_cast<byte*>(buffer_ + buffer_size_),
                               reinterpret_cast<byte*>(pc_));
  constpool_.Clear();
  next_constant_pool_check_ = 0;
  next_veneer_pool_check_ = kMaxInt;
  no_const_pool_before_ = 0;
  ClearRecordedAstId();
}


void Assembler::GetCode(CodeDesc* desc) {
  reloc_info_writer.Finish();
  // Emit constant pool if necessary.
  CheckConstPool(true, false);
  DCHECK(constpool_.IsEmpty());

  // Set up code descriptor.
  if (desc) {
    desc->buffer = reinterpret_cast<byte*>(buffer_);
    desc->buffer_size = buffer_size_;
    desc->instr_size = pc_offset();
    desc->reloc_size =
        static_cast<int>((reinterpret_cast<byte*>(buffer_) + buffer_size_) -
                         reloc_info_writer.pos());
    desc->origin = this;
    desc->constant_pool_size = 0;
  }
}


void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


void Assembler::CheckLabelLinkChain(Label const* label) {
#ifdef DEBUG
  if (label->is_linked()) {
    static const int kMaxLinksToCheck = 64;  // Avoid O(n^2) behaviour.
    int links_checked = 0;
    int64_t linkoffset = label->pos();
    bool end_of_chain = false;
    while (!end_of_chain) {
      if (++links_checked > kMaxLinksToCheck) break;
      Instruction* link = InstructionAt(linkoffset);
      int64_t linkpcoffset = link->ImmPCOffset();
      int64_t prevlinkoffset = linkoffset + linkpcoffset;

      end_of_chain = (linkoffset == prevlinkoffset);
      linkoffset = linkoffset + linkpcoffset;
    }
  }
#endif
}


void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
                                               Label* label,
                                               Instruction* label_veneer) {
  DCHECK(label->is_linked());

  CheckLabelLinkChain(label);

  Instruction* link = InstructionAt(label->pos());
  Instruction* prev_link = link;
  Instruction* next_link;
  bool end_of_chain = false;

  while (link != branch && !end_of_chain) {
    next_link = link->ImmPCOffsetTarget();
    end_of_chain = (link == next_link);
    prev_link = link;
    link = next_link;
  }

  DCHECK(branch == link);
  next_link = branch->ImmPCOffsetTarget();

  if (branch == prev_link) {
    // The branch is the first instruction in the chain.
    if (branch == next_link) {
      // It is also the last instruction in the chain, so it is the only branch
      // currently referring to this label.
      label->Unuse();
    } else {
      label->link_to(
          static_cast<int>(reinterpret_cast<byte*>(next_link) - buffer_));
    }

  } else if (branch == next_link) {
    // The branch is the last (but not also the first) instruction in the chain.
    prev_link->SetImmPCOffsetTarget(isolate(), prev_link);

  } else {
    // The branch is in the middle of the chain.
    if (prev_link->IsTargetInImmPCOffsetRange(next_link)) {
      prev_link->SetImmPCOffsetTarget(isolate(), next_link);
    } else if (label_veneer != NULL) {
      // Use the veneer for all previous links in the chain.
      prev_link->SetImmPCOffsetTarget(isolate(), prev_link);

      end_of_chain = false;
      link = next_link;
      while (!end_of_chain) {
        next_link = link->ImmPCOffsetTarget();
        end_of_chain = (link == next_link);
        link->SetImmPCOffsetTarget(isolate(), label_veneer);
        link = next_link;
      }
    } else {
      // The CHECK below will fire.
      // Some other work could be attempted to fix up the chain, but it would
      // be rather complicated. If we crash here, we may want to consider
      // using a mechanism other than a chain of branches.
      //
      // Note that this situation currently should not happen, as we always call
      // this function with a veneer to the target label.
      // However this could happen with a MacroAssembler in the following state:
      //    [previous code]
      //    B(label);
      //    [20KB code]
      //    Tbz(label);   // First tbz. Pointing to unconditional branch.
      //    [20KB code]
      //    Tbz(label);   // Second tbz. Pointing to the first tbz.
      //    [more code]
      // and this function is called to remove the first tbz from the label link
      // chain. Since tbz has a range of +-32KB, the second tbz cannot point to
      // the unconditional branch.
      CHECK(prev_link->IsTargetInImmPCOffsetRange(next_link));
      UNREACHABLE();
    }
  }

  CheckLabelLinkChain(label);
}


void Assembler::bind(Label* label) {
  // Bind label to the address at pc_. All instructions (most likely branches)
  // that are linked to this label will be updated to point to the newly-bound
  // label.

  DCHECK(!label->is_near_linked());
  DCHECK(!label->is_bound());

  DeleteUnresolvedBranchInfoForLabel(label);

  // If the label is linked, the link chain looks something like this:
  //
  // |--I----I-------I-------L
  // |---------------------->| pc_offset
  // |-------------->|         linkoffset = label->pos()
  //         |<------|         link->ImmPCOffset()
  // |------>|                 prevlinkoffset = linkoffset + link->ImmPCOffset()
  //
  // On each iteration, the last link is updated and then removed from the
  // chain until only one remains. At that point, the label is bound.
  //
  // If the label is not linked, no preparation is required before binding.
  while (label->is_linked()) {
    int linkoffset = label->pos();
    Instruction* link = InstructionAt(linkoffset);
    int prevlinkoffset = linkoffset + static_cast<int>(link->ImmPCOffset());

    CheckLabelLinkChain(label);

    DCHECK(linkoffset >= 0);
    DCHECK(linkoffset < pc_offset());
    DCHECK((linkoffset > prevlinkoffset) ||
           (linkoffset - prevlinkoffset == kStartOfLabelLinkChain));
    DCHECK(prevlinkoffset >= 0);

    // Update the link to point to the label.
    if (link->IsUnresolvedInternalReference()) {
      // Internal references do not get patched to an instruction but directly
      // to an address.
      internal_reference_positions_.push_back(linkoffset);
      PatchingAssembler patcher(isolate(), link, 2);
      patcher.dc64(reinterpret_cast<uintptr_t>(pc_));
    } else {
      link->SetImmPCOffsetTarget(isolate(),
                                 reinterpret_cast<Instruction*>(pc_));
    }

    // Link the label to the previous link in the chain.
    if (linkoffset - prevlinkoffset == kStartOfLabelLinkChain) {
      // We hit kStartOfLabelLinkChain, so the chain is fully processed.
      label->Unuse();
    } else {
      // Update the label for the next iteration.
      label->link_to(prevlinkoffset);
    }
  }
  label->bind_to(pc_offset());

  DCHECK(label->is_bound());
  DCHECK(!label->is_linked());
}
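
// A worked example of the loop above (illustrative only): suppose three
// branches at offsets 0x00, 0x20 and 0x40 are linked to the same label, and
// bind() is called at pc_offset() == 0x60.
//
//   label->pos() == 0x40          // The last link in the chain.
//   0x40 is patched to 0x60, then label->link_to(0x20).
//   0x20 is patched to 0x60, then label->link_to(0x00).
//   0x00 is patched to 0x60; its ImmPCOffset() was kStartOfLabelLinkChain,
//   so the loop exits and the label binds to 0x60.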


int Assembler::LinkAndGetByteOffsetTo(Label* label) {
  DCHECK(sizeof(*pc_) == 1);
  CheckLabelLinkChain(label);

  int offset;
  if (label->is_bound()) {
    // The label is bound, so it does not need to be updated. Referring
    // instructions must link directly to the label as they will not be
    // updated.
    //
    // In this case, label->pos() returns the offset of the label from the
    // start of the buffer.
    //
    // Note that offset can be zero for self-referential instructions. (This
    // could be useful for ADR, for example.)
    offset = label->pos() - pc_offset();
    DCHECK(offset <= 0);
  } else {
    if (label->is_linked()) {
      // The label is linked, so the referring instruction should be added onto
      // the end of the label's link chain.
      //
      // In this case, label->pos() returns the offset of the last linked
      // instruction from the start of the buffer.
      offset = label->pos() - pc_offset();
      DCHECK(offset != kStartOfLabelLinkChain);
      // Note that the offset here needs to be PC-relative only so that the
      // first instruction in a buffer can link to an unbound label. Otherwise,
      // the offset would be 0 for this case, and 0 is reserved for
      // kStartOfLabelLinkChain.
    } else {
      // The label is unused, so it now becomes linked and the referring
      // instruction is at the start of the new link chain.
      offset = kStartOfLabelLinkChain;
    }
    // The instruction at pc is now the last link in the label's chain.
    label->link_to(pc_offset());
  }

  return offset;
}
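
// A sketch of the three cases above (illustrative only), for an instruction
// being assembled at pc_offset() == 0x40:
//
//   Label bound at 0x10:    returns -0x30 (branch backwards to the label).
//   Label linked at 0x20:   returns -0x20 (point at the previous link) and
//                           the chain head moves to 0x40.
//   Label unused:           returns kStartOfLabelLinkChain (0) and a new
//                           chain starts at 0x40.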


void Assembler::DeleteUnresolvedBranchInfoForLabelTraverse(Label* label) {
  DCHECK(label->is_linked());
  CheckLabelLinkChain(label);

  int link_offset = label->pos();
  int link_pcoffset;
  bool end_of_chain = false;

  while (!end_of_chain) {
    Instruction* link = InstructionAt(link_offset);
    link_pcoffset = static_cast<int>(link->ImmPCOffset());

    // ADR instructions are not handled by veneers.
    if (link->IsImmBranch()) {
      int max_reachable_pc =
          static_cast<int>(InstructionOffset(link) +
                           Instruction::ImmBranchRange(link->BranchType()));
      typedef std::multimap<int, FarBranchInfo>::iterator unresolved_info_it;
      std::pair<unresolved_info_it, unresolved_info_it> range;
      range = unresolved_branches_.equal_range(max_reachable_pc);
      unresolved_info_it it;
      for (it = range.first; it != range.second; ++it) {
        if (it->second.pc_offset_ == link_offset) {
          unresolved_branches_.erase(it);
          break;
        }
      }
    }

    end_of_chain = (link_pcoffset == 0);
    link_offset = link_offset + link_pcoffset;
  }
}


void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
  if (unresolved_branches_.empty()) {
    DCHECK(next_veneer_pool_check_ == kMaxInt);
    return;
  }

  if (label->is_linked()) {
    // Branches to this label will be resolved when the label is bound, normally
    // just after all the associated info has been deleted.
    DeleteUnresolvedBranchInfoForLabelTraverse(label);
  }
  if (unresolved_branches_.empty()) {
    next_veneer_pool_check_ = kMaxInt;
  } else {
    next_veneer_pool_check_ =
      unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
  }
}


void Assembler::StartBlockConstPool() {
  if (const_pool_blocked_nesting_++ == 0) {
    // Prevent constant pool checks from happening by setting the next check
    // to the biggest possible offset.
    next_constant_pool_check_ = kMaxInt;
  }
}


void Assembler::EndBlockConstPool() {
  if (--const_pool_blocked_nesting_ == 0) {
    // Check the constant pool hasn't been blocked for too long.
    DCHECK(pc_offset() < constpool_.MaxPcOffset());
    // Two cases:
    //  * no_const_pool_before_ >= next_constant_pool_check_ and the emission is
    //    still blocked
    //  * no_const_pool_before_ < next_constant_pool_check_ and the next emit
    //    will trigger a check.
    next_constant_pool_check_ = no_const_pool_before_;
  }
}


bool Assembler::is_const_pool_blocked() const {
  return (const_pool_blocked_nesting_ > 0) ||
         (pc_offset() < no_const_pool_before_);
}


bool Assembler::IsConstantPoolAt(Instruction* instr) {
  // The constant pool marker is made of two instructions. These instructions
  // will never be emitted by the JIT, so checking for the first one is enough:
  // 0: ldr xzr, #<size of pool>
  bool result = instr->IsLdrLiteralX() && (instr->Rt() == kZeroRegCode);

  // It is still worth asserting the marker is complete.
  // 4: blr xzr
  DCHECK(!result || (instr->following()->IsBranchAndLinkToRegister() &&
                     instr->following()->Rn() == kZeroRegCode));

  return result;
}
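
// The full marker recognised here looks like this in the instruction stream
// (sketch; the encoded size is in 32-bit words):
//
//   ldr xzr, #<pool size>   // Matched above; never emitted by the JIT.
//   blr xzr                 // Guard, asserted above via following().
//   nop                     // Optional alignment, not part of the check.
//   <64-bit pool entries>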


int Assembler::ConstantPoolSizeAt(Instruction* instr) {
#ifdef USE_SIMULATOR
  // Assembler::debug() embeds constants directly into the instruction stream.
  // Although this is not a genuine constant pool, treat it like one to avoid
  // disassembling the constants.
  if ((instr->Mask(ExceptionMask) == HLT) &&
      (instr->ImmException() == kImmExceptionIsDebug)) {
    const char* message =
        reinterpret_cast<const char*>(
            instr->InstructionAtOffset(kDebugMessageOffset));
    int size = static_cast<int>(kDebugMessageOffset + strlen(message) + 1);
    return RoundUp(size, kInstructionSize) / kInstructionSize;
  }
  // Same for printf support, see MacroAssembler::CallPrintf().
  if ((instr->Mask(ExceptionMask) == HLT) &&
      (instr->ImmException() == kImmExceptionIsPrintf)) {
    return kPrintfLength / kInstructionSize;
  }
#endif
  if (IsConstantPoolAt(instr)) {
    return instr->ImmLLiteral();
  } else {
    return -1;
  }
}


void Assembler::EmitPoolGuard() {
  // We must generate only one instruction as this is used in scopes that
  // control the size of the code generated.
  Emit(BLR | Rn(xzr));
}


void Assembler::StartBlockVeneerPool() {
  ++veneer_pool_blocked_nesting_;
}


void Assembler::EndBlockVeneerPool() {
  if (--veneer_pool_blocked_nesting_ == 0) {
    // Check the veneer pool hasn't been blocked for too long.
    DCHECK(unresolved_branches_.empty() ||
           (pc_offset() < unresolved_branches_first_limit()));
  }
}


void Assembler::br(const Register& xn) {
  positions_recorder()->WriteRecordedPositions();
  DCHECK(xn.Is64Bits());
  Emit(BR | Rn(xn));
}


void Assembler::blr(const Register& xn) {
  positions_recorder()->WriteRecordedPositions();
  DCHECK(xn.Is64Bits());
  // The pattern 'blr xzr' is used as a guard to detect when execution falls
  // through the constant pool. It should not be emitted.
  DCHECK(!xn.Is(xzr));
  Emit(BLR | Rn(xn));
}


void Assembler::ret(const Register& xn) {
  positions_recorder()->WriteRecordedPositions();
  DCHECK(xn.Is64Bits());
  Emit(RET | Rn(xn));
}


void Assembler::b(int imm26) {
  Emit(B | ImmUncondBranch(imm26));
}


void Assembler::b(Label* label) {
  positions_recorder()->WriteRecordedPositions();
  b(LinkAndGetInstructionOffsetTo(label));
}


void Assembler::b(int imm19, Condition cond) {
  Emit(B_cond | ImmCondBranch(imm19) | cond);
}


void Assembler::b(Label* label, Condition cond) {
  positions_recorder()->WriteRecordedPositions();
  b(LinkAndGetInstructionOffsetTo(label), cond);
}


void Assembler::bl(int imm26) {
  positions_recorder()->WriteRecordedPositions();
  Emit(BL | ImmUncondBranch(imm26));
}


void Assembler::bl(Label* label) {
  positions_recorder()->WriteRecordedPositions();
  bl(LinkAndGetInstructionOffsetTo(label));
}


void Assembler::cbz(const Register& rt,
                    int imm19) {
  positions_recorder()->WriteRecordedPositions();
  Emit(SF(rt) | CBZ | ImmCmpBranch(imm19) | Rt(rt));
}


void Assembler::cbz(const Register& rt,
                    Label* label) {
  positions_recorder()->WriteRecordedPositions();
  cbz(rt, LinkAndGetInstructionOffsetTo(label));
}


void Assembler::cbnz(const Register& rt,
                     int imm19) {
  positions_recorder()->WriteRecordedPositions();
  Emit(SF(rt) | CBNZ | ImmCmpBranch(imm19) | Rt(rt));
}


void Assembler::cbnz(const Register& rt,
                     Label* label) {
  positions_recorder()->WriteRecordedPositions();
  cbnz(rt, LinkAndGetInstructionOffsetTo(label));
}


void Assembler::tbz(const Register& rt,
                    unsigned bit_pos,
                    int imm14) {
  positions_recorder()->WriteRecordedPositions();
  DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
  Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}


void Assembler::tbz(const Register& rt,
                    unsigned bit_pos,
                    Label* label) {
  positions_recorder()->WriteRecordedPositions();
  tbz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
}


void Assembler::tbnz(const Register& rt,
                     unsigned bit_pos,
                     int imm14) {
  positions_recorder()->WriteRecordedPositions();
  DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
  Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}


void Assembler::tbnz(const Register& rt,
                     unsigned bit_pos,
                     Label* label) {
  positions_recorder()->WriteRecordedPositions();
  tbnz(rt, bit_pos, LinkAndGetInstructionOffsetTo(label));
}


void Assembler::adr(const Register& rd, int imm21) {
  DCHECK(rd.Is64Bits());
  Emit(ADR | ImmPCRelAddress(imm21) | Rd(rd));
}


void Assembler::adr(const Register& rd, Label* label) {
  adr(rd, LinkAndGetByteOffsetTo(label));
}


void Assembler::add(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSub(rd, rn, operand, LeaveFlags, ADD);
}


void Assembler::adds(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSub(rd, rn, operand, SetFlags, ADD);
}


void Assembler::cmn(const Register& rn,
                    const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rn);
  adds(zr, rn, operand);
}


void Assembler::sub(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSub(rd, rn, operand, LeaveFlags, SUB);
}


void Assembler::subs(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSub(rd, rn, operand, SetFlags, SUB);
}


void Assembler::cmp(const Register& rn, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rn);
  subs(zr, rn, operand);
}


void Assembler::neg(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sub(rd, zr, operand);
}


void Assembler::negs(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  subs(rd, zr, operand);
}


void Assembler::adc(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, LeaveFlags, ADC);
}


void Assembler::adcs(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, SetFlags, ADC);
}


void Assembler::sbc(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, LeaveFlags, SBC);
}


void Assembler::sbcs(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  AddSubWithCarry(rd, rn, operand, SetFlags, SBC);
}


void Assembler::ngc(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sbc(rd, zr, operand);
}


void Assembler::ngcs(const Register& rd, const Operand& operand) {
  Register zr = AppropriateZeroRegFor(rd);
  sbcs(rd, zr, operand);
}


// Logical instructions.
void Assembler::and_(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, AND);
}


void Assembler::ands(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, ANDS);
}


void Assembler::tst(const Register& rn,
                    const Operand& operand) {
  ands(AppropriateZeroRegFor(rn), rn, operand);
}


void Assembler::bic(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, BIC);
}


void Assembler::bics(const Register& rd,
                     const Register& rn,
                     const Operand& operand) {
  Logical(rd, rn, operand, BICS);
}


void Assembler::orr(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, ORR);
}


void Assembler::orn(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, ORN);
}


void Assembler::eor(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, EOR);
}


void Assembler::eon(const Register& rd,
                    const Register& rn,
                    const Operand& operand) {
  Logical(rd, rn, operand, EON);
}


void Assembler::lslv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::lsrv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::asrv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::rorv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd));
}


// Bitfield operations.
void Assembler::bfm(const Register& rd, const Register& rn, int immr,
                    int imms) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | BFM | N |
       ImmR(immr, rd.SizeInBits()) |
       ImmS(imms, rn.SizeInBits()) |
       Rn(rn) | Rd(rd));
}


void Assembler::sbfm(const Register& rd, const Register& rn, int immr,
                     int imms) {
  DCHECK(rd.Is64Bits() || rn.Is32Bits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | SBFM | N |
       ImmR(immr, rd.SizeInBits()) |
       ImmS(imms, rn.SizeInBits()) |
       Rn(rn) | Rd(rd));
}


void Assembler::ubfm(const Register& rd, const Register& rn, int immr,
                     int imms) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | UBFM | N |
       ImmR(immr, rd.SizeInBits()) |
       ImmS(imms, rn.SizeInBits()) |
       Rn(rn) | Rd(rd));
}
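
// The familiar shift and extend operations are aliases of these bitfield
// instructions; a couple of sketches (illustrative only):
//
//   lsr x0, x1, #4    ==  ubfm x0, x1, #4, #63
//   sxtw x0, w1       ==  sbfm x0, x1, #0, #31
//
// The shift and extend helpers declared in assembler-arm64.h reduce to
// ubfm/sbfm in exactly this way, which is also why the sbfm DCHECK above
// accepts a 64-bit destination with a 32-bit source.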


void Assembler::extr(const Register& rd, const Register& rn, const Register& rm,
                     int lsb) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
  Emit(SF(rd) | EXTR | N | Rm(rm) |
       ImmS(lsb, rn.SizeInBits()) | Rn(rn) | Rd(rd));
}


void Assembler::csel(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSEL);
}


void Assembler::csinc(const Register& rd,
                      const Register& rn,
                      const Register& rm,
                      Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSINC);
}


void Assembler::csinv(const Register& rd,
                      const Register& rn,
                      const Register& rm,
                      Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSINV);
}


void Assembler::csneg(const Register& rd,
                      const Register& rn,
                      const Register& rm,
                      Condition cond) {
  ConditionalSelect(rd, rn, rm, cond, CSNEG);
}


void Assembler::cset(const Register &rd, Condition cond) {
  DCHECK((cond != al) && (cond != nv));
  Register zr = AppropriateZeroRegFor(rd);
  csinc(rd, zr, zr, NegateCondition(cond));
}


void Assembler::csetm(const Register &rd, Condition cond) {
  DCHECK((cond != al) && (cond != nv));
  Register zr = AppropriateZeroRegFor(rd);
  csinv(rd, zr, zr, NegateCondition(cond));
}


void Assembler::cinc(const Register &rd, const Register &rn, Condition cond) {
  DCHECK((cond != al) && (cond != nv));
  csinc(rd, rn, rn, NegateCondition(cond));
}


void Assembler::cinv(const Register &rd, const Register &rn, Condition cond) {
  DCHECK((cond != al) && (cond != nv));
  csinv(rd, rn, rn, NegateCondition(cond));
}


void Assembler::cneg(const Register &rd, const Register &rn, Condition cond) {
  DCHECK((cond != al) && (cond != nv));
  csneg(rd, rn, rn, NegateCondition(cond));
}


void Assembler::ConditionalSelect(const Register& rd,
                                  const Register& rn,
                                  const Register& rm,
                                  Condition cond,
                                  ConditionalSelectOp op) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
}


void Assembler::ccmn(const Register& rn,
                     const Operand& operand,
                     StatusFlags nzcv,
                     Condition cond) {
  ConditionalCompare(rn, operand, nzcv, cond, CCMN);
}


void Assembler::ccmp(const Register& rn,
                     const Operand& operand,
                     StatusFlags nzcv,
                     Condition cond) {
  ConditionalCompare(rn, operand, nzcv, cond, CCMP);
}


void Assembler::DataProcessing3Source(const Register& rd,
                                      const Register& rn,
                                      const Register& rm,
                                      const Register& ra,
                                      DataProcessing3SourceOp op) {
  Emit(SF(rd) | op | Rm(rm) | Ra(ra) | Rn(rn) | Rd(rd));
}


void Assembler::mul(const Register& rd,
                    const Register& rn,
                    const Register& rm) {
  DCHECK(AreSameSizeAndType(rd, rn, rm));
  Register zr = AppropriateZeroRegFor(rn);
  DataProcessing3Source(rd, rn, rm, zr, MADD);
}


void Assembler::madd(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra) {
  DCHECK(AreSameSizeAndType(rd, rn, rm, ra));
  DataProcessing3Source(rd, rn, rm, ra, MADD);
}


void Assembler::mneg(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  DCHECK(AreSameSizeAndType(rd, rn, rm));
  Register zr = AppropriateZeroRegFor(rn);
  DataProcessing3Source(rd, rn, rm, zr, MSUB);
}


void Assembler::msub(const Register& rd,
                     const Register& rn,
                     const Register& rm,
                     const Register& ra) {
  DCHECK(AreSameSizeAndType(rd, rn, rm, ra));
  DataProcessing3Source(rd, rn, rm, ra, MSUB);
}


void Assembler::smaddl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  DCHECK(rd.Is64Bits() && ra.Is64Bits());
  DCHECK(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, SMADDL_x);
}


void Assembler::smsubl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  DCHECK(rd.Is64Bits() && ra.Is64Bits());
  DCHECK(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x);
}


void Assembler::umaddl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  DCHECK(rd.Is64Bits() && ra.Is64Bits());
  DCHECK(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, UMADDL_x);
}


void Assembler::umsubl(const Register& rd,
                       const Register& rn,
                       const Register& rm,
                       const Register& ra) {
  DCHECK(rd.Is64Bits() && ra.Is64Bits());
  DCHECK(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x);
}


void Assembler::smull(const Register& rd,
                      const Register& rn,
                      const Register& rm) {
  DCHECK(rd.Is64Bits());
  DCHECK(rn.Is32Bits() && rm.Is32Bits());
  DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x);
}


void Assembler::smulh(const Register& rd,
                      const Register& rn,
                      const Register& rm) {
  DCHECK(AreSameSizeAndType(rd, rn, rm));
  DataProcessing3Source(rd, rn, rm, xzr, SMULH_x);
}


void Assembler::sdiv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::udiv(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
  DCHECK(rd.SizeInBits() == rn.SizeInBits());
  DCHECK(rd.SizeInBits() == rm.SizeInBits());
  Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd));
}


void Assembler::rbit(const Register& rd,
                     const Register& rn) {
  DataProcessing1Source(rd, rn, RBIT);
}


void Assembler::rev16(const Register& rd,
                      const Register& rn) {
  DataProcessing1Source(rd, rn, REV16);
}


void Assembler::rev32(const Register& rd,
                      const Register& rn) {
  DCHECK(rd.Is64Bits());
  DataProcessing1Source(rd, rn, REV);
}


void Assembler::rev(const Register& rd,
                    const Register& rn) {
  DataProcessing1Source(rd, rn, rd.Is64Bits() ? REV_x : REV_w);
}


void Assembler::clz(const Register& rd,
                    const Register& rn) {
  DataProcessing1Source(rd, rn, CLZ);
   1566 }
   1567 
   1568 
   1569 void Assembler::cls(const Register& rd,
   1570                     const Register& rn) {
   1571   DataProcessing1Source(rd, rn, CLS);
   1572 }
   1573 
   1574 
   1575 void Assembler::ldp(const CPURegister& rt,
   1576                     const CPURegister& rt2,
   1577                     const MemOperand& src) {
   1578   LoadStorePair(rt, rt2, src, LoadPairOpFor(rt, rt2));
   1579 }
   1580 
   1581 
   1582 void Assembler::stp(const CPURegister& rt,
   1583                     const CPURegister& rt2,
   1584                     const MemOperand& dst) {
   1585   LoadStorePair(rt, rt2, dst, StorePairOpFor(rt, rt2));
   1586 }
   1587 
   1588 
   1589 void Assembler::ldpsw(const Register& rt,
   1590                       const Register& rt2,
   1591                       const MemOperand& src) {
   1592   DCHECK(rt.Is64Bits());
   1593   LoadStorePair(rt, rt2, src, LDPSW_x);
   1594 }
   1595 
   1596 
   1597 void Assembler::LoadStorePair(const CPURegister& rt,
   1598                               const CPURegister& rt2,
   1599                               const MemOperand& addr,
   1600                               LoadStorePairOp op) {
   1601   // 'rt' and 'rt2' can only be aliased for stores.
   1602   DCHECK(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
   1603   DCHECK(AreSameSizeAndType(rt, rt2));
   1604   DCHECK(IsImmLSPair(addr.offset(), CalcLSPairDataSize(op)));
   1605   int offset = static_cast<int>(addr.offset());
   1606 
   1607   Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
   1608                 ImmLSPair(offset, CalcLSPairDataSize(op));
   1609 
   1610   Instr addrmodeop;
   1611   if (addr.IsImmediateOffset()) {
   1612     addrmodeop = LoadStorePairOffsetFixed;
   1613   } else {
   1614     // Pre-index and post-index modes.
   1615     DCHECK(!rt.Is(addr.base()));
   1616     DCHECK(!rt2.Is(addr.base()));
   1617     DCHECK(addr.offset() != 0);
   1618     if (addr.IsPreIndex()) {
   1619       addrmodeop = LoadStorePairPreIndexFixed;
   1620     } else {
   1621       DCHECK(addr.IsPostIndex());
   1622       addrmodeop = LoadStorePairPostIndexFixed;
   1623     }
   1624   }
   1625   Emit(addrmodeop | memop);
   1626 }
   1627 
   1628 
   1629 // Memory instructions.
   1630 void Assembler::ldrb(const Register& rt, const MemOperand& src) {
   1631   LoadStore(rt, src, LDRB_w);
   1632 }
   1633 
   1634 
   1635 void Assembler::strb(const Register& rt, const MemOperand& dst) {
   1636   LoadStore(rt, dst, STRB_w);
   1637 }
   1638 
   1639 
   1640 void Assembler::ldrsb(const Register& rt, const MemOperand& src) {
   1641   LoadStore(rt, src, rt.Is64Bits() ? LDRSB_x : LDRSB_w);
   1642 }
   1643 
   1644 
   1645 void Assembler::ldrh(const Register& rt, const MemOperand& src) {
   1646   LoadStore(rt, src, LDRH_w);
   1647 }
   1648 
   1649 
   1650 void Assembler::strh(const Register& rt, const MemOperand& dst) {
   1651   LoadStore(rt, dst, STRH_w);
   1652 }
   1653 
   1654 
   1655 void Assembler::ldrsh(const Register& rt, const MemOperand& src) {
   1656   LoadStore(rt, src, rt.Is64Bits() ? LDRSH_x : LDRSH_w);
   1657 }
   1658 
   1659 
   1660 void Assembler::ldr(const CPURegister& rt, const MemOperand& src) {
   1661   LoadStore(rt, src, LoadOpFor(rt));
   1662 }
   1663 
   1664 
   1665 void Assembler::str(const CPURegister& rt, const MemOperand& src) {
   1666   LoadStore(rt, src, StoreOpFor(rt));
   1667 }
   1668 
   1669 
   1670 void Assembler::ldrsw(const Register& rt, const MemOperand& src) {
   1671   DCHECK(rt.Is64Bits());
   1672   LoadStore(rt, src, LDRSW_x);
   1673 }
   1674 
   1675 
   1676 void Assembler::ldr_pcrel(const CPURegister& rt, int imm19) {
   1677   // The pattern 'ldr xzr, #offset' is used to indicate the beginning of a
   1678   // constant pool. It should not be emitted.
   1679   DCHECK(!rt.IsZero());
   1680   Emit(LoadLiteralOpFor(rt) | ImmLLiteral(imm19) | Rt(rt));
   1681 }
   1682 
   1683 
   1684 void Assembler::ldr(const CPURegister& rt, const Immediate& imm) {
   1685   // Currently we only support 64-bit literals.
   1686   DCHECK(rt.Is64Bits());
   1687 
   1688   RecordRelocInfo(imm.rmode(), imm.value());
   1689   BlockConstPoolFor(1);
    1690   // The load will be patched when the constant pool is emitted; the
    1691   // patching code expects a load literal with offset 0.
   1692   ldr_pcrel(rt, 0);
   1693 }
   1694 
   1695 
   1696 void Assembler::mov(const Register& rd, const Register& rm) {
    1697   // Moves involving the stack pointer are encoded as an add immediate with
    1698   // a second operand of zero. Otherwise, an orr with the zero register as
    1699   // the first operand is used.
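           // For example, mov(csp, x17) is emitted as "add csp, x17, #0", while
           // mov(x0, x1) is emitted as "orr x0, xzr, x1".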
   1700   if (rd.IsSP() || rm.IsSP()) {
   1701     add(rd, rm, 0);
   1702   } else {
   1703     orr(rd, AppropriateZeroRegFor(rd), rm);
   1704   }
   1705 }
   1706 
   1707 
   1708 void Assembler::mvn(const Register& rd, const Operand& operand) {
   1709   orn(rd, AppropriateZeroRegFor(rd), operand);
   1710 }
   1711 
   1712 
   1713 void Assembler::mrs(const Register& rt, SystemRegister sysreg) {
   1714   DCHECK(rt.Is64Bits());
   1715   Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt));
   1716 }
   1717 
   1718 
   1719 void Assembler::msr(SystemRegister sysreg, const Register& rt) {
   1720   DCHECK(rt.Is64Bits());
   1721   Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg));
   1722 }
   1723 
   1724 
   1725 void Assembler::hint(SystemHint code) {
   1726   Emit(HINT | ImmHint(code) | Rt(xzr));
   1727 }
   1728 
   1729 
   1730 void Assembler::dmb(BarrierDomain domain, BarrierType type) {
   1731   Emit(DMB | ImmBarrierDomain(domain) | ImmBarrierType(type));
   1732 }
   1733 
   1734 
   1735 void Assembler::dsb(BarrierDomain domain, BarrierType type) {
   1736   Emit(DSB | ImmBarrierDomain(domain) | ImmBarrierType(type));
   1737 }
   1738 
   1739 
   1740 void Assembler::isb() {
   1741   Emit(ISB | ImmBarrierDomain(FullSystem) | ImmBarrierType(BarrierAll));
   1742 }
   1743 
   1744 
   1745 void Assembler::fmov(FPRegister fd, double imm) {
   1746   DCHECK(fd.Is64Bits());
   1747   DCHECK(IsImmFP64(imm));
   1748   Emit(FMOV_d_imm | Rd(fd) | ImmFP64(imm));
   1749 }
   1750 
   1751 
   1752 void Assembler::fmov(FPRegister fd, float imm) {
   1753   DCHECK(fd.Is32Bits());
   1754   DCHECK(IsImmFP32(imm));
   1755   Emit(FMOV_s_imm | Rd(fd) | ImmFP32(imm));
   1756 }
   1757 
   1758 
   1759 void Assembler::fmov(Register rd, FPRegister fn) {
   1760   DCHECK(rd.SizeInBits() == fn.SizeInBits());
   1761   FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd;
   1762   Emit(op | Rd(rd) | Rn(fn));
   1763 }
   1764 
   1765 
   1766 void Assembler::fmov(FPRegister fd, Register rn) {
   1767   DCHECK(fd.SizeInBits() == rn.SizeInBits());
   1768   FPIntegerConvertOp op = fd.Is32Bits() ? FMOV_sw : FMOV_dx;
   1769   Emit(op | Rd(fd) | Rn(rn));
   1770 }
   1771 
   1772 
   1773 void Assembler::fmov(FPRegister fd, FPRegister fn) {
   1774   DCHECK(fd.SizeInBits() == fn.SizeInBits());
   1775   Emit(FPType(fd) | FMOV | Rd(fd) | Rn(fn));
   1776 }
   1777 
   1778 
   1779 void Assembler::fadd(const FPRegister& fd,
   1780                      const FPRegister& fn,
   1781                      const FPRegister& fm) {
   1782   FPDataProcessing2Source(fd, fn, fm, FADD);
   1783 }
   1784 
   1785 
   1786 void Assembler::fsub(const FPRegister& fd,
   1787                      const FPRegister& fn,
   1788                      const FPRegister& fm) {
   1789   FPDataProcessing2Source(fd, fn, fm, FSUB);
   1790 }
   1791 
   1792 
   1793 void Assembler::fmul(const FPRegister& fd,
   1794                      const FPRegister& fn,
   1795                      const FPRegister& fm) {
   1796   FPDataProcessing2Source(fd, fn, fm, FMUL);
   1797 }
   1798 
   1799 
   1800 void Assembler::fmadd(const FPRegister& fd,
   1801                       const FPRegister& fn,
   1802                       const FPRegister& fm,
   1803                       const FPRegister& fa) {
   1804   FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMADD_s : FMADD_d);
   1805 }
   1806 
   1807 
   1808 void Assembler::fmsub(const FPRegister& fd,
   1809                       const FPRegister& fn,
   1810                       const FPRegister& fm,
   1811                       const FPRegister& fa) {
   1812   FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FMSUB_s : FMSUB_d);
   1813 }
   1814 
   1815 
   1816 void Assembler::fnmadd(const FPRegister& fd,
   1817                        const FPRegister& fn,
   1818                        const FPRegister& fm,
   1819                        const FPRegister& fa) {
   1820   FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMADD_s : FNMADD_d);
   1821 }
   1822 
   1823 
   1824 void Assembler::fnmsub(const FPRegister& fd,
   1825                        const FPRegister& fn,
   1826                        const FPRegister& fm,
   1827                        const FPRegister& fa) {
   1828   FPDataProcessing3Source(fd, fn, fm, fa, fd.Is32Bits() ? FNMSUB_s : FNMSUB_d);
   1829 }
   1830 
   1831 
   1832 void Assembler::fdiv(const FPRegister& fd,
   1833                      const FPRegister& fn,
   1834                      const FPRegister& fm) {
   1835   FPDataProcessing2Source(fd, fn, fm, FDIV);
   1836 }
   1837 
   1838 
   1839 void Assembler::fmax(const FPRegister& fd,
   1840                      const FPRegister& fn,
   1841                      const FPRegister& fm) {
   1842   FPDataProcessing2Source(fd, fn, fm, FMAX);
   1843 }
   1844 
   1845 
   1846 void Assembler::fmaxnm(const FPRegister& fd,
   1847                        const FPRegister& fn,
   1848                        const FPRegister& fm) {
   1849   FPDataProcessing2Source(fd, fn, fm, FMAXNM);
   1850 }
   1851 
   1852 
   1853 void Assembler::fmin(const FPRegister& fd,
   1854                      const FPRegister& fn,
   1855                      const FPRegister& fm) {
   1856   FPDataProcessing2Source(fd, fn, fm, FMIN);
   1857 }
   1858 
   1859 
   1860 void Assembler::fminnm(const FPRegister& fd,
   1861                        const FPRegister& fn,
   1862                        const FPRegister& fm) {
   1863   FPDataProcessing2Source(fd, fn, fm, FMINNM);
   1864 }
   1865 
   1866 
   1867 void Assembler::fabs(const FPRegister& fd,
   1868                      const FPRegister& fn) {
   1869   DCHECK(fd.SizeInBits() == fn.SizeInBits());
   1870   FPDataProcessing1Source(fd, fn, FABS);
   1871 }
   1872 
   1873 
   1874 void Assembler::fneg(const FPRegister& fd,
   1875                      const FPRegister& fn) {
   1876   DCHECK(fd.SizeInBits() == fn.SizeInBits());
   1877   FPDataProcessing1Source(fd, fn, FNEG);
   1878 }
   1879 
   1880 
   1881 void Assembler::fsqrt(const FPRegister& fd,
   1882                       const FPRegister& fn) {
   1883   DCHECK(fd.SizeInBits() == fn.SizeInBits());
   1884   FPDataProcessing1Source(fd, fn, FSQRT);
   1885 }
   1886 
   1887 
   1888 void Assembler::frinta(const FPRegister& fd,
   1889                        const FPRegister& fn) {
   1890   DCHECK(fd.SizeInBits() == fn.SizeInBits());
   1891   FPDataProcessing1Source(fd, fn, FRINTA);
   1892 }
   1893 
   1894 
   1895 void Assembler::frintm(const FPRegister& fd,
   1896                        const FPRegister& fn) {
   1897   DCHECK(fd.SizeInBits() == fn.SizeInBits());
   1898   FPDataProcessing1Source(fd, fn, FRINTM);
   1899 }
   1900 
   1901 
   1902 void Assembler::frintn(const FPRegister& fd,
   1903                        const FPRegister& fn) {
   1904   DCHECK(fd.SizeInBits() == fn.SizeInBits());
   1905   FPDataProcessing1Source(fd, fn, FRINTN);
   1906 }
   1907 
   1908 
   1909 void Assembler::frintp(const FPRegister& fd, const FPRegister& fn) {
   1910   DCHECK(fd.SizeInBits() == fn.SizeInBits());
   1911   FPDataProcessing1Source(fd, fn, FRINTP);
   1912 }
   1913 
   1914 
   1915 void Assembler::frintz(const FPRegister& fd,
   1916                        const FPRegister& fn) {
   1917   DCHECK(fd.SizeInBits() == fn.SizeInBits());
   1918   FPDataProcessing1Source(fd, fn, FRINTZ);
   1919 }
   1920 
   1921 
   1922 void Assembler::fcmp(const FPRegister& fn,
   1923                      const FPRegister& fm) {
   1924   DCHECK(fn.SizeInBits() == fm.SizeInBits());
   1925   Emit(FPType(fn) | FCMP | Rm(fm) | Rn(fn));
   1926 }
   1927 
   1928 
   1929 void Assembler::fcmp(const FPRegister& fn,
   1930                      double value) {
   1931   USE(value);
   1932   // Although the fcmp instruction can strictly only take an immediate value of
   1933   // +0.0, we don't need to check for -0.0 because the sign of 0.0 doesn't
   1934   // affect the result of the comparison.
   1935   DCHECK(value == 0.0);
   1936   Emit(FPType(fn) | FCMP_zero | Rn(fn));
   1937 }
   1938 
   1939 
   1940 void Assembler::fccmp(const FPRegister& fn,
   1941                       const FPRegister& fm,
   1942                       StatusFlags nzcv,
   1943                       Condition cond) {
   1944   DCHECK(fn.SizeInBits() == fm.SizeInBits());
   1945   Emit(FPType(fn) | FCCMP | Rm(fm) | Cond(cond) | Rn(fn) | Nzcv(nzcv));
   1946 }
   1947 
   1948 
   1949 void Assembler::fcsel(const FPRegister& fd,
   1950                       const FPRegister& fn,
   1951                       const FPRegister& fm,
   1952                       Condition cond) {
   1953   DCHECK(fd.SizeInBits() == fn.SizeInBits());
   1954   DCHECK(fd.SizeInBits() == fm.SizeInBits());
   1955   Emit(FPType(fd) | FCSEL | Rm(fm) | Cond(cond) | Rn(fn) | Rd(fd));
   1956 }
   1957 
   1958 
   1959 void Assembler::FPConvertToInt(const Register& rd,
   1960                                const FPRegister& fn,
   1961                                FPIntegerConvertOp op) {
   1962   Emit(SF(rd) | FPType(fn) | op | Rn(fn) | Rd(rd));
   1963 }
   1964 
   1965 
   1966 void Assembler::fcvt(const FPRegister& fd,
   1967                      const FPRegister& fn) {
   1968   if (fd.Is64Bits()) {
   1969     // Convert float to double.
   1970     DCHECK(fn.Is32Bits());
   1971     FPDataProcessing1Source(fd, fn, FCVT_ds);
   1972   } else {
   1973     // Convert double to float.
   1974     DCHECK(fn.Is64Bits());
   1975     FPDataProcessing1Source(fd, fn, FCVT_sd);
   1976   }
   1977 }
   1978 
   1979 
   1980 void Assembler::fcvtau(const Register& rd, const FPRegister& fn) {
   1981   FPConvertToInt(rd, fn, FCVTAU);
   1982 }
   1983 
   1984 
   1985 void Assembler::fcvtas(const Register& rd, const FPRegister& fn) {
   1986   FPConvertToInt(rd, fn, FCVTAS);
   1987 }
   1988 
   1989 
   1990 void Assembler::fcvtmu(const Register& rd, const FPRegister& fn) {
   1991   FPConvertToInt(rd, fn, FCVTMU);
   1992 }
   1993 
   1994 
   1995 void Assembler::fcvtms(const Register& rd, const FPRegister& fn) {
   1996   FPConvertToInt(rd, fn, FCVTMS);
   1997 }
   1998 
   1999 
   2000 void Assembler::fcvtnu(const Register& rd, const FPRegister& fn) {
   2001   FPConvertToInt(rd, fn, FCVTNU);
   2002 }
   2003 
   2004 
   2005 void Assembler::fcvtns(const Register& rd, const FPRegister& fn) {
   2006   FPConvertToInt(rd, fn, FCVTNS);
   2007 }
   2008 
   2009 
   2010 void Assembler::fcvtzu(const Register& rd, const FPRegister& fn) {
   2011   FPConvertToInt(rd, fn, FCVTZU);
   2012 }
   2013 
   2014 
   2015 void Assembler::fcvtzs(const Register& rd, const FPRegister& fn) {
   2016   FPConvertToInt(rd, fn, FCVTZS);
   2017 }
   2018 
   2019 
   2020 void Assembler::scvtf(const FPRegister& fd,
   2021                       const Register& rn,
   2022                       unsigned fbits) {
   2023   if (fbits == 0) {
   2024     Emit(SF(rn) | FPType(fd) | SCVTF | Rn(rn) | Rd(fd));
   2025   } else {
   2026     Emit(SF(rn) | FPType(fd) | SCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
   2027          Rd(fd));
   2028   }
   2029 }
   2030 
   2031 
   2032 void Assembler::ucvtf(const FPRegister& fd,
   2033                       const Register& rn,
   2034                       unsigned fbits) {
   2035   if (fbits == 0) {
   2036     Emit(SF(rn) | FPType(fd) | UCVTF | Rn(rn) | Rd(fd));
   2037   } else {
   2038     Emit(SF(rn) | FPType(fd) | UCVTF_fixed | FPScale(64 - fbits) | Rn(rn) |
   2039          Rd(fd));
   2040   }
   2041 }
   2042 
   2043 
   2044 void Assembler::dcptr(Label* label) {
   2045   RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
   2046   if (label->is_bound()) {
   2047     // The label is bound, so it does not need to be updated and the internal
   2048     // reference should be emitted.
   2049     //
   2050     // In this case, label->pos() returns the offset of the label from the
   2051     // start of the buffer.
   2052     internal_reference_positions_.push_back(pc_offset());
   2053     dc64(reinterpret_cast<uintptr_t>(buffer_ + label->pos()));
   2054   } else {
   2055     int32_t offset;
   2056     if (label->is_linked()) {
   2057       // The label is linked, so the internal reference should be added
   2058       // onto the end of the label's link chain.
   2059       //
   2060       // In this case, label->pos() returns the offset of the last linked
   2061       // instruction from the start of the buffer.
   2062       offset = label->pos() - pc_offset();
   2063       DCHECK(offset != kStartOfLabelLinkChain);
   2064     } else {
   2065       // The label is unused, so it now becomes linked and the internal
   2066       // reference is at the start of the new link chain.
   2067       offset = kStartOfLabelLinkChain;
   2068     }
   2069     // The instruction at pc is now the last link in the label's chain.
   2070     label->link_to(pc_offset());
   2071 
    2072     // Traditionally the offset to the previous instruction in the chain is
    2073     // encoded in the instruction payload (e.g. in the branch range), but
    2074     // internal references are not instructions, so while unbound they are
    2075     // encoded as two consecutive brk instructions whose two 16-bit
    2076     // immediates encode the offset.
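             // For example, a link 32 bytes behind pc becomes offset = -8
             // instructions, split as "brk #0xffff" followed by "brk #0xfff8".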
   2077     offset >>= kInstructionSizeLog2;
   2078     DCHECK(is_int32(offset));
   2079     uint32_t high16 = unsigned_bitextract_32(31, 16, offset);
   2080     uint32_t low16 = unsigned_bitextract_32(15, 0, offset);
   2081 
   2082     brk(high16);
   2083     brk(low16);
   2084   }
   2085 }
   2086 
   2087 
   2088 // Note:
   2089 // Below, a difference in case for the same letter indicates a
   2090 // negated bit.
   2091 // If b is 1, then B is 0.
   2092 Instr Assembler::ImmFP32(float imm) {
   2093   DCHECK(IsImmFP32(imm));
   2094   // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
   2095   uint32_t bits = float_to_rawbits(imm);
   2096   // bit7: a000.0000
   2097   uint32_t bit7 = ((bits >> 31) & 0x1) << 7;
   2098   // bit6: 0b00.0000
   2099   uint32_t bit6 = ((bits >> 29) & 0x1) << 6;
   2100   // bit5_to_0: 00cd.efgh
   2101   uint32_t bit5_to_0 = (bits >> 19) & 0x3f;
   2102 
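           // For example, ImmFP32(1.0f) has bits = 0x3f800000, so bit7 = 0,
           // bit6 = 0x40 and bit5_to_0 = 0x30, giving the imm8 encoding 0x70.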
   2103   return (bit7 | bit6 | bit5_to_0) << ImmFP_offset;
   2104 }
   2105 
   2106 
   2107 Instr Assembler::ImmFP64(double imm) {
   2108   DCHECK(IsImmFP64(imm));
   2109   // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
   2110   //       0000.0000.0000.0000.0000.0000.0000.0000
   2111   uint64_t bits = double_to_rawbits(imm);
   2112   // bit7: a000.0000
   2113   uint64_t bit7 = ((bits >> 63) & 0x1) << 7;
   2114   // bit6: 0b00.0000
   2115   uint64_t bit6 = ((bits >> 61) & 0x1) << 6;
   2116   // bit5_to_0: 00cd.efgh
   2117   uint64_t bit5_to_0 = (bits >> 48) & 0x3f;
   2118 
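           // For example, ImmFP64(-2.0) has bits = 0xc000000000000000, so
           // bit7 = 0x80, bit6 = 0 and bit5_to_0 = 0, giving the imm8 encoding
           // 0x80.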
   2119   return static_cast<Instr>((bit7 | bit6 | bit5_to_0) << ImmFP_offset);
   2120 }
   2121 
   2122 
   2123 // Code generation helpers.
   2124 void Assembler::MoveWide(const Register& rd,
   2125                          uint64_t imm,
   2126                          int shift,
   2127                          MoveWideImmediateOp mov_op) {
   2128   // Ignore the top 32 bits of an immediate if we're moving to a W register.
   2129   if (rd.Is32Bits()) {
   2130     // Check that the top 32 bits are zero (a positive 32-bit number) or top
   2131     // 33 bits are one (a negative 32-bit number, sign extended to 64 bits).
   2132     DCHECK(((imm >> kWRegSizeInBits) == 0) ||
   2133            ((imm >> (kWRegSizeInBits - 1)) == 0x1ffffffff));
   2134     imm &= kWRegMask;
   2135   }
   2136 
   2137   if (shift >= 0) {
   2138     // Explicit shift specified.
   2139     DCHECK((shift == 0) || (shift == 16) || (shift == 32) || (shift == 48));
   2140     DCHECK(rd.Is64Bits() || (shift == 0) || (shift == 16));
   2141     shift /= 16;
   2142   } else {
   2143     // Calculate a new immediate and shift combination to encode the immediate
   2144     // argument.
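             // For example, imm = 0x0000123400000000 has only halfword 2 set, so
             // it is reduced to imm = 0x1234 with shift = 2; for MOVZ and a
             // destination of x0 this emits "movz x0, #0x1234, lsl #32".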
   2145     shift = 0;
   2146     if ((imm & ~0xffffUL) == 0) {
   2147       // Nothing to do.
   2148     } else if ((imm & ~(0xffffUL << 16)) == 0) {
   2149       imm >>= 16;
   2150       shift = 1;
   2151     } else if ((imm & ~(0xffffUL << 32)) == 0) {
   2152       DCHECK(rd.Is64Bits());
   2153       imm >>= 32;
   2154       shift = 2;
   2155     } else if ((imm & ~(0xffffUL << 48)) == 0) {
   2156       DCHECK(rd.Is64Bits());
   2157       imm >>= 48;
   2158       shift = 3;
   2159     }
   2160   }
   2161 
   2162   DCHECK(is_uint16(imm));
   2163 
   2164   Emit(SF(rd) | MoveWideImmediateFixed | mov_op | Rd(rd) |
   2165        ImmMoveWide(static_cast<int>(imm)) | ShiftMoveWide(shift));
   2166 }
   2167 
   2168 
   2169 void Assembler::AddSub(const Register& rd,
   2170                        const Register& rn,
   2171                        const Operand& operand,
   2172                        FlagsUpdate S,
   2173                        AddSubOp op) {
   2174   DCHECK(rd.SizeInBits() == rn.SizeInBits());
   2175   DCHECK(!operand.NeedsRelocation(this));
   2176   if (operand.IsImmediate()) {
   2177     int64_t immediate = operand.ImmediateValue();
   2178     DCHECK(IsImmAddSub(immediate));
   2179     Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
   2180     Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
   2181          ImmAddSub(static_cast<int>(immediate)) | dest_reg | RnSP(rn));
   2182   } else if (operand.IsShiftedRegister()) {
   2183     DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
   2184     DCHECK(operand.shift() != ROR);
   2185 
   2186     // For instructions of the form:
   2187     //   add/sub   wsp, <Wn>, <Wm> [, LSL #0-3 ]
   2188     //   add/sub   <Wd>, wsp, <Wm> [, LSL #0-3 ]
   2189     //   add/sub   wsp, wsp, <Wm> [, LSL #0-3 ]
   2190     //   adds/subs <Wd>, wsp, <Wm> [, LSL #0-3 ]
   2191     // or their 64-bit register equivalents, convert the operand from shifted to
   2192     // extended register mode, and emit an add/sub extended instruction.
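             // For example, add(x0, csp, Operand(x1, LSL, 2)) is emitted as the
             // extended form "add x0, csp, x1, uxtx #2", relying on
             // Operand::ToExtendedRegister() to rewrite LSL as UXTX.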
   2193     if (rn.IsSP() || rd.IsSP()) {
   2194       DCHECK(!(rd.IsSP() && (S == SetFlags)));
   2195       DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S,
   2196                                AddSubExtendedFixed | op);
   2197     } else {
   2198       DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op);
   2199     }
   2200   } else {
   2201     DCHECK(operand.IsExtendedRegister());
   2202     DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op);
   2203   }
   2204 }
   2205 
   2206 
   2207 void Assembler::AddSubWithCarry(const Register& rd,
   2208                                 const Register& rn,
   2209                                 const Operand& operand,
   2210                                 FlagsUpdate S,
   2211                                 AddSubWithCarryOp op) {
   2212   DCHECK(rd.SizeInBits() == rn.SizeInBits());
   2213   DCHECK(rd.SizeInBits() == operand.reg().SizeInBits());
   2214   DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
   2215   DCHECK(!operand.NeedsRelocation(this));
   2216   Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
   2217 }
   2218 
   2219 
   2220 void Assembler::hlt(int code) {
   2221   DCHECK(is_uint16(code));
   2222   Emit(HLT | ImmException(code));
   2223 }
   2224 
   2225 
   2226 void Assembler::brk(int code) {
   2227   DCHECK(is_uint16(code));
   2228   Emit(BRK | ImmException(code));
   2229 }
   2230 
   2231 
   2232 void Assembler::EmitStringData(const char* string) {
   2233   size_t len = strlen(string) + 1;
   2234   DCHECK(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
   2235   EmitData(string, static_cast<int>(len));
   2236   // Pad with NULL characters until pc_ is aligned.
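           // For example, a six-byte string (terminator included) emitted at an
           // aligned pc_ is followed by two padding bytes.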
   2237   const char pad[] = {'\0', '\0', '\0', '\0'};
   2238   STATIC_ASSERT(sizeof(pad) == kInstructionSize);
   2239   EmitData(pad, RoundUp(pc_offset(), kInstructionSize) - pc_offset());
   2240 }
   2241 
   2242 
   2243 void Assembler::debug(const char* message, uint32_t code, Instr params) {
   2244 #ifdef USE_SIMULATOR
    2245   // Don't generate simulator-specific code if we are building a snapshot,
    2246   // which might be run on real hardware.
   2247   if (!serializer_enabled()) {
   2248     // The arguments to the debug marker need to be contiguous in memory, so
   2249     // make sure we don't try to emit pools.
   2250     BlockPoolsScope scope(this);
   2251 
   2252     Label start;
   2253     bind(&start);
   2254 
   2255     // Refer to instructions-arm64.h for a description of the marker and its
   2256     // arguments.
   2257     hlt(kImmExceptionIsDebug);
   2258     DCHECK(SizeOfCodeGeneratedSince(&start) == kDebugCodeOffset);
   2259     dc32(code);
   2260     DCHECK(SizeOfCodeGeneratedSince(&start) == kDebugParamsOffset);
   2261     dc32(params);
   2262     DCHECK(SizeOfCodeGeneratedSince(&start) == kDebugMessageOffset);
   2263     EmitStringData(message);
   2264     hlt(kImmExceptionIsUnreachable);
   2265 
   2266     return;
   2267   }
   2268   // Fall through if Serializer is enabled.
   2269 #endif
   2270 
   2271   if (params & BREAK) {
   2272     hlt(kImmExceptionIsDebug);
   2273   }
   2274 }
   2275 
   2276 
   2277 void Assembler::Logical(const Register& rd,
   2278                         const Register& rn,
   2279                         const Operand& operand,
   2280                         LogicalOp op) {
   2281   DCHECK(rd.SizeInBits() == rn.SizeInBits());
   2282   DCHECK(!operand.NeedsRelocation(this));
   2283   if (operand.IsImmediate()) {
   2284     int64_t immediate = operand.ImmediateValue();
   2285     unsigned reg_size = rd.SizeInBits();
   2286 
   2287     DCHECK(immediate != 0);
   2288     DCHECK(immediate != -1);
   2289     DCHECK(rd.Is64Bits() || is_uint32(immediate));
   2290 
   2291     // If the operation is NOT, invert the operation and immediate.
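             // For example, bic(w0, w1, Operand(0xffff0000)) becomes an AND with
             // the encodable immediate 0x0000ffff.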
   2292     if ((op & NOT) == NOT) {
   2293       op = static_cast<LogicalOp>(op & ~NOT);
   2294       immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask);
   2295     }
   2296 
   2297     unsigned n, imm_s, imm_r;
   2298     if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
   2299       // Immediate can be encoded in the instruction.
   2300       LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
   2301     } else {
   2302       // This case is handled in the macro assembler.
   2303       UNREACHABLE();
   2304     }
   2305   } else {
   2306     DCHECK(operand.IsShiftedRegister());
   2307     DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
   2308     Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed);
   2309     DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
   2310   }
   2311 }
   2312 
   2313 
   2314 void Assembler::LogicalImmediate(const Register& rd,
   2315                                  const Register& rn,
   2316                                  unsigned n,
   2317                                  unsigned imm_s,
   2318                                  unsigned imm_r,
   2319                                  LogicalOp op) {
   2320   unsigned reg_size = rd.SizeInBits();
   2321   Instr dest_reg = (op == ANDS) ? Rd(rd) : RdSP(rd);
   2322   Emit(SF(rd) | LogicalImmediateFixed | op | BitN(n, reg_size) |
   2323        ImmSetBits(imm_s, reg_size) | ImmRotate(imm_r, reg_size) | dest_reg |
   2324        Rn(rn));
   2325 }
   2326 
   2327 
   2328 void Assembler::ConditionalCompare(const Register& rn,
   2329                                    const Operand& operand,
   2330                                    StatusFlags nzcv,
   2331                                    Condition cond,
   2332                                    ConditionalCompareOp op) {
   2333   Instr ccmpop;
   2334   DCHECK(!operand.NeedsRelocation(this));
   2335   if (operand.IsImmediate()) {
   2336     int64_t immediate = operand.ImmediateValue();
   2337     DCHECK(IsImmConditionalCompare(immediate));
   2338     ccmpop = ConditionalCompareImmediateFixed | op |
   2339              ImmCondCmp(static_cast<unsigned>(immediate));
   2340   } else {
   2341     DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
   2342     ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg());
   2343   }
   2344   Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));
   2345 }
   2346 
   2347 
   2348 void Assembler::DataProcessing1Source(const Register& rd,
   2349                                       const Register& rn,
   2350                                       DataProcessing1SourceOp op) {
   2351   DCHECK(rd.SizeInBits() == rn.SizeInBits());
   2352   Emit(SF(rn) | op | Rn(rn) | Rd(rd));
   2353 }
   2354 
   2355 
   2356 void Assembler::FPDataProcessing1Source(const FPRegister& fd,
   2357                                         const FPRegister& fn,
   2358                                         FPDataProcessing1SourceOp op) {
   2359   Emit(FPType(fn) | op | Rn(fn) | Rd(fd));
   2360 }
   2361 
   2362 
   2363 void Assembler::FPDataProcessing2Source(const FPRegister& fd,
   2364                                         const FPRegister& fn,
   2365                                         const FPRegister& fm,
   2366                                         FPDataProcessing2SourceOp op) {
   2367   DCHECK(fd.SizeInBits() == fn.SizeInBits());
   2368   DCHECK(fd.SizeInBits() == fm.SizeInBits());
   2369   Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd));
   2370 }
   2371 
   2372 
   2373 void Assembler::FPDataProcessing3Source(const FPRegister& fd,
   2374                                         const FPRegister& fn,
   2375                                         const FPRegister& fm,
   2376                                         const FPRegister& fa,
   2377                                         FPDataProcessing3SourceOp op) {
   2378   DCHECK(AreSameSizeAndType(fd, fn, fm, fa));
   2379   Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd) | Ra(fa));
   2380 }
   2381 
   2382 
   2383 void Assembler::EmitShift(const Register& rd,
   2384                           const Register& rn,
   2385                           Shift shift,
   2386                           unsigned shift_amount) {
   2387   switch (shift) {
   2388     case LSL:
   2389       lsl(rd, rn, shift_amount);
   2390       break;
   2391     case LSR:
   2392       lsr(rd, rn, shift_amount);
   2393       break;
   2394     case ASR:
   2395       asr(rd, rn, shift_amount);
   2396       break;
   2397     case ROR:
   2398       ror(rd, rn, shift_amount);
   2399       break;
   2400     default:
   2401       UNREACHABLE();
   2402   }
   2403 }
   2404 
   2405 
   2406 void Assembler::EmitExtendShift(const Register& rd,
   2407                                 const Register& rn,
   2408                                 Extend extend,
   2409                                 unsigned left_shift) {
   2410   DCHECK(rd.SizeInBits() >= rn.SizeInBits());
   2411   unsigned reg_size = rd.SizeInBits();
   2412   // Use the correct size of register.
   2413   Register rn_ = Register::Create(rn.code(), rd.SizeInBits());
   2414   // Bits extracted are high_bit:0.
   2415   unsigned high_bit = (8 << (extend & 0x3)) - 1;
   2416   // Number of bits left in the result that are not introduced by the shift.
   2417   unsigned non_shift_bits = (reg_size - left_shift) & (reg_size - 1);
   2418 
   2419   if ((non_shift_bits > high_bit) || (non_shift_bits == 0)) {
   2420     switch (extend) {
   2421       case UXTB:
   2422       case UXTH:
   2423       case UXTW: ubfm(rd, rn_, non_shift_bits, high_bit); break;
   2424       case SXTB:
   2425       case SXTH:
   2426       case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break;
   2427       case UXTX:
   2428       case SXTX: {
   2429         DCHECK(rn.SizeInBits() == kXRegSizeInBits);
   2430         // Nothing to extend. Just shift.
   2431         lsl(rd, rn_, left_shift);
   2432         break;
   2433       }
   2434       default: UNREACHABLE();
   2435     }
   2436   } else {
   2437     // No need to extend as the extended bits would be shifted away.
   2438     lsl(rd, rn_, left_shift);
   2439   }
   2440 }
   2441 
   2442 
   2443 void Assembler::DataProcShiftedRegister(const Register& rd,
   2444                                         const Register& rn,
   2445                                         const Operand& operand,
   2446                                         FlagsUpdate S,
   2447                                         Instr op) {
   2448   DCHECK(operand.IsShiftedRegister());
   2449   DCHECK(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount())));
   2450   DCHECK(!operand.NeedsRelocation(this));
   2451   Emit(SF(rd) | op | Flags(S) |
   2452        ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) |
   2453        Rm(operand.reg()) | Rn(rn) | Rd(rd));
   2454 }
   2455 
   2456 
   2457 void Assembler::DataProcExtendedRegister(const Register& rd,
   2458                                          const Register& rn,
   2459                                          const Operand& operand,
   2460                                          FlagsUpdate S,
   2461                                          Instr op) {
   2462   DCHECK(!operand.NeedsRelocation(this));
   2463   Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
   2464   Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) |
   2465        ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) |
   2466        dest_reg | RnSP(rn));
   2467 }
   2468 
   2469 
   2470 bool Assembler::IsImmAddSub(int64_t immediate) {
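           // For example, 0xfff and 0x123000 (0x123 << 12, low 12 bits clear)
           // are encodable, while 0x123456 is not.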
   2471   return is_uint12(immediate) ||
   2472          (is_uint12(immediate >> 12) && ((immediate & 0xfff) == 0));
   2473 }
   2474 
   2475 void Assembler::LoadStore(const CPURegister& rt,
   2476                           const MemOperand& addr,
   2477                           LoadStoreOp op) {
   2478   Instr memop = op | Rt(rt) | RnSP(addr.base());
   2479 
   2480   if (addr.IsImmediateOffset()) {
   2481     LSDataSize size = CalcLSDataSize(op);
   2482     if (IsImmLSScaled(addr.offset(), size)) {
   2483       int offset = static_cast<int>(addr.offset());
   2484       // Use the scaled addressing mode.
   2485       Emit(LoadStoreUnsignedOffsetFixed | memop |
   2486            ImmLSUnsigned(offset >> size));
   2487     } else if (IsImmLSUnscaled(addr.offset())) {
   2488       int offset = static_cast<int>(addr.offset());
   2489       // Use the unscaled addressing mode.
   2490       Emit(LoadStoreUnscaledOffsetFixed | memop | ImmLS(offset));
   2491     } else {
   2492       // This case is handled in the macro assembler.
   2493       UNREACHABLE();
   2494     }
   2495   } else if (addr.IsRegisterOffset()) {
   2496     Extend ext = addr.extend();
   2497     Shift shift = addr.shift();
   2498     unsigned shift_amount = addr.shift_amount();
   2499 
   2500     // LSL is encoded in the option field as UXTX.
   2501     if (shift == LSL) {
   2502       ext = UXTX;
   2503     }
   2504 
    2505     // Shifts are encoded in one bit, indicating a left shift by log2 of
    2506     // the memory access size (i.e. the index is scaled by the access size).
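             // For example, "ldr x0, [x1, x2, LSL #3]" scales the index by the
             // eight-byte access size (shift_amount == 3); any other non-zero
             // shift amount cannot be encoded.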
   2507     DCHECK((shift_amount == 0) ||
   2508            (shift_amount == static_cast<unsigned>(CalcLSDataSize(op))));
   2509     Emit(LoadStoreRegisterOffsetFixed | memop | Rm(addr.regoffset()) |
   2510          ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0));
   2511   } else {
   2512     // Pre-index and post-index modes.
   2513     DCHECK(!rt.Is(addr.base()));
   2514     if (IsImmLSUnscaled(addr.offset())) {
   2515       int offset = static_cast<int>(addr.offset());
   2516       if (addr.IsPreIndex()) {
   2517         Emit(LoadStorePreIndexFixed | memop | ImmLS(offset));
   2518       } else {
   2519         DCHECK(addr.IsPostIndex());
   2520         Emit(LoadStorePostIndexFixed | memop | ImmLS(offset));
   2521       }
   2522     } else {
   2523       // This case is handled in the macro assembler.
   2524       UNREACHABLE();
   2525     }
   2526   }
   2527 }
   2528 
   2529 
   2530 bool Assembler::IsImmLSUnscaled(int64_t offset) {
   2531   return is_int9(offset);
   2532 }
   2533 
   2534 
   2535 bool Assembler::IsImmLSScaled(int64_t offset, LSDataSize size) {
   2536   bool offset_is_size_multiple = (((offset >> size) << size) == offset);
   2537   return offset_is_size_multiple && is_uint12(offset >> size);
   2538 }
   2539 
   2540 
   2541 bool Assembler::IsImmLSPair(int64_t offset, LSDataSize size) {
   2542   bool offset_is_size_multiple = (((offset >> size) << size) == offset);
   2543   return offset_is_size_multiple && is_int7(offset >> size);
   2544 }
   2545 
   2546 
   2547 bool Assembler::IsImmLLiteral(int64_t offset) {
   2548   int inst_size = static_cast<int>(kInstructionSizeLog2);
   2549   bool offset_is_inst_multiple =
   2550       (((offset >> inst_size) << inst_size) == offset);
   2551   return offset_is_inst_multiple && is_intn(offset, ImmLLiteral_width);
   2552 }
   2553 
   2554 
   2555 // Test if a given value can be encoded in the immediate field of a logical
   2556 // instruction.
   2557 // If it can be encoded, the function returns true, and values pointed to by n,
   2558 // imm_s and imm_r are updated with immediates encoded in the format required
   2559 // by the corresponding fields in the logical instruction.
    2560 // If it cannot be encoded, the function returns false, and the values pointed
   2561 // to by n, imm_s and imm_r are undefined.
   2562 bool Assembler::IsImmLogical(uint64_t value,
   2563                              unsigned width,
   2564                              unsigned* n,
   2565                              unsigned* imm_s,
   2566                              unsigned* imm_r) {
   2567   DCHECK((n != NULL) && (imm_s != NULL) && (imm_r != NULL));
   2568   DCHECK((width == kWRegSizeInBits) || (width == kXRegSizeInBits));
   2569 
   2570   bool negate = false;
   2571 
    2572   // Logical immediates are encoded using the parameters n, imm_s and imm_r,
    2573   // according to the following table:
   2574   //
   2575   //    N   imms    immr    size        S             R
   2576   //    1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
   2577   //    0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
   2578   //    0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
   2579   //    0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
   2580   //    0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
   2581   //    0  11110s  xxxxxr     2    UInt(s)       UInt(r)
   2582   // (s bits must not be all set)
   2583   //
   2584   // A pattern is constructed of size bits, where the least significant S+1 bits
    2585   // are set. The pattern is rotated right by R, and repeated across a 32- or
    2586   // 64-bit value, depending on destination register width.
   2587   //
   2588   // Put another way: the basic format of a logical immediate is a single
   2589   // contiguous stretch of 1 bits, repeated across the whole word at intervals
   2590   // given by a power of 2. To identify them quickly, we first locate the
   2591   // lowest stretch of 1 bits, then the next 1 bit above that; that combination
   2592   // is different for every logical immediate, so it gives us all the
   2593   // information we need to identify the only logical immediate that our input
   2594   // could be, and then we simply check if that's the value we actually have.
   2595   //
   2596   // (The rotation parameter does give the possibility of the stretch of 1 bits
   2597   // going 'round the end' of the word. To deal with that, we observe that in
   2598   // any situation where that happens the bitwise NOT of the value is also a
   2599   // valid logical immediate. So we simply invert the input whenever its low bit
   2600   // is set, and then we know that the rotated case can't arise.)
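           //
           // For example, 0x00ff00ff00ff00ff (eight set bits repeating every
           // 16 bits, rotation 0) is a valid logical immediate; the steps below
           // recover its encoding n = 0, imm_s = 0x27 (0b100111) and imm_r = 0.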
   2601 
   2602   if (value & 1) {
   2603     // If the low bit is 1, negate the value, and set a flag to remember that we
   2604     // did (so that we can adjust the return values appropriately).
   2605     negate = true;
   2606     value = ~value;
   2607   }
   2608 
   2609   if (width == kWRegSizeInBits) {
   2610     // To handle 32-bit logical immediates, the very easiest thing is to repeat
   2611     // the input value twice to make a 64-bit word. The correct encoding of that
   2612     // as a logical immediate will also be the correct encoding of the 32-bit
   2613     // value.
   2614 
    2615     // The most-significant 32 bits may not be zero (i.e. when negate is
    2616     // true), so shift the value left before duplicating it.
   2617     value <<= kWRegSizeInBits;
   2618     value |= value >> kWRegSizeInBits;
   2619   }
   2620 
   2621   // The basic analysis idea: imagine our input word looks like this.
   2622   //
   2623   //    0011111000111110001111100011111000111110001111100011111000111110
   2624   //                                                          c  b    a
   2625   //                                                          |<--d-->|
   2626   //
   2627   // We find the lowest set bit (as an actual power-of-2 value, not its index)
   2628   // and call it a. Then we add a to our original number, which wipes out the
   2629   // bottommost stretch of set bits and replaces it with a 1 carried into the
   2630   // next zero bit. Then we look for the new lowest set bit, which is in
   2631   // position b, and subtract it, so now our number is just like the original
   2632   // but with the lowest stretch of set bits completely gone. Now we find the
   2633   // lowest set bit again, which is position c in the diagram above. Then we'll
   2634   // measure the distance d between bit positions a and c (using CLZ), and that
   2635   // tells us that the only valid logical immediate that could possibly be equal
   2636   // to this number is the one in which a stretch of bits running from a to just
   2637   // below b is replicated every d bits.
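           //
           // Continuing the example above (value = 0xff00ff00ff00ff00 after the
           // negation step): a = 1 << 8, b = 1 << 16, c = 1 << 24 and
           // d = 55 - 39 = 16.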
   2638   uint64_t a = LargestPowerOf2Divisor(value);
   2639   uint64_t value_plus_a = value + a;
   2640   uint64_t b = LargestPowerOf2Divisor(value_plus_a);
   2641   uint64_t value_plus_a_minus_b = value_plus_a - b;
   2642   uint64_t c = LargestPowerOf2Divisor(value_plus_a_minus_b);
   2643 
   2644   int d, clz_a, out_n;
   2645   uint64_t mask;
   2646 
   2647   if (c != 0) {
   2648     // The general case, in which there is more than one stretch of set bits.
   2649     // Compute the repeat distance d, and set up a bitmask covering the basic
   2650     // unit of repetition (i.e. a word with the bottom d bits set). Also, in all
   2651     // of these cases the N bit of the output will be zero.
   2652     clz_a = CountLeadingZeros(a, kXRegSizeInBits);
   2653     int clz_c = CountLeadingZeros(c, kXRegSizeInBits);
   2654     d = clz_a - clz_c;
   2655     mask = ((V8_UINT64_C(1) << d) - 1);
   2656     out_n = 0;
   2657   } else {
   2658     // Handle degenerate cases.
   2659     //
   2660     // If any of those 'find lowest set bit' operations didn't find a set bit at
   2661     // all, then the word will have been zero thereafter, so in particular the
   2662     // last lowest_set_bit operation will have returned zero. So we can test for
   2663     // all the special case conditions in one go by seeing if c is zero.
   2664     if (a == 0) {
   2665       // The input was zero (or all 1 bits, which will come to here too after we
   2666       // inverted it at the start of the function), for which we just return
   2667       // false.
   2668       return false;
   2669     } else {
   2670       // Otherwise, if c was zero but a was not, then there's just one stretch
   2671       // of set bits in our word, meaning that we have the trivial case of
   2672       // d == 64 and only one 'repetition'. Set up all the same variables as in
   2673       // the general case above, and set the N bit in the output.
   2674       clz_a = CountLeadingZeros(a, kXRegSizeInBits);
   2675       d = 64;
   2676       mask = ~V8_UINT64_C(0);
   2677       out_n = 1;
   2678     }
   2679   }
   2680 
   2681   // If the repeat period d is not a power of two, it can't be encoded.
   2682   if (!IS_POWER_OF_TWO(d)) {
   2683     return false;
   2684   }
   2685 
   2686   if (((b - a) & ~mask) != 0) {
   2687     // If the bit stretch (b - a) does not fit within the mask derived from the
   2688     // repeat period, then fail.
   2689     return false;
   2690   }
   2691 
   2692   // The only possible option is b - a repeated every d bits. Now we're going to
   2693   // actually construct the valid logical immediate derived from that
   2694   // specification, and see if it equals our original input.
   2695   //
   2696   // To repeat a value every d bits, we multiply it by a number of the form
   2697   // (1 + 2^d + 2^(2d) + ...), i.e. 0x0001000100010001 or similar. These can
   2698   // be derived using a table lookup on CLZ(d).
   2699   static const uint64_t multipliers[] = {
   2700     0x0000000000000001UL,
   2701     0x0000000100000001UL,
   2702     0x0001000100010001UL,
   2703     0x0101010101010101UL,
   2704     0x1111111111111111UL,
   2705     0x5555555555555555UL,
   2706   };
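           // Continuing the example: d = 16 gives CLZ(d) = 59 and
           // multiplier_idx = 2, so candidate = 0xff00 * 0x0001000100010001 =
           // 0xff00ff00ff00ff00, matching the (negated) input.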
   2707   int multiplier_idx = CountLeadingZeros(d, kXRegSizeInBits) - 57;
   2708   // Ensure that the index to the multipliers array is within bounds.
   2709   DCHECK((multiplier_idx >= 0) &&
   2710          (static_cast<size_t>(multiplier_idx) < arraysize(multipliers)));
   2711   uint64_t multiplier = multipliers[multiplier_idx];
   2712   uint64_t candidate = (b - a) * multiplier;
   2713 
   2714   if (value != candidate) {
   2715     // The candidate pattern doesn't match our input value, so fail.
   2716     return false;
   2717   }
   2718 
   2719   // We have a match! This is a valid logical immediate, so now we have to
   2720   // construct the bits and pieces of the instruction encoding that generates
   2721   // it.
   2722 
   2723   // Count the set bits in our basic stretch. The special case of clz(0) == -1
   2724   // makes the answer come out right for stretches that reach the very top of
   2725   // the word (e.g. numbers like 0xffffc00000000000).
   2726   int clz_b = (b == 0) ? -1 : CountLeadingZeros(b, kXRegSizeInBits);
   2727   int s = clz_a - clz_b;
   2728 
   2729   // Decide how many bits to rotate right by, to put the low bit of that basic
   2730   // stretch in position a.
   2731   int r;
   2732   if (negate) {
   2733     // If we inverted the input right at the start of this function, here's
   2734     // where we compensate: the number of set bits becomes the number of clear
   2735     // bits, and the rotation count is based on position b rather than position
   2736     // a (since b is the location of the 'lowest' 1 bit after inversion).
   2737     s = d - s;
   2738     r = (clz_b + 1) & (d - 1);
   2739   } else {
   2740     r = (clz_a + 1) & (d - 1);
   2741   }
   2742 
   2743   // Now we're done, except for having to encode the S output in such a way that
   2744   // it gives both the number of set bits and the length of the repeated
   2745   // segment. The s field is encoded like this:
   2746   //
   2747   //     imms    size        S
   2748   //    ssssss    64    UInt(ssssss)
   2749   //    0sssss    32    UInt(sssss)
   2750   //    10ssss    16    UInt(ssss)
   2751   //    110sss     8    UInt(sss)
   2752   //    1110ss     4    UInt(ss)
   2753   //    11110s     2    UInt(s)
   2754   //
   2755   // So we 'or' (-d << 1) with our computed s to form imms.
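           //
           // Continuing the example: negate was set, so s = 16 - 8 = 8 and
           // r = (47 + 1) & 15 = 0, giving imm_s = (-32 | 7) & 0x3f = 0x27.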
   2756   *n = out_n;
   2757   *imm_s = ((-d << 1) | (s - 1)) & 0x3f;
   2758   *imm_r = r;
   2759 
   2760   return true;
   2761 }
   2762 
   2763 
   2764 bool Assembler::IsImmConditionalCompare(int64_t immediate) {
   2765   return is_uint5(immediate);
   2766 }
   2767 
   2768 
   2769 bool Assembler::IsImmFP32(float imm) {
   2770   // Valid values will have the form:
   2771   // aBbb.bbbc.defg.h000.0000.0000.0000.0000
   2772   uint32_t bits = float_to_rawbits(imm);
    2773   // bits[18..0] are cleared.
   2774   if ((bits & 0x7ffff) != 0) {
   2775     return false;
   2776   }
   2777 
   2778   // bits[29..25] are all set or all cleared.
   2779   uint32_t b_pattern = (bits >> 16) & 0x3e00;
   2780   if (b_pattern != 0 && b_pattern != 0x3e00) {
   2781     return false;
   2782   }
   2783 
   2784   // bit[30] and bit[29] are opposite.
   2785   if (((bits ^ (bits << 1)) & 0x40000000) == 0) {
   2786     return false;
   2787   }
   2788 
   2789   return true;
   2790 }
   2791 
   2792 
   2793 bool Assembler::IsImmFP64(double imm) {
   2794   // Valid values will have the form:
   2795   // aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
   2796   // 0000.0000.0000.0000.0000.0000.0000.0000
   2797   uint64_t bits = double_to_rawbits(imm);
   2798   // bits[47..0] are cleared.
   2799   if ((bits & 0xffffffffffffL) != 0) {
   2800     return false;
   2801   }
   2802 
   2803   // bits[61..54] are all set or all cleared.
   2804   uint32_t b_pattern = (bits >> 48) & 0x3fc0;
   2805   if (b_pattern != 0 && b_pattern != 0x3fc0) {
   2806     return false;
   2807   }
   2808 
   2809   // bit[62] and bit[61] are opposite.
   2810   if (((bits ^ (bits << 1)) & 0x4000000000000000L) == 0) {
   2811     return false;
   2812   }
   2813 
   2814   return true;
   2815 }
   2816 
   2817 
   2818 void Assembler::GrowBuffer() {
   2819   if (!own_buffer_) FATAL("external code buffer is too small");
   2820 
   2821   // Compute new buffer size.
   2822   CodeDesc desc;  // the new buffer
   2823   if (buffer_size_ < 1 * MB) {
   2824     desc.buffer_size = 2 * buffer_size_;
   2825   } else {
   2826     desc.buffer_size = buffer_size_ + 1 * MB;
   2827   }
   2828   CHECK_GT(desc.buffer_size, 0);  // No overflow.
   2829 
   2830   byte* buffer = reinterpret_cast<byte*>(buffer_);
   2831 
   2832   // Set up new buffer.
   2833   desc.buffer = NewArray<byte>(desc.buffer_size);
   2834   desc.origin = this;
   2835 
   2836   desc.instr_size = pc_offset();
   2837   desc.reloc_size =
   2838       static_cast<int>((buffer + buffer_size_) - reloc_info_writer.pos());
   2839 
   2840   // Copy the data.
   2841   intptr_t pc_delta = desc.buffer - buffer;
   2842   intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
   2843                       (buffer + buffer_size_);
   2844   memmove(desc.buffer, buffer, desc.instr_size);
   2845   memmove(reloc_info_writer.pos() + rc_delta,
   2846           reloc_info_writer.pos(), desc.reloc_size);
   2847 
   2848   // Switch buffers.
   2849   DeleteArray(buffer_);
   2850   buffer_ = desc.buffer;
   2851   buffer_size_ = desc.buffer_size;
   2852   pc_ = reinterpret_cast<byte*>(pc_) + pc_delta;
   2853   reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
   2854                                reloc_info_writer.last_pc() + pc_delta);
   2855 
    2856   // None of our relocation types are pc-relative pointing outside the code
    2857   // buffer, nor pc-absolute pointing inside the code buffer, so there is no
    2858   // need to relocate any emitted relocation entries.
   2859 
   2860   // Relocate internal references.
   2861   for (auto pos : internal_reference_positions_) {
   2862     intptr_t* p = reinterpret_cast<intptr_t*>(buffer_ + pos);
   2863     *p += pc_delta;
   2864   }
   2865 
    2866   // Pending relocation entries are also relative, so they need no relocation.
   2867 }
   2868 
   2869 
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  // We do not try to reuse pool constants.
  RelocInfo rinfo(isolate(), reinterpret_cast<byte*>(pc_), rmode, data, NULL);
  if (((rmode >= RelocInfo::COMMENT) &&
       (rmode <= RelocInfo::DEBUG_BREAK_SLOT_AT_CALL)) ||
      (rmode == RelocInfo::INTERNAL_REFERENCE) ||
      (rmode == RelocInfo::CONST_POOL) || (rmode == RelocInfo::VENEER_POOL) ||
      (rmode == RelocInfo::DEOPT_REASON) ||
      (rmode == RelocInfo::GENERATOR_CONTINUATION)) {
    // Adjust code for new modes.
    DCHECK(RelocInfo::IsDebugBreakSlot(rmode) || RelocInfo::IsComment(rmode) ||
           RelocInfo::IsDeoptReason(rmode) || RelocInfo::IsPosition(rmode) ||
           RelocInfo::IsInternalReference(rmode) ||
           RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode) ||
           RelocInfo::IsGeneratorContinuation(rmode));
    // These modes do not need an entry in the constant pool.
  } else {
    constpool_.RecordEntry(data, rmode);
    // Make sure the constant pool is not emitted in place of the next
    // instruction for which we just recorded relocation info.
    BlockConstPoolFor(1);
  }

  if (!RelocInfo::IsNone(rmode)) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
        !serializer_enabled() && !emit_debug_code()) {
      return;
    }
    DCHECK(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
      RelocInfo reloc_info_with_ast_id(isolate(), reinterpret_cast<byte*>(pc_),
                                       rmode, RecordedAstId().ToInt(), NULL);
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}


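// Prevent the constant pool from being emitted for at least the next
// `instructions` instructions, so that short fixed-length sequences are not
// split by pool emission.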
void Assembler::BlockConstPoolFor(int instructions) {
  int pc_limit = pc_offset() + instructions * kInstructionSize;
  if (no_const_pool_before_ < pc_limit) {
    no_const_pool_before_ = pc_limit;
    // Make sure the pool won't be blocked for too long.
    DCHECK(pc_limit < constpool_.MaxPcOffset());
  }

  if (next_constant_pool_check_ < no_const_pool_before_) {
    next_constant_pool_check_ = no_const_pool_before_;
  }
}


void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
  // Some short sequences of instructions must not be broken up by constant
  // pool emission; such sequences are protected by calls to BlockConstPoolFor
  // and BlockConstPoolScope.
  if (is_const_pool_blocked()) {
    // Something is wrong if emission is forced and blocked at the same time.
    DCHECK(!force_emit);
    return;
  }

  // There is nothing to do if there are no pending constant pool entries.
  if (constpool_.IsEmpty()) {
    // Calculate the offset of the next check.
    SetNextConstPoolCheckIn(kCheckConstPoolInterval);
    return;
  }

  // We emit a constant pool when:
  //  * requested to do so by parameter force_emit (e.g. after each function).
  //  * the distance to the first instruction accessing the constant pool is
  //    kApproxMaxDistToConstPool or more.
  //  * the number of entries in the pool is kApproxMaxPoolEntryCount or more.
  int dist = constpool_.DistanceToFirstUse();
  int count = constpool_.EntryCount();
  if (!force_emit &&
      (dist < kApproxMaxDistToConstPool) &&
      (count < kApproxMaxPoolEntryCount)) {
    return;
  }

  // Emit veneers for branches that would go out of range during emission of
  // the constant pool.
  int worst_case_size = constpool_.WorstCaseSize();
  CheckVeneerPool(false, require_jump,
                  kVeneerDistanceMargin + worst_case_size);

  // Check that the code buffer is large enough before emitting the constant
  // pool (this includes the gap to the relocation information).
  int needed_space = worst_case_size + kGap + 1 * kInstructionSize;
  while (buffer_space() <= needed_space) {
    GrowBuffer();
  }

  Label size_check;
  bind(&size_check);
  constpool_.Emit(require_jump);
  DCHECK(SizeOfCodeGeneratedSince(&size_check) <=
         static_cast<unsigned>(worst_case_size));

  // Since a constant pool was just emitted, move the check offset forward by
  // the standard interval.
  SetNextConstPoolCheckIn(kCheckConstPoolInterval);
}


bool Assembler::ShouldEmitVeneer(int max_reachable_pc, int margin) {
  // Account for the branch around the veneers, the guard, and the worst-case
  // space taken by a veneer for every other unresolved branch.
  int protection_offset = 2 * kInstructionSize;
  return pc_offset() > max_reachable_pc - margin - protection_offset -
    static_cast<int>(unresolved_branches_.size() * kMaxVeneerCodeSize);
}


void Assembler::RecordVeneerPool(int location_offset, int size) {
  RelocInfo rinfo(isolate(), buffer_ + location_offset, RelocInfo::VENEER_POOL,
                  static_cast<intptr_t>(size), NULL);
  reloc_info_writer.Write(&rinfo);
}


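// A veneer is an unconditional branch emitted close to a short-range branch
// whose target would otherwise go out of range: the original branch is
// re-pointed at the veneer, which in turn branches to the intended label.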
void Assembler::EmitVeneers(bool force_emit, bool need_protection, int margin) {
  BlockPoolsScope scope(this);
  RecordComment("[ Veneers");

  // The exact size of the veneer pool must be recorded (see the comment at the
  // declaration site of RecordConstPool()), but computing the number of
  // veneers that will be generated is not obvious. So instead we remember the
  // current position and will record the size after the pool has been
  // generated.
  Label size_check;
  bind(&size_check);
  int veneer_pool_relocinfo_loc = pc_offset();

  Label end;
  if (need_protection) {
    b(&end);
  }

  EmitVeneersGuard();

  Label veneer_size_check;

  std::multimap<int, FarBranchInfo>::iterator it, it_to_delete;

  it = unresolved_branches_.begin();
  while (it != unresolved_branches_.end()) {
    if (force_emit || ShouldEmitVeneer(it->first, margin)) {
      Instruction* branch = InstructionAt(it->second.pc_offset_);
      Label* label = it->second.label_;

#ifdef DEBUG
      bind(&veneer_size_check);
#endif
      // Patch the branch to point to the current position, and emit a branch
      // to the label.
      Instruction* veneer = reinterpret_cast<Instruction*>(pc_);
      RemoveBranchFromLabelLinkChain(branch, label, veneer);
      branch->SetImmPCOffsetTarget(isolate(), veneer);
      b(label);
#ifdef DEBUG
      DCHECK(SizeOfCodeGeneratedSince(&veneer_size_check) <=
             static_cast<uint64_t>(kMaxVeneerCodeSize));
      veneer_size_check.Unuse();
#endif

      it_to_delete = it++;
      unresolved_branches_.erase(it_to_delete);
    } else {
      ++it;
    }
  }

  // Record the veneer pool size.
  int pool_size = static_cast<int>(SizeOfCodeGeneratedSince(&size_check));
  RecordVeneerPool(veneer_pool_relocinfo_loc, pool_size);

  if (unresolved_branches_.empty()) {
    next_veneer_pool_check_ = kMaxInt;
  } else {
    next_veneer_pool_check_ =
      unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
  }

  bind(&end);

  RecordComment("]");
}


void Assembler::CheckVeneerPool(bool force_emit, bool require_jump,
                                int margin) {
  // There is nothing to do if there are no pending veneer pool entries.
  if (unresolved_branches_.empty()) {
    DCHECK(next_veneer_pool_check_ == kMaxInt);
    return;
  }

  DCHECK(pc_offset() < unresolved_branches_first_limit());

  // Some short sequences of instructions must not be broken up by veneer pool
  // emission; such sequences are protected by calls to BlockVeneerPoolFor and
  // BlockVeneerPoolScope.
  if (is_veneer_pool_blocked()) {
    DCHECK(!force_emit);
    return;
  }

  if (!require_jump) {
    // Prefer emitting veneers protected by an existing instruction.
    margin *= kVeneerNoProtectionFactor;
  }
  if (force_emit || ShouldEmitVeneers(margin)) {
    EmitVeneers(force_emit, require_jump, margin);
  } else {
    next_veneer_pool_check_ =
      unresolved_branches_first_limit() - kVeneerDistanceCheckMargin;
  }
}


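// Free space between the end of the emitted instructions and the start of the
// emitted relocation information (see the layout comment above GrowBuffer()).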
int Assembler::buffer_space() const {
  return static_cast<int>(reloc_info_writer.pos() -
                          reinterpret_cast<byte*>(pc_));
}


void Assembler::RecordConstPool(int size) {
  // We only need this for debugger support, to correctly compute offsets in
  // the code.
  RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
}


void PatchingAssembler::PatchAdrFar(int64_t target_offset) {
  // The code at the current instruction should be:
  //   adr  rd, 0
  //   nop  (adr_far)
  //   nop  (adr_far)
  //   movz scratch, 0

  // Verify the expected code.
  Instruction* expected_adr = InstructionAt(0);
  CHECK(expected_adr->IsAdr() && (expected_adr->ImmPCRel() == 0));
  int rd_code = expected_adr->Rd();
  for (int i = 0; i < kAdrFarPatchableNNops; ++i) {
    CHECK(InstructionAt((i + 1) * kInstructionSize)->IsNop(ADR_FAR_NOP));
  }
  Instruction* expected_movz =
      InstructionAt((kAdrFarPatchableNInstrs - 1) * kInstructionSize);
  CHECK(expected_movz->IsMovz() &&
        (expected_movz->ImmMoveWide() == 0) &&
        (expected_movz->ShiftMoveWide() == 0));
  int scratch_code = expected_movz->Rd();

  // Patch to load the correct address. The patched sequence is:
  //   adr  rd, target_offset[15:0]
  //   movz scratch, target_offset[31:16], lsl #16
  //   movk scratch, target_offset[47:32], lsl #32
  //   add  rd, rd, scratch
  Register rd = Register::XRegFromCode(rd_code);
  Register scratch = Register::XRegFromCode(scratch_code);
  // Addresses are only 48 bits.
  adr(rd, target_offset & 0xFFFF);
  movz(scratch, (target_offset >> 16) & 0xFFFF, 16);
  movk(scratch, (target_offset >> 32) & 0xFFFF, 32);
  DCHECK((target_offset >> 48) == 0);
  add(rd, rd, scratch);
}


}  // namespace internal
}  // namespace v8

#endif  // V8_TARGET_ARCH_ARM64