      1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
      2 // All Rights Reserved.
      3 //
      4 // Redistribution and use in source and binary forms, with or without
      5 // modification, are permitted provided that the following conditions
      6 // are met:
      7 //
      8 // - Redistributions of source code must retain the above copyright notice,
      9 // this list of conditions and the following disclaimer.
     10 //
     11 // - Redistribution in binary form must reproduce the above copyright
     12 // notice, this list of conditions and the following disclaimer in the
     13 // documentation and/or other materials provided with the
     14 // distribution.
     15 //
     16 // - Neither the name of Sun Microsystems or the names of contributors may
     17 // be used to endorse or promote products derived from this software without
     18 // specific prior written permission.
     19 //
     20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
     23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
     24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
     25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
     26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
     29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
     31 // OF THE POSSIBILITY OF SUCH DAMAGE.
     32 
      33 // The original source code covered by the above license has been
     34 // modified significantly by Google Inc.
     35 // Copyright 2012 the V8 project authors. All rights reserved.
     36 
     37 #include "v8.h"
     38 
     39 #if V8_TARGET_ARCH_ARM
     40 
     41 #include "arm/assembler-arm-inl.h"
     42 #include "serialize.h"
     43 
     44 namespace v8 {
     45 namespace internal {
     46 
     47 #ifdef DEBUG
     48 bool CpuFeatures::initialized_ = false;
     49 #endif
     50 unsigned CpuFeatures::supported_ = 0;
     51 unsigned CpuFeatures::found_by_runtime_probing_only_ = 0;
     52 unsigned CpuFeatures::cache_line_size_ = 64;
     53 
     54 
     55 ExternalReference ExternalReference::cpu_features() {
     56   ASSERT(CpuFeatures::initialized_);
     57   return ExternalReference(&CpuFeatures::supported_);
     58 }
     59 
     60 
     61 // Get the CPU features enabled by the build. For cross compilation the
     62 // preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP3_INSTRUCTIONS
     63 // can be defined to enable ARMv7 and VFPv3 instructions when building the
     64 // snapshot.
     65 static unsigned CpuFeaturesImpliedByCompiler() {
     66   unsigned answer = 0;
     67 #ifdef CAN_USE_ARMV7_INSTRUCTIONS
     68   if (FLAG_enable_armv7) {
     69     answer |= 1u << ARMv7;
     70   }
     71 #endif  // CAN_USE_ARMV7_INSTRUCTIONS
     72 #ifdef CAN_USE_VFP3_INSTRUCTIONS
     73   if (FLAG_enable_vfp3) {
     74     answer |= 1u << VFP3 | 1u << ARMv7;
     75   }
     76 #endif  // CAN_USE_VFP3_INSTRUCTIONS
     77 #ifdef CAN_USE_VFP32DREGS
     78   if (FLAG_enable_32dregs) {
     79     answer |= 1u << VFP32DREGS;
     80   }
     81 #endif  // CAN_USE_VFP32DREGS
     82   if ((answer & (1u << ARMv7)) && FLAG_enable_unaligned_accesses) {
     83     answer |= 1u << UNALIGNED_ACCESSES;
     84   }
     85 
     86   return answer;
     87 }
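         // For illustration (assuming the FLAG_enable_* flags are at their
         // default of true): a snapshot build with CAN_USE_VFP3_INSTRUCTIONS
         // defined would yield
         //   answer == (1u << VFP3) | (1u << ARMv7) | (1u << UNALIGNED_ACCESSES)
         // since VFPv3 implies ARMv7, and ARMv7 enables unaligned accesses here.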
     88 
     89 
     90 const char* DwVfpRegister::AllocationIndexToString(int index) {
     91   ASSERT(index >= 0 && index < NumAllocatableRegisters());
     92   ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
     93          kNumReservedRegisters - 1);
     94   if (index >= kDoubleRegZero.code())
     95     index += kNumReservedRegisters;
     96 
     97   return VFPRegisters::Name(index, true);
     98 }
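         // Worked example (assuming d14/d15 are the two reserved registers,
         // kDoubleRegZero and kScratchDoubleReg): allocation indices 0..13 map
         // directly to "d0".."d13", while index 14 skips the reserved pair and
         // yields "d16".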
     99 
    100 
    101 void CpuFeatures::Probe() {
    102   uint64_t standard_features = static_cast<unsigned>(
    103       OS::CpuFeaturesImpliedByPlatform()) | CpuFeaturesImpliedByCompiler();
    104   ASSERT(supported_ == 0 || supported_ == standard_features);
    105 #ifdef DEBUG
    106   initialized_ = true;
    107 #endif
    108 
    109   // Get the features implied by the OS and the compiler settings. This is the
     110   // minimal set of features which is also allowed for generated code in the
    111   // snapshot.
    112   supported_ |= standard_features;
    113 
    114   if (Serializer::enabled()) {
    115     // No probing for features if we might serialize (generate snapshot).
    116     printf("   ");
    117     PrintFeatures();
    118     return;
    119   }
    120 
    121 #ifndef __arm__
    122   // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is
    123   // enabled. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
    124   if (FLAG_enable_vfp3) {
    125     supported_ |=
    126         static_cast<uint64_t>(1) << VFP3 |
    127         static_cast<uint64_t>(1) << ARMv7;
    128   }
    129   if (FLAG_enable_neon) {
    130     supported_ |= 1u << NEON;
    131   }
     132   // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled.
    133   if (FLAG_enable_armv7) {
    134     supported_ |= static_cast<uint64_t>(1) << ARMv7;
    135   }
    136 
    137   if (FLAG_enable_sudiv) {
    138     supported_ |= static_cast<uint64_t>(1) << SUDIV;
    139   }
    140 
    141   if (FLAG_enable_movw_movt) {
    142     supported_ |= static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS;
    143   }
    144 
    145   if (FLAG_enable_32dregs) {
    146     supported_ |= static_cast<uint64_t>(1) << VFP32DREGS;
    147   }
    148 
    149   if (FLAG_enable_unaligned_accesses) {
    150     supported_ |= static_cast<uint64_t>(1) << UNALIGNED_ACCESSES;
    151   }
    152 
    153 #else  // __arm__
    154   // Probe for additional features not already known to be available.
    155   if (!IsSupported(VFP3) && FLAG_enable_vfp3 && OS::ArmCpuHasFeature(VFP3)) {
    156     // This implementation also sets the VFP flags if runtime
    157     // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
    158     // 0406B, page A1-6.
    159     found_by_runtime_probing_only_ |=
    160         static_cast<uint64_t>(1) << VFP3 |
    161         static_cast<uint64_t>(1) << ARMv7;
    162   }
    163 
    164   if (!IsSupported(NEON) && FLAG_enable_neon && OS::ArmCpuHasFeature(NEON)) {
    165     found_by_runtime_probing_only_ |= 1u << NEON;
    166   }
    167 
    168   if (!IsSupported(ARMv7) && FLAG_enable_armv7 && OS::ArmCpuHasFeature(ARMv7)) {
    169     found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << ARMv7;
    170   }
    171 
    172   if (!IsSupported(SUDIV) && FLAG_enable_sudiv && OS::ArmCpuHasFeature(SUDIV)) {
    173     found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << SUDIV;
    174   }
    175 
    176   if (!IsSupported(UNALIGNED_ACCESSES) && FLAG_enable_unaligned_accesses
    177       && OS::ArmCpuHasFeature(ARMv7)) {
    178     found_by_runtime_probing_only_ |=
    179         static_cast<uint64_t>(1) << UNALIGNED_ACCESSES;
    180   }
    181 
    182   CpuImplementer implementer = OS::GetCpuImplementer();
    183   if (implementer == QUALCOMM_IMPLEMENTER &&
    184       FLAG_enable_movw_movt && OS::ArmCpuHasFeature(ARMv7)) {
    185     found_by_runtime_probing_only_ |=
    186         static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS;
    187   }
    188 
    189   CpuPart part = OS::GetCpuPart(implementer);
    190   if ((part == CORTEX_A9) || (part == CORTEX_A5)) {
    191     cache_line_size_ = 32;
    192   }
    193 
    194   if (!IsSupported(VFP32DREGS) && FLAG_enable_32dregs
    195       && OS::ArmCpuHasFeature(VFP32DREGS)) {
    196     found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << VFP32DREGS;
    197   }
    198 
    199   supported_ |= found_by_runtime_probing_only_;
    200 #endif
    201 
    202   // Assert that VFP3 implies ARMv7.
    203   ASSERT(!IsSupported(VFP3) || IsSupported(ARMv7));
    204 }
    205 
    206 
    207 void CpuFeatures::PrintTarget() {
    208   const char* arm_arch = NULL;
    209   const char* arm_test = "";
    210   const char* arm_fpu = "";
    211   const char* arm_thumb = "";
    212   const char* arm_float_abi = NULL;
    213 
    214 #if defined CAN_USE_ARMV7_INSTRUCTIONS
    215   arm_arch = "arm v7";
    216 #else
    217   arm_arch = "arm v6";
    218 #endif
    219 
    220 #ifdef __arm__
    221 
    222 # ifdef ARM_TEST
    223   arm_test = " test";
    224 # endif
    225 # if defined __ARM_NEON__
    226   arm_fpu = " neon";
    227 # elif defined CAN_USE_VFP3_INSTRUCTIONS
    228   arm_fpu = " vfp3";
    229 # else
    230   arm_fpu = " vfp2";
    231 # endif
    232 # if (defined __thumb__) || (defined __thumb2__)
    233   arm_thumb = " thumb";
    234 # endif
    235   arm_float_abi = OS::ArmUsingHardFloat() ? "hard" : "softfp";
    236 
    237 #else  // __arm__
    238 
    239   arm_test = " simulator";
    240 # if defined CAN_USE_VFP3_INSTRUCTIONS
    241 #  if defined CAN_USE_VFP32DREGS
    242   arm_fpu = " vfp3";
    243 #  else
    244   arm_fpu = " vfp3-d16";
    245 #  endif
    246 # else
    247   arm_fpu = " vfp2";
    248 # endif
    249 # if USE_EABI_HARDFLOAT == 1
    250   arm_float_abi = "hard";
    251 # else
    252   arm_float_abi = "softfp";
    253 # endif
    254 
    255 #endif  // __arm__
    256 
    257   printf("target%s %s%s%s %s\n",
    258          arm_test, arm_arch, arm_fpu, arm_thumb, arm_float_abi);
    259 }
    260 
    261 
    262 void CpuFeatures::PrintFeatures() {
    263   printf(
    264     "ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d UNALIGNED_ACCESSES=%d "
    265     "MOVW_MOVT_IMMEDIATE_LOADS=%d",
    266     CpuFeatures::IsSupported(ARMv7),
    267     CpuFeatures::IsSupported(VFP3),
    268     CpuFeatures::IsSupported(VFP32DREGS),
    269     CpuFeatures::IsSupported(NEON),
    270     CpuFeatures::IsSupported(SUDIV),
    271     CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
    272     CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS));
    273 #ifdef __arm__
    274   bool eabi_hardfloat = OS::ArmUsingHardFloat();
    275 #elif USE_EABI_HARDFLOAT
    276   bool eabi_hardfloat = true;
    277 #else
    278   bool eabi_hardfloat = false;
    279 #endif
     280   printf(" USE_EABI_HARDFLOAT=%d\n", eabi_hardfloat);
    281 }
    282 
    283 
    284 // -----------------------------------------------------------------------------
    285 // Implementation of RelocInfo
    286 
    287 const int RelocInfo::kApplyMask = 0;
    288 
    289 
    290 bool RelocInfo::IsCodedSpecially() {
    291   // The deserializer needs to know whether a pointer is specially coded.  Being
    292   // specially coded on ARM means that it is a movw/movt instruction.  We don't
    293   // generate those yet.
    294   return false;
    295 }
    296 
    297 
    298 void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
    299   // Patch the code at the current address with the supplied instructions.
    300   Instr* pc = reinterpret_cast<Instr*>(pc_);
    301   Instr* instr = reinterpret_cast<Instr*>(instructions);
    302   for (int i = 0; i < instruction_count; i++) {
    303     *(pc + i) = *(instr + i);
    304   }
    305 
    306   // Indicate that code has changed.
    307   CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
    308 }
    309 
    310 
    311 // Patch the code at the current PC with a call to the target address.
    312 // Additional guard instructions can be added if required.
    313 void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
    314   // Patch the code at the current address with a call to the target.
    315   UNIMPLEMENTED();
    316 }
    317 
    318 
    319 // -----------------------------------------------------------------------------
    320 // Implementation of Operand and MemOperand
    321 // See assembler-arm-inl.h for inlined constructors
    322 
    323 Operand::Operand(Handle<Object> handle) {
    324 #ifdef DEBUG
    325   Isolate* isolate = Isolate::Current();
    326 #endif
    327   AllowDeferredHandleDereference using_raw_address;
    328   rm_ = no_reg;
     329   // Verify all Objects referred to by code are NOT in new space.
    330   Object* obj = *handle;
    331   ASSERT(!isolate->heap()->InNewSpace(obj));
    332   if (obj->IsHeapObject()) {
    333     imm32_ = reinterpret_cast<intptr_t>(handle.location());
    334     rmode_ = RelocInfo::EMBEDDED_OBJECT;
    335   } else {
    336     // no relocation needed
    337     imm32_ = reinterpret_cast<intptr_t>(obj);
    338     rmode_ = RelocInfo::NONE32;
    339   }
    340 }
    341 
    342 
    343 Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
    344   ASSERT(is_uint5(shift_imm));
    345   ASSERT(shift_op != ROR || shift_imm != 0);  // use RRX if you mean it
    346   rm_ = rm;
    347   rs_ = no_reg;
    348   shift_op_ = shift_op;
    349   shift_imm_ = shift_imm & 31;
    350   if (shift_op == RRX) {
    351     // encoded as ROR with shift_imm == 0
    352     ASSERT(shift_imm == 0);
    353     shift_op_ = ROR;
    354     shift_imm_ = 0;
    355   }
    356 }
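         // For example, Operand(r1, LSR, 4) denotes the shifter operand
         // "r1, LSR #4", and Operand(r1, RRX, 0) denotes "r1, RRX" (rotate
         // right with extend), which the encoding expresses as ROR with a
         // zero shift amount.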
    357 
    358 
    359 Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
    360   ASSERT(shift_op != RRX);
    361   rm_ = rm;
    363   shift_op_ = shift_op;
    364   rs_ = rs;
    365 }
    366 
    367 
    368 MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
    369   rn_ = rn;
    370   rm_ = no_reg;
    371   offset_ = offset;
    372   am_ = am;
    373 }
    374 
    375 
    376 MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
    377   rn_ = rn;
    378   rm_ = rm;
    379   shift_op_ = LSL;
    380   shift_imm_ = 0;
    381   am_ = am;
    382 }
    383 
    384 
    385 MemOperand::MemOperand(Register rn, Register rm,
    386                        ShiftOp shift_op, int shift_imm, AddrMode am) {
    387   ASSERT(is_uint5(shift_imm));
    388   rn_ = rn;
    389   rm_ = rm;
    390   shift_op_ = shift_op;
    391   shift_imm_ = shift_imm & 31;
    392   am_ = am;
    393 }
    394 
    395 
    396 NeonMemOperand::NeonMemOperand(Register rn, AddrMode am, int align) {
    397   ASSERT((am == Offset) || (am == PostIndex));
    398   rn_ = rn;
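           // The NEON VLD/VST encodings express the post-update mode in Rm: pc
           // (0b1111) means "no writeback" and sp (0b1101) means "advance rn by
           // the transfer size" - a convention of the ARM encoding, not a real
           // use of pc or sp as pointers.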
    399   rm_ = (am == Offset) ? pc : sp;
    400   SetAlignment(align);
    401 }
    402 
    403 
    404 NeonMemOperand::NeonMemOperand(Register rn, Register rm, int align) {
    405   rn_ = rn;
    406   rm_ = rm;
    407   SetAlignment(align);
    408 }
    409 
    410 
    411 void NeonMemOperand::SetAlignment(int align) {
    412   switch (align) {
    413     case 0:
    414       align_ = 0;
    415       break;
    416     case 64:
    417       align_ = 1;
    418       break;
    419     case 128:
    420       align_ = 2;
    421       break;
    422     case 256:
    423       align_ = 3;
    424       break;
    425     default:
    426       UNREACHABLE();
    427       align_ = 0;
    428       break;
    429   }
    430 }
    431 
    432 
    433 NeonListOperand::NeonListOperand(DoubleRegister base, int registers_count) {
    434   base_ = base;
    435   switch (registers_count) {
    436     case 1:
    437       type_ = nlt_1;
    438       break;
    439     case 2:
    440       type_ = nlt_2;
    441       break;
    442     case 3:
    443       type_ = nlt_3;
    444       break;
    445     case 4:
    446       type_ = nlt_4;
    447       break;
    448     default:
    449       UNREACHABLE();
    450       type_ = nlt_1;
    451       break;
    452   }
    453 }
    454 
    455 
    456 // -----------------------------------------------------------------------------
    457 // Specific instructions, constants, and masks.
    458 
    459 // add(sp, sp, 4) instruction (aka Pop())
    460 const Instr kPopInstruction =
    461     al | PostIndex | 4 | LeaveCC | I | kRegister_sp_Code * B16 |
    462         kRegister_sp_Code * B12;
    463 // str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
    464 // register r is not encoded.
    465 const Instr kPushRegPattern =
    466     al | B26 | 4 | NegPreIndex | kRegister_sp_Code * B16;
    467 // ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
    468 // register r is not encoded.
    469 const Instr kPopRegPattern =
    470     al | B26 | L | 4 | PostIndex | kRegister_sp_Code * B16;
    471 // mov lr, pc
    472 const Instr kMovLrPc = al | MOV | kRegister_pc_Code | kRegister_lr_Code * B12;
    473 // ldr rd, [pc, #offset]
    474 const Instr kLdrPCMask = 15 * B24 | 7 * B20 | 15 * B16;
    475 const Instr kLdrPCPattern = 5 * B24 | L | kRegister_pc_Code * B16;
    476 // vldr dd, [pc, #offset]
    477 const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
    478 const Instr kVldrDPCPattern = 13 * B24 | L | kRegister_pc_Code * B16 | 11 * B8;
    479 // blxcc rm
    480 const Instr kBlxRegMask =
    481     15 * B24 | 15 * B20 | 15 * B16 | 15 * B12 | 15 * B8 | 15 * B4;
    482 const Instr kBlxRegPattern =
    483     B24 | B21 | 15 * B16 | 15 * B12 | 15 * B8 | BLX;
    484 const Instr kBlxIp = al | kBlxRegPattern | ip.code();
    485 const Instr kMovMvnMask = 0x6d * B21 | 0xf * B16;
    486 const Instr kMovMvnPattern = 0xd * B21;
    487 const Instr kMovMvnFlip = B22;
    488 const Instr kMovLeaveCCMask = 0xdff * B16;
    489 const Instr kMovLeaveCCPattern = 0x1a0 * B16;
    490 const Instr kMovwMask = 0xff * B20;
    491 const Instr kMovwPattern = 0x30 * B20;
    492 const Instr kMovwLeaveCCFlip = 0x5 * B21;
    493 const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
    494 const Instr kCmpCmnPattern = 0x15 * B20;
    495 const Instr kCmpCmnFlip = B21;
    496 const Instr kAddSubFlip = 0x6 * B21;
    497 const Instr kAndBicFlip = 0xe * B21;
    498 
     499 // Patterns and masks for ldr/str instructions with fp as the base register.
    500 const Instr kLdrRegFpOffsetPattern =
    501     al | B26 | L | Offset | kRegister_fp_Code * B16;
    502 const Instr kStrRegFpOffsetPattern =
    503     al | B26 | Offset | kRegister_fp_Code * B16;
    504 const Instr kLdrRegFpNegOffsetPattern =
    505     al | B26 | L | NegOffset | kRegister_fp_Code * B16;
    506 const Instr kStrRegFpNegOffsetPattern =
    507     al | B26 | NegOffset | kRegister_fp_Code * B16;
    508 const Instr kLdrStrInstrTypeMask = 0xffff0000;
    509 const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
    510 const Instr kLdrStrOffsetMask = 0x00000fff;
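         // Worked example of the mask/pattern scheme: 0xE59F0000 is
         // "ldr r0, [pc, #0]". Masking with kLdrPCMask (0x0F7F0000) leaves
         // 0x051F0000, which equals kLdrPCPattern, so the instruction is
         // recognized regardless of condition, Rd, offset sign, or offset value.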
    511 
    512 
    513 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    514     : AssemblerBase(isolate, buffer, buffer_size),
    515       recorded_ast_id_(TypeFeedbackId::None()),
    516       positions_recorder_(this) {
    517   reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
    518   num_pending_reloc_info_ = 0;
    519   num_pending_64_bit_reloc_info_ = 0;
    520   next_buffer_check_ = 0;
    521   const_pool_blocked_nesting_ = 0;
    522   no_const_pool_before_ = 0;
    523   first_const_pool_use_ = -1;
    524   last_bound_pos_ = 0;
    525   ClearRecordedAstId();
    526 }
    527 
    528 
    529 Assembler::~Assembler() {
    530   ASSERT(const_pool_blocked_nesting_ == 0);
    531 }
    532 
    533 
    534 void Assembler::GetCode(CodeDesc* desc) {
    535   // Emit constant pool if necessary.
    536   CheckConstPool(true, false);
    537   ASSERT(num_pending_reloc_info_ == 0);
    538   ASSERT(num_pending_64_bit_reloc_info_ == 0);
    539 
    540   // Set up code descriptor.
    541   desc->buffer = buffer_;
    542   desc->buffer_size = buffer_size_;
    543   desc->instr_size = pc_offset();
    544   desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
    545 }
    546 
    547 
    548 void Assembler::Align(int m) {
    549   ASSERT(m >= 4 && IsPowerOf2(m));
    550   while ((pc_offset() & (m - 1)) != 0) {
    551     nop();
    552   }
    553 }
    554 
    555 
    556 void Assembler::CodeTargetAlign() {
    557   // Preferred alignment of jump targets on some ARM chips.
    558   Align(8);
    559 }
    560 
    561 
    562 Condition Assembler::GetCondition(Instr instr) {
    563   return Instruction::ConditionField(instr);
    564 }
    565 
    566 
    567 bool Assembler::IsBranch(Instr instr) {
    568   return (instr & (B27 | B25)) == (B27 | B25);
    569 }
    570 
    571 
    572 int Assembler::GetBranchOffset(Instr instr) {
    573   ASSERT(IsBranch(instr));
     574   // Take the jump offset in the lower 24 bits, sign-extend it, and multiply
     575   // it by 4 to get the offset in bytes.
    576   return ((instr & kImm24Mask) << 8) >> 6;
    577 }
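         // The shift pair sign-extends the 24-bit field and scales it by 4 in
         // one step: e.g. imm24 == 0xfffffe (-2) gives 0x00fffffe << 8 ==
         // 0xfffffe00, and the arithmetic >> 6 yields 0xfffffff8, i.e. -8 bytes.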
    578 
    579 
    580 bool Assembler::IsLdrRegisterImmediate(Instr instr) {
    581   return (instr & (B27 | B26 | B25 | B22 | B20)) == (B26 | B20);
    582 }
    583 
    584 
    585 bool Assembler::IsVldrDRegisterImmediate(Instr instr) {
    586   return (instr & (15 * B24 | 3 * B20 | 15 * B8)) == (13 * B24 | B20 | 11 * B8);
    587 }
    588 
    589 
    590 int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
    591   ASSERT(IsLdrRegisterImmediate(instr));
    592   bool positive = (instr & B23) == B23;
    593   int offset = instr & kOff12Mask;  // Zero extended offset.
    594   return positive ? offset : -offset;
    595 }
    596 
    597 
    598 int Assembler::GetVldrDRegisterImmediateOffset(Instr instr) {
    599   ASSERT(IsVldrDRegisterImmediate(instr));
    600   bool positive = (instr & B23) == B23;
    601   int offset = instr & kOff8Mask;  // Zero extended offset.
    602   offset <<= 2;
    603   return positive ? offset : -offset;
    604 }
    605 
    606 
    607 Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
    608   ASSERT(IsLdrRegisterImmediate(instr));
    609   bool positive = offset >= 0;
    610   if (!positive) offset = -offset;
    611   ASSERT(is_uint12(offset));
    612   // Set bit indicating whether the offset should be added.
    613   instr = (instr & ~B23) | (positive ? B23 : 0);
    614   // Set the actual offset.
    615   return (instr & ~kOff12Mask) | offset;
    616 }
    617 
    618 
    619 Instr Assembler::SetVldrDRegisterImmediateOffset(Instr instr, int offset) {
    620   ASSERT(IsVldrDRegisterImmediate(instr));
    621   ASSERT((offset & ~3) == offset);  // Must be 64-bit aligned.
    622   bool positive = offset >= 0;
    623   if (!positive) offset = -offset;
    624   ASSERT(is_uint10(offset));
    625   // Set bit indicating whether the offset should be added.
    626   instr = (instr & ~B23) | (positive ? B23 : 0);
    627   // Set the actual offset. Its bottom 2 bits are zero.
    628   return (instr & ~kOff8Mask) | (offset >> 2);
    629 }
    630 
    631 
    632 bool Assembler::IsStrRegisterImmediate(Instr instr) {
    633   return (instr & (B27 | B26 | B25 | B22 | B20)) == B26;
    634 }
    635 
    636 
    637 Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
    638   ASSERT(IsStrRegisterImmediate(instr));
    639   bool positive = offset >= 0;
    640   if (!positive) offset = -offset;
    641   ASSERT(is_uint12(offset));
    642   // Set bit indicating whether the offset should be added.
    643   instr = (instr & ~B23) | (positive ? B23 : 0);
    644   // Set the actual offset.
    645   return (instr & ~kOff12Mask) | offset;
    646 }
    647 
    648 
    649 bool Assembler::IsAddRegisterImmediate(Instr instr) {
    650   return (instr & (B27 | B26 | B25 | B24 | B23 | B22 | B21)) == (B25 | B23);
    651 }
    652 
    653 
    654 Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
    655   ASSERT(IsAddRegisterImmediate(instr));
    656   ASSERT(offset >= 0);
    657   ASSERT(is_uint12(offset));
    658   // Set the offset.
    659   return (instr & ~kOff12Mask) | offset;
    660 }
    661 
    662 
    663 Register Assembler::GetRd(Instr instr) {
    664   Register reg;
    665   reg.code_ = Instruction::RdValue(instr);
    666   return reg;
    667 }
    668 
    669 
    670 Register Assembler::GetRn(Instr instr) {
    671   Register reg;
    672   reg.code_ = Instruction::RnValue(instr);
    673   return reg;
    674 }
    675 
    676 
    677 Register Assembler::GetRm(Instr instr) {
    678   Register reg;
    679   reg.code_ = Instruction::RmValue(instr);
    680   return reg;
    681 }
    682 
    683 
    684 bool Assembler::IsPush(Instr instr) {
    685   return ((instr & ~kRdMask) == kPushRegPattern);
    686 }
    687 
    688 
    689 bool Assembler::IsPop(Instr instr) {
    690   return ((instr & ~kRdMask) == kPopRegPattern);
    691 }
    692 
    693 
    694 bool Assembler::IsStrRegFpOffset(Instr instr) {
    695   return ((instr & kLdrStrInstrTypeMask) == kStrRegFpOffsetPattern);
    696 }
    697 
    698 
    699 bool Assembler::IsLdrRegFpOffset(Instr instr) {
    700   return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpOffsetPattern);
    701 }
    702 
    703 
    704 bool Assembler::IsStrRegFpNegOffset(Instr instr) {
    705   return ((instr & kLdrStrInstrTypeMask) == kStrRegFpNegOffsetPattern);
    706 }
    707 
    708 
    709 bool Assembler::IsLdrRegFpNegOffset(Instr instr) {
    710   return ((instr & kLdrStrInstrTypeMask) == kLdrRegFpNegOffsetPattern);
    711 }
    712 
    713 
    714 bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
    715   // Check the instruction is indeed a
    716   // ldr<cond> <Rd>, [pc +/- offset_12].
    717   return (instr & kLdrPCMask) == kLdrPCPattern;
    718 }
    719 
    720 
    721 bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
    722   // Check the instruction is indeed a
    723   // vldr<cond> <Dd>, [pc +/- offset_10].
    724   return (instr & kVldrDPCMask) == kVldrDPCPattern;
    725 }
    726 
    727 
    728 bool Assembler::IsTstImmediate(Instr instr) {
    729   return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
    730       (I | TST | S);
    731 }
    732 
    733 
    734 bool Assembler::IsCmpRegister(Instr instr) {
    735   return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask | B4)) ==
    736       (CMP | S);
    737 }
    738 
    739 
    740 bool Assembler::IsCmpImmediate(Instr instr) {
    741   return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
    742       (I | CMP | S);
    743 }
    744 
    745 
    746 Register Assembler::GetCmpImmediateRegister(Instr instr) {
    747   ASSERT(IsCmpImmediate(instr));
    748   return GetRn(instr);
    749 }
    750 
    751 
    752 int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
    753   ASSERT(IsCmpImmediate(instr));
    754   return instr & kOff12Mask;
    755 }
    756 
    757 
    758 // Labels refer to positions in the (to be) generated code.
    759 // There are bound, linked, and unused labels.
    760 //
    761 // Bound labels refer to known positions in the already
    762 // generated code. pos() is the position the label refers to.
    763 //
    764 // Linked labels refer to unknown positions in the code
    765 // to be generated; pos() is the position of the last
    766 // instruction using the label.
    767 //
    768 // The linked labels form a link chain by making the branch offset
     769 // in the instruction stream point to the previous branch
    770 // instruction using the same label.
    771 //
    772 // The link chain is terminated by a branch offset pointing to the
    773 // same position.
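         // Illustrative chain (the offsets are hypothetical): if branches at
         // offsets 24 and 8 both use an unbound label, the branch at 24 encodes
         // an offset pointing back to 8, and the branch at 8 points to itself,
         // terminating the chain. bind_to() then walks 24 -> 8 and patches both.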
    774 
    775 
    776 int Assembler::target_at(int pos)  {
    777   Instr instr = instr_at(pos);
    778   if ((instr & ~kImm24Mask) == 0) {
    779     // Emitted label constant, not part of a branch.
    780     return instr - (Code::kHeaderSize - kHeapObjectTag);
    781   }
    782   ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
    783   int imm26 = ((instr & kImm24Mask) << 8) >> 6;
    784   if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
    785       ((instr & B24) != 0)) {
    786     // blx uses bit 24 to encode bit 2 of imm26
    787     imm26 += 2;
    788   }
    789   return pos + kPcLoadDelta + imm26;
    790 }
    791 
    792 
    793 void Assembler::target_at_put(int pos, int target_pos) {
    794   Instr instr = instr_at(pos);
    795   if ((instr & ~kImm24Mask) == 0) {
    796     ASSERT(target_pos == pos || target_pos >= 0);
    797     // Emitted label constant, not part of a branch.
    798     // Make label relative to Code* of generated Code object.
    799     instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    800     return;
    801   }
    802   int imm26 = target_pos - (pos + kPcLoadDelta);
    803   ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
    804   if (Instruction::ConditionField(instr) == kSpecialCondition) {
    805     // blx uses bit 24 to encode bit 2 of imm26
    806     ASSERT((imm26 & 1) == 0);
    807     instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1)*B24;
    808   } else {
    809     ASSERT((imm26 & 3) == 0);
    810     instr &= ~kImm24Mask;
    811   }
    812   int imm24 = imm26 >> 2;
    813   ASSERT(is_int24(imm24));
    814   instr_at_put(pos, instr | (imm24 & kImm24Mask));
    815 }
    816 
    817 
    818 void Assembler::print(Label* L) {
    819   if (L->is_unused()) {
    820     PrintF("unused label\n");
    821   } else if (L->is_bound()) {
    822     PrintF("bound label to %d\n", L->pos());
    823   } else if (L->is_linked()) {
    824     Label l = *L;
    825     PrintF("unbound label");
    826     while (l.is_linked()) {
    827       PrintF("@ %d ", l.pos());
    828       Instr instr = instr_at(l.pos());
    829       if ((instr & ~kImm24Mask) == 0) {
    830         PrintF("value\n");
    831       } else {
    832         ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx
    833         Condition cond = Instruction::ConditionField(instr);
    834         const char* b;
    835         const char* c;
    836         if (cond == kSpecialCondition) {
    837           b = "blx";
    838           c = "";
    839         } else {
    840           if ((instr & B24) != 0)
    841             b = "bl";
    842           else
    843             b = "b";
    844 
    845           switch (cond) {
    846             case eq: c = "eq"; break;
    847             case ne: c = "ne"; break;
    848             case hs: c = "hs"; break;
    849             case lo: c = "lo"; break;
    850             case mi: c = "mi"; break;
    851             case pl: c = "pl"; break;
    852             case vs: c = "vs"; break;
    853             case vc: c = "vc"; break;
    854             case hi: c = "hi"; break;
    855             case ls: c = "ls"; break;
    856             case ge: c = "ge"; break;
    857             case lt: c = "lt"; break;
    858             case gt: c = "gt"; break;
    859             case le: c = "le"; break;
    860             case al: c = ""; break;
    861             default:
    862               c = "";
    863               UNREACHABLE();
    864           }
    865         }
    866         PrintF("%s%s\n", b, c);
    867       }
    868       next(&l);
    869     }
    870   } else {
    871     PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
    872   }
    873 }
    874 
    875 
    876 void Assembler::bind_to(Label* L, int pos) {
    877   ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
    878   while (L->is_linked()) {
    879     int fixup_pos = L->pos();
    880     next(L);  // call next before overwriting link with target at fixup_pos
    881     target_at_put(fixup_pos, pos);
    882   }
    883   L->bind_to(pos);
    884 
    885   // Keep track of the last bound label so we don't eliminate any instructions
    886   // before a bound label.
    887   if (pos > last_bound_pos_)
    888     last_bound_pos_ = pos;
    889 }
    890 
    891 
    892 void Assembler::bind(Label* L) {
    893   ASSERT(!L->is_bound());  // label can only be bound once
    894   bind_to(L, pc_offset());
    895 }
    896 
    897 
    898 void Assembler::next(Label* L) {
    899   ASSERT(L->is_linked());
    900   int link = target_at(L->pos());
    901   if (link == L->pos()) {
     902     // Branch target points to the same instruction. This is the end of the link
    903     // chain.
    904     L->Unuse();
    905   } else {
    906     ASSERT(link >= 0);
    907     L->link_to(link);
    908   }
    909 }
    910 
    911 
    912 // Low-level code emission routines depending on the addressing mode.
    913 // If this returns true then you have to use the rotate_imm and immed_8
    914 // that it returns, because it may have already changed the instruction
    915 // to match them!
    916 static bool fits_shifter(uint32_t imm32,
    917                          uint32_t* rotate_imm,
    918                          uint32_t* immed_8,
    919                          Instr* instr) {
    920   // imm32 must be unsigned.
    921   for (int rot = 0; rot < 16; rot++) {
             // rot == 0 is handled separately because shifting a uint32_t right
             // by 32 bits is undefined behavior in C++.
     922     uint32_t imm8 = (rot == 0)
                 ? imm32
                 : ((imm32 << 2*rot) | (imm32 >> (32 - 2*rot)));
     923     if (imm8 <= 0xff) {
    924       *rotate_imm = rot;
    925       *immed_8 = imm8;
    926       return true;
    927     }
    928   }
    929   // If the opcode is one with a complementary version and the complementary
    930   // immediate fits, change the opcode.
    931   if (instr != NULL) {
    932     if ((*instr & kMovMvnMask) == kMovMvnPattern) {
    933       if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
    934         *instr ^= kMovMvnFlip;
    935         return true;
    936       } else if ((*instr & kMovLeaveCCMask) == kMovLeaveCCPattern) {
    937         if (CpuFeatures::IsSupported(ARMv7)) {
    938           if (imm32 < 0x10000) {
    939             *instr ^= kMovwLeaveCCFlip;
    940             *instr |= EncodeMovwImmediate(imm32);
    941             *rotate_imm = *immed_8 = 0;  // Not used for movw.
    942             return true;
    943           }
    944         }
    945       }
    946     } else if ((*instr & kCmpCmnMask) == kCmpCmnPattern) {
    947       if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
    948         *instr ^= kCmpCmnFlip;
    949         return true;
    950       }
    951     } else {
    952       Instr alu_insn = (*instr & kALUMask);
    953       if (alu_insn == ADD ||
    954           alu_insn == SUB) {
    955         if (fits_shifter(-static_cast<int>(imm32), rotate_imm, immed_8, NULL)) {
    956           *instr ^= kAddSubFlip;
    957           return true;
    958         }
    959       } else if (alu_insn == AND ||
    960                  alu_insn == BIC) {
    961         if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
    962           *instr ^= kAndBicFlip;
    963           return true;
    964         }
    965       }
    966     }
    967   }
    968   return false;
    969 }
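         // Worked example: imm32 == 0xff000000 rotated left by 8 bits (rot == 4)
         // gives imm8 == 0xff, so it encodes as immed_8 = 0xff, rotate_imm = 4;
         // the hardware rotates 0xff right by 2*4 == 8 bits to recreate
         // 0xff000000.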
    970 
    971 
    972 // We have to use the temporary register for things that can be relocated even
    973 // if they can be encoded in the ARM's 12 bits of immediate-offset instruction
    974 // space.  There is no guarantee that the relocated location can be similarly
    975 // encoded.
    976 bool Operand::must_output_reloc_info(const Assembler* assembler) const {
    977   if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
    978 #ifdef DEBUG
    979     if (!Serializer::enabled()) {
    980       Serializer::TooLateToEnableNow();
    981     }
    982 #endif  // def DEBUG
    983     if (assembler != NULL && assembler->predictable_code_size()) return true;
    984     return Serializer::enabled();
    985   } else if (RelocInfo::IsNone(rmode_)) {
    986     return false;
    987   }
    988   return true;
    989 }
    990 
    991 
    992 static bool use_movw_movt(const Operand& x, const Assembler* assembler) {
    993   if (Assembler::use_immediate_embedded_pointer_loads(assembler)) {
    994     return true;
    995   }
    996   if (x.must_output_reloc_info(assembler)) {
    997     return false;
    998   }
    999   return CpuFeatures::IsSupported(ARMv7);
   1000 }
   1001 
   1002 
   1003 bool Operand::is_single_instruction(const Assembler* assembler,
   1004                                     Instr instr) const {
   1005   if (rm_.is_valid()) return true;
   1006   uint32_t dummy1, dummy2;
   1007   if (must_output_reloc_info(assembler) ||
   1008       !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
    1009     // The immediate operand cannot be encoded as a shifter operand, or the
    1010     // constant pool must be used. For a mov instruction that does not set
    1011     // the condition code, alternative encodings (movw/movt) can be used.
   1012     if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
   1013       return !use_movw_movt(*this, assembler);
   1014     } else {
    1015       // If this is not a mov or mvn instruction, at least one additional
    1016       // instruction is needed - either a mov or an ldr. The mov might itself
    1017       // take two instructions (movw followed by movt), so in total two or
    1018       // three instructions will be generated.
   1019       return false;
   1020     }
   1021   } else {
   1022     // No use of constant pool and the immediate operand can be encoded as a
   1023     // shifter operand.
   1024     return true;
   1025   }
   1026 }
   1027 
   1028 
   1029 void Assembler::move_32_bit_immediate(Condition cond,
   1030                                       Register rd,
   1031                                       SBit s,
   1032                                       const Operand& x) {
   1033   if (rd.code() != pc.code() && s == LeaveCC) {
   1034     if (use_movw_movt(x, this)) {
   1035       if (x.must_output_reloc_info(this)) {
   1036         RecordRelocInfo(x.rmode_, x.imm32_, DONT_USE_CONSTANT_POOL);
   1037         // Make sure the movw/movt doesn't get separated.
   1038         BlockConstPoolFor(2);
   1039       }
   1040       emit(cond | 0x30*B20 | rd.code()*B12 |
   1041            EncodeMovwImmediate(x.imm32_ & 0xffff));
   1042       movt(rd, static_cast<uint32_t>(x.imm32_) >> 16, cond);
   1043       return;
   1044     }
   1045   }
   1046 
   1047   RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
   1048   ldr(rd, MemOperand(pc, 0), cond);
   1049 }
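         // For example, on ARMv7 with s == LeaveCC and no relocation info,
         // moving 0x12345678 into r0 emits "movw r0, #0x5678" followed by
         // "movt r0, #0x1234"; otherwise the value is placed in the constant
         // pool and loaded with "ldr r0, [pc, #<offset>]".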
   1050 
   1051 
   1052 void Assembler::addrmod1(Instr instr,
   1053                          Register rn,
   1054                          Register rd,
   1055                          const Operand& x) {
   1056   CheckBuffer();
   1057   ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
   1058   if (!x.rm_.is_valid()) {
   1059     // Immediate.
   1060     uint32_t rotate_imm;
   1061     uint32_t immed_8;
   1062     if (x.must_output_reloc_info(this) ||
   1063         !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
   1064       // The immediate operand cannot be encoded as a shifter operand, so load
   1065       // it first to register ip and change the original instruction to use ip.
   1066       // However, if the original instruction is a 'mov rd, x' (not setting the
   1067       // condition code), then replace it with a 'ldr rd, [pc]'.
    1068       CHECK(!rn.is(ip));  // rn should never be ip, or it will be trashed
   1069       Condition cond = Instruction::ConditionField(instr);
   1070       if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
   1071         move_32_bit_immediate(cond, rd, LeaveCC, x);
   1072       } else {
   1073         if ((instr & kMovMvnMask) == kMovMvnPattern) {
   1074           // Moves need to use a constant pool entry.
   1075           RecordRelocInfo(x.rmode_, x.imm32_, USE_CONSTANT_POOL);
   1076           ldr(ip, MemOperand(pc, 0), cond);
   1077         } else if (x.must_output_reloc_info(this)) {
   1078           // Otherwise, use most efficient form of fetching from constant pool.
   1079           move_32_bit_immediate(cond, ip, LeaveCC, x);
   1080         } else {
   1081           // If this is not a mov or mvn instruction we may still be able to
   1082           // avoid a constant pool entry by using mvn or movw.
   1083           mov(ip, x, LeaveCC, cond);
   1084         }
   1085         addrmod1(instr, rn, rd, Operand(ip));
   1086       }
   1087       return;
   1088     }
   1089     instr |= I | rotate_imm*B8 | immed_8;
   1090   } else if (!x.rs_.is_valid()) {
   1091     // Immediate shift.
   1092     instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
   1093   } else {
   1094     // Register shift.
   1095     ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
   1096     instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
   1097   }
   1098   emit(instr | rn.code()*B16 | rd.code()*B12);
   1099   if (rn.is(pc) || x.rm_.is(pc)) {
   1100     // Block constant pool emission for one instruction after reading pc.
   1101     BlockConstPoolFor(1);
   1102   }
   1103 }
   1104 
   1105 
   1106 void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
   1107   ASSERT((instr & ~(kCondMask | B | L)) == B26);
   1108   int am = x.am_;
   1109   if (!x.rm_.is_valid()) {
   1110     // Immediate offset.
   1111     int offset_12 = x.offset_;
   1112     if (offset_12 < 0) {
   1113       offset_12 = -offset_12;
   1114       am ^= U;
   1115     }
   1116     if (!is_uint12(offset_12)) {
    1117       // Immediate offset cannot be encoded, load it first to register ip.
    1118       // rn (and rd in a load) should never be ip, or it will be trashed.
   1119       ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
   1120       mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
   1121       addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
   1122       return;
   1123     }
   1124     ASSERT(offset_12 >= 0);  // no masking needed
   1125     instr |= offset_12;
   1126   } else {
    1127     // Register offset (shift_imm_ and shift_op_ are 0) or scaled
    1128     // register offset; the constructors make sure that both shift_imm_
    1129     // and shift_op_ are initialized.
   1130     ASSERT(!x.rm_.is(pc));
   1131     instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
   1132   }
   1133   ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
   1134   emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
   1135 }
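         // For example, ldr(r0, MemOperand(r1, 4096)) cannot encode its offset
         // (the unsigned 12-bit field tops out at 4095), so the offset is first
         // materialized in ip and the access is re-issued as the register-offset
         // form "ldr r0, [r1, ip]".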
   1136 
   1137 
   1138 void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
   1139   ASSERT((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
   1140   ASSERT(x.rn_.is_valid());
   1141   int am = x.am_;
   1142   if (!x.rm_.is_valid()) {
   1143     // Immediate offset.
   1144     int offset_8 = x.offset_;
   1145     if (offset_8 < 0) {
   1146       offset_8 = -offset_8;
   1147       am ^= U;
   1148     }
   1149     if (!is_uint8(offset_8)) {
    1150       // Immediate offset cannot be encoded, load it first to register ip.
    1151       // rn (and rd in a load) should never be ip, or it will be trashed.
   1152       ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
   1153       mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
   1154       addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
   1155       return;
   1156     }
   1157     ASSERT(offset_8 >= 0);  // no masking needed
   1158     instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
   1159   } else if (x.shift_imm_ != 0) {
    1160     // Scaled register offset not supported, load the index first.
    1161     // rn (and rd in a load) should never be ip, or it will be trashed.
   1162     ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
   1163     mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
   1164         Instruction::ConditionField(instr));
   1165     addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
   1166     return;
   1167   } else {
   1168     // Register offset.
   1169     ASSERT((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
   1170     instr |= x.rm_.code();
   1171   }
   1172   ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
   1173   emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
   1174 }
   1175 
   1176 
   1177 void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
   1178   ASSERT((instr & ~(kCondMask | P | U | W | L)) == B27);
   1179   ASSERT(rl != 0);
   1180   ASSERT(!rn.is(pc));
   1181   emit(instr | rn.code()*B16 | rl);
   1182 }
   1183 
   1184 
   1185 void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
   1186   // Unindexed addressing is not encoded by this function.
   1187   ASSERT_EQ((B27 | B26),
   1188             (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
   1189   ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
   1190   int am = x.am_;
   1191   int offset_8 = x.offset_;
   1192   ASSERT((offset_8 & 3) == 0);  // offset must be an aligned word offset
   1193   offset_8 >>= 2;
   1194   if (offset_8 < 0) {
   1195     offset_8 = -offset_8;
   1196     am ^= U;
   1197   }
   1198   ASSERT(is_uint8(offset_8));  // unsigned word offset must fit in a byte
   1199   ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
   1200 
    1201   // Post-indexed addressing requires W == 1; different from addrmod2/3.
   1202   if ((am & P) == 0)
   1203     am |= W;
   1204 
   1205   ASSERT(offset_8 >= 0);  // no masking needed
   1206   emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
   1207 }
   1208 
   1209 
   1210 int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
   1211   int target_pos;
   1212   if (L->is_bound()) {
   1213     target_pos = L->pos();
   1214   } else {
   1215     if (L->is_linked()) {
   1216       // Point to previous instruction that uses the link.
   1217       target_pos = L->pos();
   1218     } else {
   1219       // First entry of the link chain points to itself.
   1220       target_pos = pc_offset();
   1221     }
   1222     L->link_to(pc_offset());
   1223   }
   1224 
   1225   // Block the emission of the constant pool, since the branch instruction must
   1226   // be emitted at the pc offset recorded by the label.
   1227   BlockConstPoolFor(1);
   1228   return target_pos - (pc_offset() + kPcLoadDelta);
   1229 }
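         // kPcLoadDelta (8 on ARM) accounts for the pc reading two instructions
         // ahead of the branch being executed: a branch at offset 0 targeting
         // offset 16 therefore encodes a branch_offset of 16 - (0 + 8) == 8.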
   1230 
   1231 
   1232 void Assembler::label_at_put(Label* L, int at_offset) {
   1233   int target_pos;
   1234   ASSERT(!L->is_bound());
   1235   if (L->is_linked()) {
   1236     // Point to previous instruction that uses the link.
   1237     target_pos = L->pos();
   1238   } else {
   1239     // First entry of the link chain points to itself.
   1240     target_pos = at_offset;
   1241   }
   1242   L->link_to(at_offset);
   1243   instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
   1244 }
   1245 
   1246 
   1247 // Branch instructions.
   1248 void Assembler::b(int branch_offset, Condition cond) {
   1249   ASSERT((branch_offset & 3) == 0);
   1250   int imm24 = branch_offset >> 2;
   1251   ASSERT(is_int24(imm24));
   1252   emit(cond | B27 | B25 | (imm24 & kImm24Mask));
   1253 
   1254   if (cond == al) {
   1255     // Dead code is a good location to emit the constant pool.
   1256     CheckConstPool(false, false);
   1257   }
   1258 }
   1259 
   1260 
   1261 void Assembler::bl(int branch_offset, Condition cond) {
   1262   positions_recorder()->WriteRecordedPositions();
   1263   ASSERT((branch_offset & 3) == 0);
   1264   int imm24 = branch_offset >> 2;
   1265   ASSERT(is_int24(imm24));
   1266   emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
   1267 }
   1268 
   1269 
   1270 void Assembler::blx(int branch_offset) {  // v5 and above
   1271   positions_recorder()->WriteRecordedPositions();
   1272   ASSERT((branch_offset & 1) == 0);
   1273   int h = ((branch_offset & 2) >> 1)*B24;
   1274   int imm24 = branch_offset >> 2;
   1275   ASSERT(is_int24(imm24));
   1276   emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
   1277 }
   1278 
   1279 
   1280 void Assembler::blx(Register target, Condition cond) {  // v5 and above
   1281   positions_recorder()->WriteRecordedPositions();
   1282   ASSERT(!target.is(pc));
   1283   emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
   1284 }
   1285 
   1286 
   1287 void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
   1288   positions_recorder()->WriteRecordedPositions();
   1289   ASSERT(!target.is(pc));  // use of pc is actually allowed, but discouraged
   1290   emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
   1291 }
   1292 
   1293 
   1294 // Data-processing instructions.
   1295 
   1296 void Assembler::and_(Register dst, Register src1, const Operand& src2,
   1297                      SBit s, Condition cond) {
   1298   addrmod1(cond | AND | s, src1, dst, src2);
   1299 }
   1300 
   1301 
   1302 void Assembler::eor(Register dst, Register src1, const Operand& src2,
   1303                     SBit s, Condition cond) {
   1304   addrmod1(cond | EOR | s, src1, dst, src2);
   1305 }
   1306 
   1307 
   1308 void Assembler::sub(Register dst, Register src1, const Operand& src2,
   1309                     SBit s, Condition cond) {
   1310   addrmod1(cond | SUB | s, src1, dst, src2);
   1311 }
   1312 
   1313 
   1314 void Assembler::rsb(Register dst, Register src1, const Operand& src2,
   1315                     SBit s, Condition cond) {
   1316   addrmod1(cond | RSB | s, src1, dst, src2);
   1317 }
   1318 
   1319 
   1320 void Assembler::add(Register dst, Register src1, const Operand& src2,
   1321                     SBit s, Condition cond) {
   1322   addrmod1(cond | ADD | s, src1, dst, src2);
   1323 }
   1324 
   1325 
   1326 void Assembler::adc(Register dst, Register src1, const Operand& src2,
   1327                     SBit s, Condition cond) {
   1328   addrmod1(cond | ADC | s, src1, dst, src2);
   1329 }
   1330 
   1331 
   1332 void Assembler::sbc(Register dst, Register src1, const Operand& src2,
   1333                     SBit s, Condition cond) {
   1334   addrmod1(cond | SBC | s, src1, dst, src2);
   1335 }
   1336 
   1337 
   1338 void Assembler::rsc(Register dst, Register src1, const Operand& src2,
   1339                     SBit s, Condition cond) {
   1340   addrmod1(cond | RSC | s, src1, dst, src2);
   1341 }
   1342 
   1343 
   1344 void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
   1345   addrmod1(cond | TST | S, src1, r0, src2);
   1346 }
   1347 
   1348 
   1349 void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
   1350   addrmod1(cond | TEQ | S, src1, r0, src2);
   1351 }
   1352 
   1353 
   1354 void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
   1355   addrmod1(cond | CMP | S, src1, r0, src2);
   1356 }
   1357 
   1358 
   1359 void Assembler::cmp_raw_immediate(
   1360     Register src, int raw_immediate, Condition cond) {
   1361   ASSERT(is_uint12(raw_immediate));
   1362   emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
   1363 }
   1364 
   1365 
   1366 void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
   1367   addrmod1(cond | CMN | S, src1, r0, src2);
   1368 }
   1369 
   1370 
   1371 void Assembler::orr(Register dst, Register src1, const Operand& src2,
   1372                     SBit s, Condition cond) {
   1373   addrmod1(cond | ORR | s, src1, dst, src2);
   1374 }
   1375 
   1376 
   1377 void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
   1378   if (dst.is(pc)) {
   1379     positions_recorder()->WriteRecordedPositions();
   1380   }
   1381   // Don't allow nop instructions in the form mov rn, rn to be generated using
   1382   // the mov instruction. They must be generated using nop(int/NopMarkerTypes)
   1383   // or MarkCode(int/NopMarkerTypes) pseudo instructions.
   1384   ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
   1385   addrmod1(cond | MOV | s, r0, dst, src);
   1386 }
   1387 
   1388 
   1389 void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
   1390   ASSERT(immediate < 0x10000);
   1391   // May use movw if supported, but on unsupported platforms will try to use
   1392   // equivalent rotated immed_8 value and other tricks before falling back to a
   1393   // constant pool load.
   1394   mov(reg, Operand(immediate), LeaveCC, cond);
   1395 }
   1396 
   1397 
   1398 void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
   1399   emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
   1400 }
   1401 
   1402 
   1403 void Assembler::bic(Register dst, Register src1, const Operand& src2,
   1404                     SBit s, Condition cond) {
   1405   addrmod1(cond | BIC | s, src1, dst, src2);
   1406 }
   1407 
   1408 
   1409 void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
   1410   addrmod1(cond | MVN | s, r0, dst, src);
   1411 }
   1412 
   1413 
   1414 // Multiply instructions.
   1415 void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
   1416                     SBit s, Condition cond) {
   1417   ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
   1418   emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
   1419        src2.code()*B8 | B7 | B4 | src1.code());
   1420 }
   1421 
   1422 
   1423 void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
   1424                     Condition cond) {
   1425   ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
   1426   emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 |
   1427        src2.code()*B8 | B7 | B4 | src1.code());
   1428 }
   1429 
   1430 
   1431 void Assembler::sdiv(Register dst, Register src1, Register src2,
   1432                      Condition cond) {
   1433   ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
   1434   ASSERT(IsEnabled(SUDIV));
   1435   emit(cond | B26 | B25| B24 | B20 | dst.code()*B16 | 0xf * B12 |
   1436        src2.code()*B8 | B4 | src1.code());
   1437 }
   1438 
   1439 
   1440 void Assembler::mul(Register dst, Register src1, Register src2,
   1441                     SBit s, Condition cond) {
   1442   ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
   1443   // dst goes in bits 16-19 for this instruction!
   1444   emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
   1445 }
   1446 
   1447 
   1448 void Assembler::smlal(Register dstL,
   1449                       Register dstH,
   1450                       Register src1,
   1451                       Register src2,
   1452                       SBit s,
   1453                       Condition cond) {
   1454   ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
   1455   ASSERT(!dstL.is(dstH));
   1456   emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
   1457        src2.code()*B8 | B7 | B4 | src1.code());
   1458 }
   1459 
   1460 
   1461 void Assembler::smull(Register dstL,
   1462                       Register dstH,
   1463                       Register src1,
   1464                       Register src2,
   1465                       SBit s,
   1466                       Condition cond) {
   1467   ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
   1468   ASSERT(!dstL.is(dstH));
   1469   emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
   1470        src2.code()*B8 | B7 | B4 | src1.code());
   1471 }
   1472 
   1473 
   1474 void Assembler::umlal(Register dstL,
   1475                       Register dstH,
   1476                       Register src1,
   1477                       Register src2,
   1478                       SBit s,
   1479                       Condition cond) {
   1480   ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
   1481   ASSERT(!dstL.is(dstH));
   1482   emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
   1483        src2.code()*B8 | B7 | B4 | src1.code());
   1484 }
   1485 
   1486 
   1487 void Assembler::umull(Register dstL,
   1488                       Register dstH,
   1489                       Register src1,
   1490                       Register src2,
   1491                       SBit s,
   1492                       Condition cond) {
   1493   ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
   1494   ASSERT(!dstL.is(dstH));
   1495   emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
   1496        src2.code()*B8 | B7 | B4 | src1.code());
   1497 }
   1498 
   1499 
   1500 // Miscellaneous arithmetic instructions.
   1501 void Assembler::clz(Register dst, Register src, Condition cond) {
   1502   // v5 and above.
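          // clz: dst = the number of leading zero bits in src (32 if src is 0).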
   1503   ASSERT(!dst.is(pc) && !src.is(pc));
   1504   emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
   1505        15*B8 | CLZ | src.code());
   1506 }
   1507 
   1508 
   1509 // Saturating instructions.
   1510 
   1511 // Unsigned saturate.
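        // e.g. usat(r0, 8, Operand(r1)) writes r1 clamped to [0, 255] into r0.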
   1512 void Assembler::usat(Register dst,
   1513                      int satpos,
   1514                      const Operand& src,
   1515                      Condition cond) {
   1516   // v6 and above.
   1517   ASSERT(CpuFeatures::IsSupported(ARMv7));
   1518   ASSERT(!dst.is(pc) && !src.rm_.is(pc));
   1519   ASSERT((satpos >= 0) && (satpos <= 31));
   1520   ASSERT((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
   1521   ASSERT(src.rs_.is(no_reg));
   1522 
   1523   int sh = 0;
   1524   if (src.shift_op_ == ASR) {
   1525       sh = 1;
   1526   }
   1527 
   1528   emit(cond | 0x6*B24 | 0xe*B20 | satpos*B16 | dst.code()*B12 |
   1529        src.shift_imm_*B7 | sh*B6 | 0x1*B4 | src.rm_.code());
   1530 }
   1531 
   1532 
   1533 // Bitfield manipulation instructions.
   1534 
   1535 // Unsigned bit field extract.
   1536 // Extracts #width adjacent bits from position #lsb in a register, and
   1537 // writes them to the low bits of a destination register.
   1538 //   ubfx dst, src, #lsb, #width
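        // e.g. ubfx(r0, r1, 8, 4) copies bits 11:8 of r1 into bits 3:0 of r0.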
   1539 void Assembler::ubfx(Register dst,
   1540                      Register src,
   1541                      int lsb,
   1542                      int width,
   1543                      Condition cond) {
   1544   // v7 and above.
   1545   ASSERT(CpuFeatures::IsSupported(ARMv7));
   1546   ASSERT(!dst.is(pc) && !src.is(pc));
   1547   ASSERT((lsb >= 0) && (lsb <= 31));
   1548   ASSERT((width >= 1) && (width <= (32 - lsb)));
   1549   emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
   1550        lsb*B7 | B6 | B4 | src.code());
   1551 }
   1552 
   1553 
   1554 // Signed bit field extract.
   1555 // Extracts #width adjacent bits from position #lsb in a register, and
   1556 // writes them to the low bits of a destination register. The extracted
   1557 // value is sign extended to fill the destination register.
   1558 //   sbfx dst, src, #lsb, #width
   1559 void Assembler::sbfx(Register dst,
   1560                      Register src,
   1561                      int lsb,
   1562                      int width,
   1563                      Condition cond) {
   1564   // v7 and above.
   1565   ASSERT(CpuFeatures::IsSupported(ARMv7));
   1566   ASSERT(!dst.is(pc) && !src.is(pc));
   1567   ASSERT((lsb >= 0) && (lsb <= 31));
   1568   ASSERT((width >= 1) && (width <= (32 - lsb)));
   1569   emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
   1570        lsb*B7 | B6 | B4 | src.code());
   1571 }
   1572 
   1573 
   1574 // Bit field clear.
   1575 // Sets #width adjacent bits at position #lsb in the destination register
   1576 // to zero, preserving the value of the other bits.
   1577 //   bfc dst, #lsb, #width
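        // e.g. bfc(r0, 8, 4) clears bits 11:8 of r0.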
   1578 void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
   1579   // v7 and above.
   1580   ASSERT(CpuFeatures::IsSupported(ARMv7));
   1581   ASSERT(!dst.is(pc));
   1582   ASSERT((lsb >= 0) && (lsb <= 31));
   1583   ASSERT((width >= 1) && (width <= (32 - lsb)));
   1584   int msb = lsb + width - 1;
   1585   emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
   1586 }
   1587 
   1588 
   1589 // Bit field insert.
   1590 // Inserts #width adjacent bits from the low bits of the source register
   1591 // into position #lsb of the destination register.
   1592 //   bfi dst, src, #lsb, #width
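        // e.g. bfi(r0, r1, 8, 4) replaces bits 11:8 of r0 with bits 3:0 of r1.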
   1593 void Assembler::bfi(Register dst,
   1594                     Register src,
   1595                     int lsb,
   1596                     int width,
   1597                     Condition cond) {
   1598   // v7 and above.
   1599   ASSERT(CpuFeatures::IsSupported(ARMv7));
   1600   ASSERT(!dst.is(pc) && !src.is(pc));
   1601   ASSERT((lsb >= 0) && (lsb <= 31));
   1602   ASSERT((width >= 1) && (width <= (32 - lsb)));
   1603   int msb = lsb + width - 1;
   1604   emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
   1605        src.code());
   1606 }
   1607 
   1608 
   1609 void Assembler::pkhbt(Register dst,
   1610                       Register src1,
   1611                       const Operand& src2,
   1612                       Condition cond) {
   1613   // Instruction details available in ARM DDI 0406C.b, A8.8.125.
   1614   // cond(31-28) | 01101000(27-20) | Rn(19-16) |
   1615   // Rd(15-12) | imm5(11-7) | 0(6) | 01(5-4) | Rm(3-0)
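          // e.g. pkhbt(r0, r1, Operand(r2, LSL, 16)) packs the low halfword of r1
          // into bits 15:0 of r0 and the low halfword of r2 into bits 31:16.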
   1616   ASSERT(!dst.is(pc));
   1617   ASSERT(!src1.is(pc));
   1618   ASSERT(!src2.rm().is(pc));
   1619   ASSERT(!src2.rm().is(no_reg));
   1620   ASSERT(src2.rs().is(no_reg));
   1621   ASSERT((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31));
   1622   ASSERT(src2.shift_op() == LSL);
   1623   emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
   1624        src2.shift_imm_*B7 | B4 | src2.rm().code());
   1625 }
   1626 
   1627 
   1628 void Assembler::pkhtb(Register dst,
   1629                       Register src1,
   1630                       const Operand& src2,
   1631                       Condition cond) {
   1632   // Instruction details available in ARM DDI 0406C.b, A8.8.125.
   1633   // cond(31-28) | 01101000(27-20) | Rn(19-16) |
   1634   // Rd(15-12) | imm5(11-7) | 1(6) | 01(5-4) | Rm(3-0)
   1635   ASSERT(!dst.is(pc));
   1636   ASSERT(!src1.is(pc));
   1637   ASSERT(!src2.rm().is(pc));
   1638   ASSERT(!src2.rm().is(no_reg));
   1639   ASSERT(src2.rs().is(no_reg));
   1640   ASSERT((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32));
   1641   ASSERT(src2.shift_op() == ASR);
   1642   int asr = (src2.shift_imm_ == 32) ? 0 : src2.shift_imm_;
   1643   emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
   1644        asr*B7 | B6 | B4 | src2.rm().code());
   1645 }
   1646 
   1647 
   1648 void Assembler::uxtb(Register dst,
   1649                      const Operand& src,
   1650                      Condition cond) {
   1651   // Instruction details available in ARM DDI 0406C.b, A8.8.274.
   1652   // cond(31-28) | 01101110(27-20) | 1111(19-16) |
   1653   // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
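          // e.g. uxtb(r0, Operand(r1, ROR, 8)) sets r0 = (r1 >> 8) & 0xFF.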
   1654   ASSERT(!dst.is(pc));
   1655   ASSERT(!src.rm().is(pc));
   1656   ASSERT(!src.rm().is(no_reg));
   1657   ASSERT(src.rs().is(no_reg));
   1658   ASSERT((src.shift_imm_ == 0) ||
   1659          (src.shift_imm_ == 8) ||
   1660          (src.shift_imm_ == 16) ||
   1661          (src.shift_imm_ == 24));
   1662   ASSERT(src.shift_op() == ROR);
   1663   emit(cond | 0x6E*B20 | 0xF*B16 | dst.code()*B12 |
   1664        ((src.shift_imm_ >> 1) & 0xC)*B8 | 7*B4 | src.rm().code());
   1665 }
   1666 
   1667 
   1668 void Assembler::uxtab(Register dst,
   1669                       Register src1,
   1670                       const Operand& src2,
   1671                       Condition cond) {
   1672   // Instruction details available in ARM DDI 0406C.b, A8.8.271.
   1673   // cond(31-28) | 01101110(27-20) | Rn(19-16) |
   1674   // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
   1675   ASSERT(!dst.is(pc));
   1676   ASSERT(!src1.is(pc));
   1677   ASSERT(!src2.rm().is(pc));
   1678   ASSERT(!src2.rm().is(no_reg));
   1679   ASSERT(src2.rs().is(no_reg));
   1680   ASSERT((src2.shift_imm_ == 0) ||
   1681          (src2.shift_imm_ == 8) ||
   1682          (src2.shift_imm_ == 16) ||
   1683          (src2.shift_imm_ == 24));
   1684   ASSERT(src2.shift_op() == ROR);
   1685   emit(cond | 0x6E*B20 | src1.code()*B16 | dst.code()*B12 |
   1686        ((src2.shift_imm_ >> 1) & 0xC)*B8 | 7*B4 | src2.rm().code());
   1687 }
   1688 
   1689 
   1690 void Assembler::uxtb16(Register dst,
   1691                        const Operand& src,
   1692                        Condition cond) {
   1693   // Instruction details available in ARM DDI 0406C.b, A8.8.275.
   1694   // cond(31-28) | 01101100(27-20) | 1111(19-16) |
   1695   // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
   1696   ASSERT(!dst.is(pc));
   1697   ASSERT(!src.rm().is(pc));
   1698   ASSERT(!src.rm().is(no_reg));
   1699   ASSERT(src.rs().is(no_reg));
   1700   ASSERT((src.shift_imm_ == 0) ||
   1701          (src.shift_imm_ == 8) ||
   1702          (src.shift_imm_ == 16) ||
   1703          (src.shift_imm_ == 24));
   1704   ASSERT(src.shift_op() == ROR);
   1705   emit(cond | 0x6C*B20 | 0xF*B16 | dst.code()*B12 |
   1706        ((src.shift_imm_ >> 1) & 0xC)*B8 | 7*B4 | src.rm().code());
   1707 }
   1708 
   1709 
   1710 // Status register access instructions.
   1711 void Assembler::mrs(Register dst, SRegister s, Condition cond) {
   1712   ASSERT(!dst.is(pc));
   1713   emit(cond | B24 | s | 15*B16 | dst.code()*B12);
   1714 }
   1715 
   1716 
   1717 void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
   1718                     Condition cond) {
   1719   ASSERT(fields >= B16 && fields < B20);  // at least one field set
   1720   Instr instr;
   1721   if (!src.rm_.is_valid()) {
   1722     // Immediate.
   1723     uint32_t rotate_imm;
   1724     uint32_t immed_8;
   1725     if (src.must_output_reloc_info(this) ||
   1726         !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
   1727       // Immediate operand cannot be encoded, load it first to register ip.
   1728       RecordRelocInfo(src.rmode_, src.imm32_);
   1729       ldr(ip, MemOperand(pc, 0), cond);
   1730       msr(fields, Operand(ip), cond);
   1731       return;
   1732     }
   1733     instr = I | rotate_imm*B8 | immed_8;
   1734   } else {
   1735     ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0);  // only rm allowed
   1736     instr = src.rm_.code();
   1737   }
   1738   emit(cond | instr | B24 | B21 | fields | 15*B12);
   1739 }
   1740 
   1741 
   1742 // Load/Store instructions.
   1743 void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
   1744   if (dst.is(pc)) {
   1745     positions_recorder()->WriteRecordedPositions();
   1746   }
   1747   addrmod2(cond | B26 | L, dst, src);
   1748 }
   1749 
   1750 
   1751 void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
   1752   addrmod2(cond | B26, src, dst);
   1753 }
   1754 
   1755 
   1756 void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
   1757   addrmod2(cond | B26 | B | L, dst, src);
   1758 }
   1759 
   1760 
   1761 void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
   1762   addrmod2(cond | B26 | B, src, dst);
   1763 }
   1764 
   1765 
   1766 void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
   1767   addrmod3(cond | L | B7 | H | B4, dst, src);
   1768 }
   1769 
   1770 
   1771 void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
   1772   addrmod3(cond | B7 | H | B4, src, dst);
   1773 }
   1774 
   1775 
   1776 void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
   1777   addrmod3(cond | L | B7 | S6 | B4, dst, src);
   1778 }
   1779 
   1780 
   1781 void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
   1782   addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
   1783 }
   1784 
   1785 
   1786 void Assembler::ldrd(Register dst1, Register dst2,
   1787                      const MemOperand& src, Condition cond) {
   1788   ASSERT(IsEnabled(ARMv7));
   1789   ASSERT(src.rm().is(no_reg));
   1790   ASSERT(!dst1.is(lr));  // r14.
   1791   ASSERT_EQ(0, dst1.code() % 2);
   1792   ASSERT_EQ(dst1.code() + 1, dst2.code());
   1793   addrmod3(cond | B7 | B6 | B4, dst1, src);
   1794 }
   1795 
   1796 
   1797 void Assembler::strd(Register src1, Register src2,
   1798                      const MemOperand& dst, Condition cond) {
   1799   ASSERT(dst.rm().is(no_reg));
   1800   ASSERT(!src1.is(lr));  // r14.
   1801   ASSERT_EQ(0, src1.code() % 2);
   1802   ASSERT_EQ(src1.code() + 1, src2.code());
   1803   ASSERT(IsEnabled(ARMv7));
   1804   addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
   1805 }
   1806 
   1807 
   1808 // Preload instructions.
   1809 void Assembler::pld(const MemOperand& address) {
   1810   // Instruction details available in ARM DDI 0406C.b, A8.8.128.
   1811   // 1111(31-28) | 0111(27-24) | U(23) | R(22) | 01(21-20) | Rn(19-16) |
   1812   // 1111(15-12) | imm5(11-07) | type(6-5) | 0(4)| Rm(3-0) |
   1813   ASSERT(address.rm().is(no_reg));
   1814   ASSERT(address.am() == Offset);
   1815   int U = B23;
   1816   int offset = address.offset();
   1817   if (offset < 0) {
   1818     offset = -offset;
   1819     U = 0;
   1820   }
   1821   ASSERT(offset < 4096);
   1822   emit(kSpecialCondition | B26 | B24 | U | B22 | B20 | address.rn().code()*B16 |
   1823        0xf*B12 | offset);
   1824 }
   1825 
   1826 
   1827 // Load/Store multiple instructions.
   1828 void Assembler::ldm(BlockAddrMode am,
   1829                     Register base,
   1830                     RegList dst,
   1831                     Condition cond) {
   1832   // ABI stack constraint: ldmxx base, {..sp..} with base != sp is not restartable.
   1833   ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
   1834 
   1835   addrmod4(cond | B27 | am | L, base, dst);
   1836 
   1837   // Emit the constant pool after a function return implemented by ldm ..{..pc}.
   1838   if (cond == al && (dst & pc.bit()) != 0) {
   1839     // There is a slight chance that the ldm instruction was actually a call,
   1840     // in which case it would be wrong to return into the constant pool; we
   1841     // recognize this case by checking if the emission of the pool was blocked
   1842     // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
   1843     // the case, we emit a jump over the pool.
   1844     CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
   1845   }
   1846 }
   1847 
   1848 
   1849 void Assembler::stm(BlockAddrMode am,
   1850                     Register base,
   1851                     RegList src,
   1852                     Condition cond) {
   1853   addrmod4(cond | B27 | am, base, src);
   1854 }
   1855 
   1856 
   1857 // Exception-generating instructions and debugging support.
   1858 // Stops with a non-negative code less than kNumOfWatchedStops support
   1859 // enabling/disabling and a counter feature. See simulator-arm.h.
   1860 void Assembler::stop(const char* msg, Condition cond, int32_t code) {
   1861 #ifndef __arm__
   1862   ASSERT(code >= kDefaultStopCode);
   1863   {
   1864     // The Simulator will handle the stop instruction and get the message
   1865     // address. It expects to find the address just after the svc instruction.
   1866     BlockConstPoolScope block_const_pool(this);
   1867     if (code >= 0) {
   1868       svc(kStopCode + code, cond);
   1869     } else {
   1870       svc(kStopCode + kMaxStopCode, cond);
   1871     }
   1872     emit(reinterpret_cast<Instr>(msg));
   1873   }
   1874 #else  // def __arm__
   1875   if (cond != al) {
   1876     Label skip;
   1877     b(&skip, NegateCondition(cond));
   1878     bkpt(0);
   1879     bind(&skip);
   1880   } else {
   1881     bkpt(0);
   1882   }
   1883 #endif  // def __arm__
   1884 }
   1885 
   1886 
   1887 void Assembler::bkpt(uint32_t imm16) {  // v5 and above
   1888   ASSERT(is_uint16(imm16));
   1889   emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
   1890 }
   1891 
   1892 
   1893 void Assembler::svc(uint32_t imm24, Condition cond) {
   1894   ASSERT(is_uint24(imm24));
   1895   emit(cond | 15*B24 | imm24);
   1896 }
   1897 
   1898 
   1899 // Coprocessor instructions.
   1900 void Assembler::cdp(Coprocessor coproc,
   1901                     int opcode_1,
   1902                     CRegister crd,
   1903                     CRegister crn,
   1904                     CRegister crm,
   1905                     int opcode_2,
   1906                     Condition cond) {
   1907   ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
   1908   emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
   1909        crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
   1910 }
   1911 
   1912 
   1913 void Assembler::cdp2(Coprocessor coproc,
   1914                      int opcode_1,
   1915                      CRegister crd,
   1916                      CRegister crn,
   1917                      CRegister crm,
   1918                      int opcode_2) {  // v5 and above
   1919   cdp(coproc, opcode_1, crd, crn, crm, opcode_2, kSpecialCondition);
   1920 }
   1921 
   1922 
   1923 void Assembler::mcr(Coprocessor coproc,
   1924                     int opcode_1,
   1925                     Register rd,
   1926                     CRegister crn,
   1927                     CRegister crm,
   1928                     int opcode_2,
   1929                     Condition cond) {
   1930   ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
   1931   emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
   1932        rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
   1933 }
   1934 
   1935 
   1936 void Assembler::mcr2(Coprocessor coproc,
   1937                      int opcode_1,
   1938                      Register rd,
   1939                      CRegister crn,
   1940                      CRegister crm,
   1941                      int opcode_2) {  // v5 and above
   1942   mcr(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
   1943 }
   1944 
   1945 
   1946 void Assembler::mrc(Coprocessor coproc,
   1947                     int opcode_1,
   1948                     Register rd,
   1949                     CRegister crn,
   1950                     CRegister crm,
   1951                     int opcode_2,
   1952                     Condition cond) {
   1953   ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
   1954   emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
   1955        rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
   1956 }
   1957 
   1958 
   1959 void Assembler::mrc2(Coprocessor coproc,
   1960                      int opcode_1,
   1961                      Register rd,
   1962                      CRegister crn,
   1963                      CRegister crm,
   1964                      int opcode_2) {  // v5 and above
   1965   mrc(coproc, opcode_1, rd, crn, crm, opcode_2, kSpecialCondition);
   1966 }
   1967 
   1968 
   1969 void Assembler::ldc(Coprocessor coproc,
   1970                     CRegister crd,
   1971                     const MemOperand& src,
   1972                     LFlag l,
   1973                     Condition cond) {
   1974   addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
   1975 }
   1976 
   1977 
   1978 void Assembler::ldc(Coprocessor coproc,
   1979                     CRegister crd,
   1980                     Register rn,
   1981                     int option,
   1982                     LFlag l,
   1983                     Condition cond) {
   1984   // Unindexed addressing.
   1985   ASSERT(is_uint8(option));
   1986   emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
   1987        coproc*B8 | (option & 255));
   1988 }
   1989 
   1990 
   1991 void Assembler::ldc2(Coprocessor coproc,
   1992                      CRegister crd,
   1993                      const MemOperand& src,
   1994                      LFlag l) {  // v5 and above
   1995   ldc(coproc, crd, src, l, kSpecialCondition);
   1996 }
   1997 
   1998 
   1999 void Assembler::ldc2(Coprocessor coproc,
   2000                      CRegister crd,
   2001                      Register rn,
   2002                      int option,
   2003                      LFlag l) {  // v5 and above
   2004   ldc(coproc, crd, rn, option, l, kSpecialCondition);
   2005 }
   2006 
   2007 
   2008 // Support for VFP.
   2009 
   2010 void Assembler::vldr(const DwVfpRegister dst,
   2011                      const Register base,
   2012                      int offset,
   2013                      const Condition cond) {
   2014   // Ddst = MEM(Rbase + offset).
   2015   // Instruction details available in ARM DDI 0406C.b, A8-924.
   2016   // cond(31-28) | 1101(27-24)| U(23) | D(22) | 01(21-20) | Rbase(19-16) |
   2017   // Vd(15-12) | 1011(11-8) | offset
   2018   int u = 1;
   2019   if (offset < 0) {
   2020     offset = -offset;
   2021     u = 0;
   2022   }
   2023   int vd, d;
   2024   dst.split_code(&vd, &d);
   2025 
   2026   ASSERT(offset >= 0);
   2027   if ((offset % 4) == 0 && (offset / 4) < 256) {
   2028     emit(cond | 0xD*B24 | u*B23 | d*B22 | B20 | base.code()*B16 | vd*B12 |
   2029          0xB*B8 | ((offset / 4) & 255));
   2030   } else {
   2031     // Larger offsets must be handled by computing the correct address
   2032     // in the ip register.
   2033     ASSERT(!base.is(ip));
   2034     if (u == 1) {
   2035       add(ip, base, Operand(offset));
   2036     } else {
   2037       sub(ip, base, Operand(offset));
   2038     }
   2039     emit(cond | 0xD*B24 | d*B22 | B20 | ip.code()*B16 | vd*B12 | 0xB*B8);
   2040   }
   2041 }
   2042 
   2043 
   2044 void Assembler::vldr(const DwVfpRegister dst,
   2045                      const MemOperand& operand,
   2046                      const Condition cond) {
   2047   ASSERT(!operand.rm().is_valid());
   2048   ASSERT(operand.am_ == Offset);
   2049   vldr(dst, operand.rn(), operand.offset(), cond);
   2050 }
   2051 
   2052 
   2053 void Assembler::vldr(const SwVfpRegister dst,
   2054                      const Register base,
   2055                      int offset,
   2056                      const Condition cond) {
   2057   // Sdst = MEM(Rbase + offset).
   2058   // Instruction details available in ARM DDI 0406A, A8-628.
   2059   // cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
   2060   // Vdst(15-12) | 1010(11-8) | offset
   2061   int u = 1;
   2062   if (offset < 0) {
   2063     offset = -offset;
   2064     u = 0;
   2065   }
   2066   int sd, d;
   2067   dst.split_code(&sd, &d);
   2068   ASSERT(offset >= 0);
   2069 
   2070   if ((offset % 4) == 0 && (offset / 4) < 256) {
   2071     emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
   2072          0xA*B8 | ((offset / 4) & 255));
   2073   } else {
   2074     // Larger offsets must be handled by computing the correct address
   2075     // in the ip register.
   2076     ASSERT(!base.is(ip));
   2077     if (u == 1) {
   2078       add(ip, base, Operand(offset));
   2079     } else {
   2080       sub(ip, base, Operand(offset));
   2081     }
   2082     emit(cond | d*B22 | 0xD1*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
   2083   }
   2084 }
   2085 
   2086 
   2087 void Assembler::vldr(const SwVfpRegister dst,
   2088                      const MemOperand& operand,
   2089                      const Condition cond) {
   2090   ASSERT(!operand.rm().is_valid());
   2091   ASSERT(operand.am_ == Offset);
   2092   vldr(dst, operand.rn(), operand.offset(), cond);
   2093 }
   2094 
   2095 
   2096 void Assembler::vstr(const DwVfpRegister src,
   2097                      const Register base,
   2098                      int offset,
   2099                      const Condition cond) {
   2100   // MEM(Rbase + offset) = Dsrc.
   2101   // Instruction details available in ARM DDI 0406C.b, A8-1082.
   2102   // cond(31-28) | 1101(27-24)| U(23) | D(22) | 00(21-20) | Rbase(19-16) |
   2103   // Vd(15-12) | 1011(11-8) | (offset/4)
   2104   int u = 1;
   2105   if (offset < 0) {
   2106     offset = -offset;
   2107     u = 0;
   2108   }
   2109   ASSERT(offset >= 0);
   2110   int vd, d;
   2111   src.split_code(&vd, &d);
   2112 
   2113   if ((offset % 4) == 0 && (offset / 4) < 256) {
   2114     emit(cond | 0xD*B24 | u*B23 | d*B22 | base.code()*B16 | vd*B12 | 0xB*B8 |
   2115          ((offset / 4) & 255));
   2116   } else {
   2117     // Larger offsets must be handled by computing the correct address
   2118     // in the ip register.
   2119     ASSERT(!base.is(ip));
   2120     if (u == 1) {
   2121       add(ip, base, Operand(offset));
   2122     } else {
   2123       sub(ip, base, Operand(offset));
   2124     }
   2125     emit(cond | 0xD*B24 | d*B22 | ip.code()*B16 | vd*B12 | 0xB*B8);
   2126   }
   2127 }
   2128 
   2129 
   2130 void Assembler::vstr(const DwVfpRegister src,
   2131                      const MemOperand& operand,
   2132                      const Condition cond) {
   2133   ASSERT(!operand.rm().is_valid());
   2134   ASSERT(operand.am_ == Offset);
   2135   vstr(src, operand.rn(), operand.offset(), cond);
   2136 }
   2137 
   2138 
   2139 void Assembler::vstr(const SwVfpRegister src,
   2140                      const Register base,
   2141                      int offset,
   2142                      const Condition cond) {
   2143   // MEM(Rbase + offset) = Ssrc.
   2144   // Instruction details available in ARM DDI 0406A, A8-786.
   2145   // cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
   2146   // Vdst(15-12) | 1010(11-8) | (offset/4)
   2147   int u = 1;
   2148   if (offset < 0) {
   2149     offset = -offset;
   2150     u = 0;
   2151   }
   2152   int sd, d;
   2153   src.split_code(&sd, &d);
   2154   ASSERT(offset >= 0);
   2155   if ((offset % 4) == 0 && (offset / 4) < 256) {
   2156     emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
   2157          0xA*B8 | ((offset / 4) & 255));
   2158   } else {
   2159     // Larger offsets must be handled by computing the correct address
   2160     // in the ip register.
   2161     ASSERT(!base.is(ip));
   2162     if (u == 1) {
   2163       add(ip, base, Operand(offset));
   2164     } else {
   2165       sub(ip, base, Operand(offset));
   2166     }
   2167     emit(cond | d*B22 | 0xD0*B20 | ip.code()*B16 | sd*B12 | 0xA*B8);
   2168   }
   2169 }
   2170 
   2171 
   2172 void Assembler::vstr(const SwVfpRegister src,
   2173                      const MemOperand& operand,
   2174                      const Condition cond) {
   2175   ASSERT(!operand.rm().is_valid());
   2176   ASSERT(operand.am_ == Offset);
   2177   vstr(src, operand.rn(), operand.offset(), cond);
   2178 }
   2179 
   2180 
   2181 void  Assembler::vldm(BlockAddrMode am,
   2182                       Register base,
   2183                       DwVfpRegister first,
   2184                       DwVfpRegister last,
   2185                       Condition cond) {
   2186   // Instruction details available in ARM DDI 0406C.b, A8-922.
   2187   // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
   2188   // first(15-12) | 1011(11-8) | (count * 2)
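          // e.g. vldm(ia_w, sp, d0, d3, al) loads d0-d3 from the stack and then
          // increments sp by 32 (imm8 = 4 registers * 2 words each).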
   2189   ASSERT_LE(first.code(), last.code());
   2190   ASSERT(am == ia || am == ia_w || am == db_w);
   2191   ASSERT(!base.is(pc));
   2192 
   2193   int sd, d;
   2194   first.split_code(&sd, &d);
   2195   int count = last.code() - first.code() + 1;
   2196   ASSERT(count <= 16);
   2197   emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
   2198        0xB*B8 | count*2);
   2199 }
   2200 
   2201 
   2202 void  Assembler::vstm(BlockAddrMode am,
   2203                       Register base,
   2204                       DwVfpRegister first,
   2205                       DwVfpRegister last,
   2206                       Condition cond) {
   2207   // Instruction details available in ARM DDI 0406C.b, A8-1080.
   2208   // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
   2209   // first(15-12) | 1011(11-8) | (count * 2)
   2210   ASSERT_LE(first.code(), last.code());
   2211   ASSERT(am == ia || am == ia_w || am == db_w);
   2212   ASSERT(!base.is(pc));
   2213 
   2214   int sd, d;
   2215   first.split_code(&sd, &d);
   2216   int count = last.code() - first.code() + 1;
   2217   ASSERT(count <= 16);
   2218   emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
   2219        0xB*B8 | count*2);
   2220 }
   2221 
   2222 void  Assembler::vldm(BlockAddrMode am,
   2223                       Register base,
   2224                       SwVfpRegister first,
   2225                       SwVfpRegister last,
   2226                       Condition cond) {
   2227   // Instruction details available in ARM DDI 0406A, A8-626.
   2228   // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
   2229   // first(15-12) | 1010(11-8) | count
   2230   ASSERT_LE(first.code(), last.code());
   2231   ASSERT(am == ia || am == ia_w || am == db_w);
   2232   ASSERT(!base.is(pc));
   2233 
   2234   int sd, d;
   2235   first.split_code(&sd, &d);
   2236   int count = last.code() - first.code() + 1;
   2237   emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
   2238        0xA*B8 | count);
   2239 }
   2240 
   2241 
   2242 void  Assembler::vstm(BlockAddrMode am,
   2243                       Register base,
   2244                       SwVfpRegister first,
   2245                       SwVfpRegister last,
   2246                       Condition cond) {
   2247   // Instruction details available in ARM DDI 0406A, A8-784.
   2248   // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
   2249   // first(15-12) | 1010(11-8) | count
   2250   ASSERT_LE(first.code(), last.code());
   2251   ASSERT(am == ia || am == ia_w || am == db_w);
   2252   ASSERT(!base.is(pc));
   2253 
   2254   int sd, d;
   2255   first.split_code(&sd, &d);
   2256   int count = last.code() - first.code() + 1;
   2257   emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
   2258        0xA*B8 | count);
   2259 }
   2260 
   2261 
   2262 static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
   2263   uint64_t i;
   2264   OS::MemCopy(&i, &d, 8);
   2265 
   2266   *lo = i & 0xffffffff;
   2267   *hi = i >> 32;
   2268 }
   2269 
   2270 
   2271 // Only works for little-endian floating point formats.
   2272 // We don't support VFP on mixed-endian floating point platforms.
   2273 static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
   2274   ASSERT(CpuFeatures::IsSupported(VFP3));
   2275 
   2276   // VMOV can accept an immediate of the form:
   2277   //
   2278   //  +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
   2279   //
   2280   // The immediate is encoded using an 8-bit quantity, comprised of two
   2281   // 4-bit fields. For an 8-bit immediate of the form:
   2282   //
   2283   //  [abcdefgh]
   2284   //
   2285   // where a is the MSB and h is the LSB, an immediate 64-bit double can be
   2286   // created of the form:
   2287   //
   2288   //  [aBbbbbbb,bbcdefgh,00000000,00000000,
   2289   //      00000000,00000000,00000000,00000000]
   2290   //
   2291   // where B = ~b.
   2292   //
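          // For example, 1.0 is stored as hi = 0x3FF00000, lo = 0; it passes the
          // checks below and encodes as imm8 = 0x70, returned here as 0x70000
          // (imm4H = 0x7 in bits 19-16, imm4L = 0x0 in bits 3-0).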
   2293 
   2294   uint32_t lo, hi;
   2295   DoubleAsTwoUInt32(d, &lo, &hi);
   2296 
   2297   // The most obvious constraint is the long block of zeroes.
   2298   if ((lo != 0) || ((hi & 0xffff) != 0)) {
   2299     return false;
   2300   }
   2301 
   2302   // Bits 61:54 must be all clear or all set.
   2303   if (((hi & 0x3fc00000) != 0) && ((hi & 0x3fc00000) != 0x3fc00000)) {
   2304     return false;
   2305   }
   2306 
   2307   // Bit 62 must be NOT bit 61.
   2308   if (((hi ^ (hi << 1)) & (0x40000000)) == 0) {
   2309     return false;
   2310   }
   2311 
   2312   // Create the encoded immediate in the form:
   2313   //  [00000000,0000abcd,00000000,0000efgh]
   2314   *encoding  = (hi >> 16) & 0xf;      // Low nybble.
   2315   *encoding |= (hi >> 4) & 0x70000;   // Low three bits of the high nybble.
   2316   *encoding |= (hi >> 12) & 0x80000;  // Top bit of the high nybble.
   2317 
   2318   return true;
   2319 }
   2320 
   2321 
   2322 void Assembler::vmov(const DwVfpRegister dst,
   2323                      double imm,
   2324                      const Register scratch) {
   2325   uint32_t enc;
   2326   if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
   2327     // The double can be encoded in the instruction.
   2328     //
   2329     // Dd = immediate
   2330     // Instruction details available in ARM DDI 0406C.b, A8-936.
   2331     // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | imm4H(19-16) |
   2332     // Vd(15-12) | 101(11-9) | sz=1(8) | imm4L(3-0)
   2333     int vd, d;
   2334     dst.split_code(&vd, &d);
   2335     emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
   2336   } else if (FLAG_enable_vldr_imm) {
   2337     // TODO(jfb) Temporarily turned off until we have constant blinding or
   2338     //           some equivalent mitigation: an attacker can otherwise control
   2339     //           generated data which also happens to be executable, a Very Bad
   2340     //           Thing indeed.
   2341     //           Blinding gets tricky because we don't have xor, we probably
   2342     //           need to add/subtract without losing precision, which requires a
   2343     //           cookie value that Lithium is probably better positioned to
   2344     //           choose.
   2345     //           We could also add a few peepholes here like detecting 0.0 and
   2346     //           -0.0 and doing a vmov from the sequestered d14, forcing denorms
   2347     //           to zero (we set flush-to-zero), and normalizing NaN values.
   2348     //           We could also detect redundant values.
   2349     //           The code could also randomize the order of values, though
   2350     //           that's tricky because vldr has a limited reach. Furthermore
   2351     //           it breaks load locality.
   2352     RecordRelocInfo(imm);
   2353     vldr(dst, MemOperand(pc, 0));
   2354   } else {
   2355     // Synthesise the double from ARM immediates.
   2356     uint32_t lo, hi;
   2357     DoubleAsTwoUInt32(imm, &lo, &hi);
   2358 
   2359     if (scratch.is(no_reg)) {
   2360       if (dst.code() < 16) {
   2361         const LowDwVfpRegister loc = LowDwVfpRegister::from_code(dst.code());
   2362         // Move the low part of the double into the lower of the corresponding S
   2363         // registers of D register dst.
   2364         mov(ip, Operand(lo));
   2365         vmov(loc.low(), ip);
   2366 
   2367         // Move the high part of the double into the higher of the
   2368         // corresponding S registers of D register dst.
   2369         mov(ip, Operand(hi));
   2370         vmov(loc.high(), ip);
   2371       } else {
   2372         // D16-D31 does not have S registers, so move the low and high parts
   2373         // directly to the D register using vmov.32.
   2374         // Note: This may be slower, so we only do this when we have to.
   2375         mov(ip, Operand(lo));
   2376         vmov(dst, VmovIndexLo, ip);
   2377         mov(ip, Operand(hi));
   2378         vmov(dst, VmovIndexHi, ip);
   2379       }
   2380     } else {
   2381       // Move the low and high parts of the double to a D register in one
   2382       // instruction.
   2383       mov(ip, Operand(lo));
   2384       mov(scratch, Operand(hi));
   2385       vmov(dst, ip, scratch);
   2386     }
   2387   }
   2388 }
   2389 
   2390 
   2391 void Assembler::vmov(const SwVfpRegister dst,
   2392                      const SwVfpRegister src,
   2393                      const Condition cond) {
   2394   // Sd = Sm
   2395   // Instruction details available in ARM DDI 0406B, A8-642.
   2396   int sd, d, sm, m;
   2397   dst.split_code(&sd, &d);
   2398   src.split_code(&sm, &m);
   2399   emit(cond | 0xE*B24 | d*B22 | 0xB*B20 | sd*B12 | 0xA*B8 | B6 | m*B5 | sm);
   2400 }
   2401 
   2402 
   2403 void Assembler::vmov(const DwVfpRegister dst,
   2404                      const DwVfpRegister src,
   2405                      const Condition cond) {
   2406   // Dd = Dm
   2407   // Instruction details available in ARM DDI 0406C.b, A8-938.
   2408   // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
   2409   // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
   2410   int vd, d;
   2411   dst.split_code(&vd, &d);
   2412   int vm, m;
   2413   src.split_code(&vm, &m);
   2414   emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B6 | m*B5 |
   2415        vm);
   2416 }
   2417 
   2418 
   2419 void Assembler::vmov(const DwVfpRegister dst,
   2420                      const VmovIndex index,
   2421                      const Register src,
   2422                      const Condition cond) {
   2423   // Dd[index] = Rt
   2424   // Instruction details available in ARM DDI 0406C.b, A8-940.
   2425   // cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) |
   2426   // Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
   2427   ASSERT(index.index == 0 || index.index == 1);
   2428   int vd, d;
   2429   dst.split_code(&vd, &d);
   2430   emit(cond | 0xE*B24 | index.index*B21 | vd*B16 | src.code()*B12 | 0xB*B8 |
   2431        d*B7 | B4);
   2432 }
   2433 
   2434 
   2435 void Assembler::vmov(const Register dst,
   2436                      const VmovIndex index,
   2437                      const DwVfpRegister src,
   2438                      const Condition cond) {
   2439   // Rt = Dd[index]
   2440   // Instruction details available in ARM DDI 0406C.b, A8.8.342.
   2441   // cond(31-28) | 1110(27-24) | U=0(23) | opc1=0index(22-21) | 1(20) |
   2442   // Vn(19-16) | Rt(15-12) | 1011(11-8) | N(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
   2443   ASSERT(index.index == 0 || index.index == 1);
   2444   int vn, n;
   2445   src.split_code(&vn, &n);
   2446   emit(cond | 0xE*B24 | index.index*B21 | B20 | vn*B16 | dst.code()*B12 |
   2447        0xB*B8 | n*B7 | B4);
   2448 }
   2449 
   2450 
   2451 void Assembler::vmov(const DwVfpRegister dst,
   2452                      const Register src1,
   2453                      const Register src2,
   2454                      const Condition cond) {
   2455   // Dm = <Rt,Rt2>.
   2456   // Instruction details available in ARM DDI 0406C.b, A8-948.
   2457   // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
   2458   // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
   2459   ASSERT(!src1.is(pc) && !src2.is(pc));
   2460   int vm, m;
   2461   dst.split_code(&vm, &m);
   2462   emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
   2463        src1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
   2464 }
   2465 
   2466 
   2467 void Assembler::vmov(const Register dst1,
   2468                      const Register dst2,
   2469                      const DwVfpRegister src,
   2470                      const Condition cond) {
   2471   // <Rt,Rt2> = Dm.
   2472   // Instruction details available in ARM DDI 0406C.b, A8-948.
   2473   // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
   2474   // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
   2475   ASSERT(!dst1.is(pc) && !dst2.is(pc));
   2476   int vm, m;
   2477   src.split_code(&vm, &m);
   2478   emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
   2479        dst1.code()*B12 | 0xB*B8 | m*B5 | B4 | vm);
   2480 }
   2481 
   2482 
   2483 void Assembler::vmov(const SwVfpRegister dst,
   2484                      const Register src,
   2485                      const Condition cond) {
   2486   // Sn = Rt.
   2487   // Instruction details available in ARM DDI 0406A, A8-642.
   2488   // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
   2489   // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
   2490   ASSERT(!src.is(pc));
   2491   int sn, n;
   2492   dst.split_code(&sn, &n);
   2493   emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
   2494 }
   2495 
   2496 
   2497 void Assembler::vmov(const Register dst,
   2498                      const SwVfpRegister src,
   2499                      const Condition cond) {
   2500   // Rt = Sn.
   2501   // Instruction details available in ARM DDI 0406A, A8-642.
   2502   // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
   2503   // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
   2504   ASSERT(!dst.is(pc));
   2505   int sn, n;
   2506   src.split_code(&sn, &n);
   2507   emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
   2508 }
   2509 
   2510 
   2511 // Type of data to read from or write to a VFP register.
   2512 // Used as specifier in generic vcvt instruction.
   2513 enum VFPType { S32, U32, F32, F64 };
   2514 
   2515 
   2516 static bool IsSignedVFPType(VFPType type) {
   2517   switch (type) {
   2518     case S32:
   2519       return true;
   2520     case U32:
   2521       return false;
   2522     default:
   2523       UNREACHABLE();
   2524       return false;
   2525   }
   2526 }
   2527 
   2528 
   2529 static bool IsIntegerVFPType(VFPType type) {
   2530   switch (type) {
   2531     case S32:
   2532     case U32:
   2533       return true;
   2534     case F32:
   2535     case F64:
   2536       return false;
   2537     default:
   2538       UNREACHABLE();
   2539       return false;
   2540   }
   2541 }
   2542 
   2543 
   2544 static bool IsDoubleVFPType(VFPType type) {
   2545   switch (type) {
   2546     case F32:
   2547       return false;
   2548     case F64:
   2549       return true;
   2550     default:
   2551       UNREACHABLE();
   2552       return false;
   2553   }
   2554 }
   2555 
   2556 
   2557 // Split a five-bit reg_code based on the size of reg_type.
   2558 //  32-bit register codes are Vm:M
   2559 //  64-bit register codes are M:Vm
   2560 // where Vm is four bits, and M is a single bit.
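        // e.g. s7 (code 7, 32-bit) splits to Vm = 3, M = 1, while d17 (code 17,
        // 64-bit) splits to Vm = 1, M = 1.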
   2561 static void SplitRegCode(VFPType reg_type,
   2562                          int reg_code,
   2563                          int* vm,
   2564                          int* m) {
   2565   ASSERT((reg_code >= 0) && (reg_code <= 31));
   2566   if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
   2567     // 32 bit type.
   2568     *m  = reg_code & 0x1;
   2569     *vm = reg_code >> 1;
   2570   } else {
   2571     // 64 bit type.
   2572     *m  = (reg_code & 0x10) >> 4;
   2573     *vm = reg_code & 0x0F;
   2574   }
   2575 }
   2576 
   2577 
   2578 // Encode vcvt.src_type.dst_type instruction.
   2579 static Instr EncodeVCVT(const VFPType dst_type,
   2580                         const int dst_code,
   2581                         const VFPType src_type,
   2582                         const int src_code,
   2583                         VFPConversionMode mode,
   2584                         const Condition cond) {
   2585   ASSERT(src_type != dst_type);
   2586   int D, Vd, M, Vm;
   2587   SplitRegCode(src_type, src_code, &Vm, &M);
   2588   SplitRegCode(dst_type, dst_code, &Vd, &D);
   2589 
   2590   if (IsIntegerVFPType(dst_type) || IsIntegerVFPType(src_type)) {
   2591     // Conversion between IEEE floating point and 32-bit integer.
   2592     // Instruction details available in ARM DDI 0406B, A8.6.295.
   2593     // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) |
   2594     // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
   2595     ASSERT(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));
   2596 
   2597     int sz, opc2, op;
   2598 
   2599     if (IsIntegerVFPType(dst_type)) {
   2600       opc2 = IsSignedVFPType(dst_type) ? 0x5 : 0x4;
   2601       sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
   2602       op = mode;
   2603     } else {
   2604       ASSERT(IsIntegerVFPType(src_type));
   2605       opc2 = 0x0;
   2606       sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
   2607       op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
   2608     }
   2609 
   2610     return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | B19 | opc2*B16 |
   2611             Vd*B12 | 0x5*B9 | sz*B8 | op*B7 | B6 | M*B5 | Vm);
   2612   } else {
   2613     // Conversion between IEEE double and single precision.
   2614     // Instruction details available in ARM DDI 0406B, A8.6.298.
   2615     // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0111(19-16) |
   2616     // Vd(15-12) | 101(11-9) | sz(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
   2617     int sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
   2618     return (cond | 0xE*B24 | B23 | D*B22 | 0x3*B20 | 0x7*B16 |
   2619             Vd*B12 | 0x5*B9 | sz*B8 | B7 | B6 | M*B5 | Vm);
   2620   }
   2621 }
   2622 
   2623 
   2624 void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
   2625                              const SwVfpRegister src,
   2626                              VFPConversionMode mode,
   2627                              const Condition cond) {
   2628   emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
   2629 }
   2630 
   2631 
   2632 void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
   2633                              const SwVfpRegister src,
   2634                              VFPConversionMode mode,
   2635                              const Condition cond) {
   2636   emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
   2637 }
   2638 
   2639 
   2640 void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
   2641                              const SwVfpRegister src,
   2642                              VFPConversionMode mode,
   2643                              const Condition cond) {
   2644   emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
   2645 }
   2646 
   2647 
   2648 void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
   2649                              const DwVfpRegister src,
   2650                              VFPConversionMode mode,
   2651                              const Condition cond) {
   2652   emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
   2653 }
   2654 
   2655 
   2656 void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
   2657                              const DwVfpRegister src,
   2658                              VFPConversionMode mode,
   2659                              const Condition cond) {
   2660   emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
   2661 }
   2662 
   2663 
   2664 void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
   2665                              const SwVfpRegister src,
   2666                              VFPConversionMode mode,
   2667                              const Condition cond) {
   2668   emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
   2669 }
   2670 
   2671 
   2672 void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
   2673                              const DwVfpRegister src,
   2674                              VFPConversionMode mode,
   2675                              const Condition cond) {
   2676   emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
   2677 }
   2678 
   2679 
   2680 void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
   2681                              int fraction_bits,
   2682                              const Condition cond) {
   2683   // Instruction details available in ARM DDI 0406C.b, A8-874.
   2684   // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 1010(19-16) | Vd(15-12) |
   2685   // 101(11-9) | sf=1(8) | sx=1(7) | 1(6) | i(5) | 0(4) | imm4(3-0)
   2686   ASSERT(fraction_bits > 0 && fraction_bits <= 32);
   2687   ASSERT(CpuFeatures::IsSupported(VFP3));
   2688   int vd, d;
   2689   dst.split_code(&vd, &d);
   2690   int i = ((32 - fraction_bits) >> 4) & 1;
   2691   int imm4 = (32 - fraction_bits) & 0xf;
   2692   emit(cond | 0xE*B24 | B23 | d*B22 | 0x3*B20 | B19 | 0x2*B16 |
   2693        vd*B12 | 0x5*B9 | B8 | B7 | B6 | i*B5 | imm4);
   2694 }
   2695 
   2696 
   2697 void Assembler::vneg(const DwVfpRegister dst,
   2698                      const DwVfpRegister src,
   2699                      const Condition cond) {
   2700   // Instruction details available in ARM DDI 0406C.b, A8-968.
   2701   // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) |
   2702   // 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
   2703   int vd, d;
   2704   dst.split_code(&vd, &d);
   2705   int vm, m;
   2706   src.split_code(&vm, &m);
   2707 
   2708   emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | B6 |
   2709        m*B5 | vm);
   2710 }
   2711 
   2712 
   2713 void Assembler::vabs(const DwVfpRegister dst,
   2714                      const DwVfpRegister src,
   2715                      const Condition cond) {
   2716   // Instruction details available in ARM DDI 0406C.b, A8-524.
   2717   // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
   2718   // 101(11-9) | sz=1(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
   2719   int vd, d;
   2720   dst.split_code(&vd, &d);
   2721   int vm, m;
   2722   src.split_code(&vm, &m);
   2723   emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | B7 | B6 |
   2724        m*B5 | vm);
   2725 }
   2726 
   2727 
   2728 void Assembler::vadd(const DwVfpRegister dst,
   2729                      const DwVfpRegister src1,
   2730                      const DwVfpRegister src2,
   2731                      const Condition cond) {
   2732   // Dd = vadd(Dn, Dm) double precision floating point addition.
   2733   // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
   2734   // Instruction details available in ARM DDI 0406C.b, A8-830.
   2735   // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
   2736   // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
   2737   int vd, d;
   2738   dst.split_code(&vd, &d);
   2739   int vn, n;
   2740   src1.split_code(&vn, &n);
   2741   int vm, m;
   2742   src2.split_code(&vm, &m);
   2743   emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
   2744        n*B7 | m*B5 | vm);
   2745 }
   2746 
   2747 
   2748 void Assembler::vsub(const DwVfpRegister dst,
   2749                      const DwVfpRegister src1,
   2750                      const DwVfpRegister src2,
   2751                      const Condition cond) {
   2752   // Dd = vsub(Dn, Dm) double precision floating point subtraction.
   2753   // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
   2754   // Instruction details available in ARM DDI 0406C.b, A8-1086.
   2755   // cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
   2756   // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
   2757   int vd, d;
   2758   dst.split_code(&vd, &d);
   2759   int vn, n;
   2760   src1.split_code(&vn, &n);
   2761   int vm, m;
   2762   src2.split_code(&vm, &m);
   2763   emit(cond | 0x1C*B23 | d*B22 | 0x3*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
   2764        n*B7 | B6 | m*B5 | vm);
   2765 }
   2766 
   2767 
   2768 void Assembler::vmul(const DwVfpRegister dst,
   2769                      const DwVfpRegister src1,
   2770                      const DwVfpRegister src2,
   2771                      const Condition cond) {
   2772   // Dd = vmul(Dn, Dm) double precision floating point multiplication.
   2773   // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
   2774   // Instruction details available in ARM DDI 0406C.b, A8-960.
   2775   // cond(31-28) | 11100(27-23)| D(22) | 10(21-20) | Vn(19-16) |
   2776   // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
   2777   int vd, d;
   2778   dst.split_code(&vd, &d);
   2779   int vn, n;
   2780   src1.split_code(&vn, &n);
   2781   int vm, m;
   2782   src2.split_code(&vm, &m);
   2783   emit(cond | 0x1C*B23 | d*B22 | 0x2*B20 | vn*B16 | vd*B12 | 0x5*B9 | B8 |
   2784        n*B7 | m*B5 | vm);
   2785 }
   2786 
   2787 
   2788 void Assembler::vmla(const DwVfpRegister dst,
   2789                      const DwVfpRegister src1,
   2790                      const DwVfpRegister src2,
   2791                      const Condition cond) {
   2792   // Instruction details available in ARM DDI 0406C.b, A8-932.
   2793   // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
   2794   // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=0(6) | M(5) | 0(4) | Vm(3-0)
   2795   int vd, d;
   2796   dst.split_code(&vd, &d);
   2797   int vn, n;
   2798   src1.split_code(&vn, &n);
   2799   int vm, m;
   2800   src2.split_code(&vm, &m);
   2801   emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
   2802        vm);
   2803 }
   2804 
   2805 
   2806 void Assembler::vmls(const DwVfpRegister dst,
   2807                      const DwVfpRegister src1,
   2808                      const DwVfpRegister src2,
   2809                      const Condition cond) {
   2810   // Instruction details available in ARM DDI 0406C.b, A8-932.
   2811   // cond(31-28) | 11100(27-23) | D(22) | 00(21-20) | Vn(19-16) |
   2812   // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | op=1(6) | M(5) | 0(4) | Vm(3-0)
   2813   int vd, d;
   2814   dst.split_code(&vd, &d);
   2815   int vn, n;
   2816   src1.split_code(&vn, &n);
   2817   int vm, m;
   2818   src2.split_code(&vm, &m);
   2819   emit(cond | 0x1C*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | B6 |
   2820        m*B5 | vm);
   2821 }
   2822 
   2823 
   2824 void Assembler::vdiv(const DwVfpRegister dst,
   2825                      const DwVfpRegister src1,
   2826                      const DwVfpRegister src2,
   2827                      const Condition cond) {
   2828   // Dd = vdiv(Dn, Dm) double precision floating point division.
   2829   // Dd = D:Vd; Dm=M:Vm; Dn=N:Vn.
   2830   // Instruction details available in ARM DDI 0406C.b, A8-882.
   2831   // cond(31-28) | 11101(27-23)| D(22) | 00(21-20) | Vn(19-16) |
   2832   // Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
   2833   int vd, d;
   2834   dst.split_code(&vd, &d);
   2835   int vn, n;
   2836   src1.split_code(&vn, &n);
   2837   int vm, m;
   2838   src2.split_code(&vm, &m);
   2839   emit(cond | 0x1D*B23 | d*B22 | vn*B16 | vd*B12 | 0x5*B9 | B8 | n*B7 | m*B5 |
   2840        vm);
   2841 }
   2842 
   2843 
   2844 void Assembler::vcmp(const DwVfpRegister src1,
   2845                      const DwVfpRegister src2,
   2846                      const Condition cond) {
   2847   // vcmp(Dd, Dm) double precision floating point comparison.
   2848   // Instruction details available in ARM DDI 0406C.b, A8-864.
   2849   // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0100(19-16) |
   2850   // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
   2851   int vd, d;
   2852   src1.split_code(&vd, &d);
   2853   int vm, m;
   2854   src2.split_code(&vm, &m);
   2855   emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x4*B16 | vd*B12 | 0x5*B9 | B8 | B6 |
   2856        m*B5 | vm);
   2857 }
   2858 
   2859 
   2860 void Assembler::vcmp(const DwVfpRegister src1,
   2861                      const double src2,
   2862                      const Condition cond) {
   2863   // vcmp(Dd, #0.0) double precision floating point comparison.
   2864   // Instruction details available in ARM DDI 0406C.b, A8-864.
   2865   // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
   2866   // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
   2867   ASSERT(src2 == 0.0);
   2868   int vd, d;
   2869   src1.split_code(&vd, &d);
   2870   emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x5*B16 | vd*B12 | 0x5*B9 | B8 | B6);
   2871 }
   2872 
   2873 
   2874 void Assembler::vmsr(Register dst, Condition cond) {
   2875   // Instruction details available in ARM DDI 0406A, A8-652.
   2876   // cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
   2877   // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
   2878   emit(cond | 0xE*B24 | 0xE*B20 | B16 |
   2879        dst.code()*B12 | 0xA*B8 | B4);
   2880 }
   2881 
   2882 
   2883 void Assembler::vmrs(Register dst, Condition cond) {
   2884   // Instruction details available in ARM DDI 0406A, A8-652.
   2885   // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
   2886   // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
   2887   emit(cond | 0xE*B24 | 0xF*B20 |  B16 |
   2888        dst.code()*B12 | 0xA*B8 | B4);
   2889 }
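
// A typical pairing (editor's sketch, illustrative only): vcmp sets the
// FPSCR flags, and vmrs with dst == pc (Rt == 15, architecturally
// "vmrs APSR_nzcv, fpscr") copies NZCV into the APSR so ordinary conditional
// execution can test the result. The `assm` instance and label are assumed.
#if 0
  Label less;
  assm.vcmp(d0, d1);   // Compare d0 with d1; result lands in the FPSCR.
  assm.vmrs(pc);       // Transfer the FPSCR condition flags to the APSR.
  assm.b(&less, lt);   // Ordinary conditional branches now apply.
#endif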


void Assembler::vsqrt(const DwVfpRegister dst,
                      const DwVfpRegister src,
                      const Condition cond) {
  // Instruction details available in ARM DDI 0406C.b, A8-1058.
  // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0001(19-16) |
  // Vd(15-12) | 101(11-9) | sz=1(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0)
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | B16 | vd*B12 | 0x5*B9 | B8 | 0x3*B6 |
       m*B5 | vm);
}


// Support for NEON.

void Assembler::vld1(NeonSize size,
                     const NeonListOperand& dst,
                     const NeonMemOperand& src) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.320.
  // 1111(31-28) | 01000(27-23) | D(22) | 10(21-20) | Rn(19-16) |
  // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
  ASSERT(CpuFeatures::IsSupported(NEON));
  int vd, d;
  dst.base().split_code(&vd, &d);
  emit(0xFU*B28 | 4*B24 | d*B22 | 2*B20 | src.rn().code()*B16 | vd*B12 |
       dst.type()*B8 | size*B6 | src.align()*B4 | src.rm().code());
}


void Assembler::vst1(NeonSize size,
                     const NeonListOperand& src,
                     const NeonMemOperand& dst) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.404.
  // 1111(31-28) | 01000(27-23) | D(22) | 00(21-20) | Rn(19-16) |
  // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
  ASSERT(CpuFeatures::IsSupported(NEON));
  int vd, d;
  src.base().split_code(&vd, &d);
  emit(0xFU*B28 | 4*B24 | d*B22 | dst.rn().code()*B16 | vd*B12 | src.type()*B8 |
       size*B6 | dst.align()*B4 | dst.rm().code());
}
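
// Usage sketch (editor's illustration; assumes the NeonListOperand and
// NeonMemOperand helpers declared in assembler-arm.h, where the list operand
// takes a base register plus a register count and the memory operand wraps
// an address register):
#if 0
  CpuFeatureScope scope(&assm, NEON);
  assm.vld1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(r0));  // 32 bytes in.
  assm.vst1(Neon8, NeonListOperand(d0, 4), NeonMemOperand(r1));  // 32 bytes out.
#endif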


void Assembler::vmovl(NeonDataType dt, QwNeonRegister dst, DwVfpRegister src) {
  // Instruction details available in ARM DDI 0406C.b, A8.8.346.
  // 1111(31-28) | 001(27-25) | U(24) | 1(23) | D(22) | imm3(21-19) |
  // 000(18-16) | Vd(15-12) | 101000(11-6) | M(5) | 1(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsSupported(NEON));
  int vd, d;
  dst.split_code(&vd, &d);
  int vm, m;
  src.split_code(&vm, &m);
  emit(0xFU*B28 | B25 | (dt & NeonDataTypeUMask) | B23 | d*B22 |
       (dt & NeonDataTypeSizeMask)*B19 | vd*B12 | 0xA*B8 | m*B5 | B4 | vm);
}


// Pseudo instructions.
void Assembler::nop(int type) {
  // ARMv6{K/T2} and v7 have an actual NOP instruction, but it serializes
  // some of the CPU's pipeline and has to issue. Older ARM chips simply used
  // MOV Rx, Rx as a NOP, and it performs well even on newer CPUs.
  // We therefore use MOV Rx, Rx even on newer CPUs, and use Rx to encode
  // a type.
  ASSERT(0 <= type && type <= 14);  // mov pc, pc isn't a nop.
  emit(al | 13*B21 | type*B12 | type);
}
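
// Worked example (editor's note): nop(0) emits
//   al | 13*B21 == 0xE0000000 | 0x01A00000 == 0xE1A00000,
// the classic "mov r0, r0"; nop(3) emits 0xE1A03003, i.e. "mov r3, r3".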


bool Assembler::IsMovT(Instr instr) {
  instr &= ~(((kNumberOfConditions - 1) << 28) |  // Mask out the condition.
             ((kNumRegisters - 1)*B12) |          // Mask out the register.
             EncodeMovwImmediate(0xFFFF));        // Mask out the immediate.
  return instr == 0x34*B20;
}


bool Assembler::IsMovW(Instr instr) {
  instr &= ~(((kNumberOfConditions - 1) << 28) |  // Mask out the condition.
             ((kNumRegisters - 1)*B12) |          // Mask out the destination.
             EncodeMovwImmediate(0xFFFF));        // Mask out the immediate.
  return instr == 0x30*B20;
}
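
// Worked example (editor's note): "movw r0, #0" assembles to 0xE3000000.
// Clearing the condition (bits 31-28), the destination (bits 15-12) and the
// split 16-bit immediate leaves 0x30*B20 == 0x03000000, so IsMovW() holds;
// a movt instead leaves 0x34*B20 == 0x03400000.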


bool Assembler::IsNop(Instr instr, int type) {
  ASSERT(0 <= type && type <= 14);  // mov pc, pc isn't a nop.
  // Check for mov rx, rx where x = type.
  return instr == (al | 13*B21 | type*B12 | type);
}


bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
  uint32_t dummy1;
  uint32_t dummy2;
  return fits_shifter(imm32, &dummy1, &dummy2, NULL);
}
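
// Editor's note: fits_shifter() succeeds when imm32 can be expressed as an
// 8-bit value rotated right by an even amount (the ARM addressing-mode-1
// immediate); passing NULL for the instruction means no complementary-opcode
// rewriting (such as mov <-> mvn) is attempted. For example, 0x000000FF and
// 0xFF000000 fit (rotations of 0 and 8), while 0x00000102 does not, since
// its set bits span nine positions.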


// Debugging.
void Assembler::RecordJSReturn() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}


void Assembler::RecordDebugBreakSlot() {
  positions_recorder()->WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
}


void Assembler::RecordComment(const char* msg) {
  if (FLAG_code_comments) {
    CheckBuffer();
    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
  }
}


void Assembler::RecordConstPool(int size) {
  // We only need this for debugger support, to correctly compute offsets in
  // the code.
#ifdef ENABLE_DEBUGGER_SUPPORT
  RecordRelocInfo(RelocInfo::CONST_POOL, static_cast<intptr_t>(size));
#endif
}


void Assembler::GrowBuffer() {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 4*KB) {
    desc.buffer_size = 4*KB;
  } else if (buffer_size_ < 1*MB) {
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // No overflow.

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  OS::MemMove(desc.buffer, buffer_, desc.instr_size);
  OS::MemMove(reloc_info_writer.pos() + rc_delta,
              reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc-relative pointing outside the code
  // buffer, nor pc-absolute pointing inside the code buffer, so there is no
  // need to relocate any emitted relocation entries.

  // Relocate pending relocation entries.
  for (int i = 0; i < num_pending_reloc_info_; i++) {
    RelocInfo& rinfo = pending_reloc_info_[i];
    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
           rinfo.rmode() != RelocInfo::POSITION);
    if (rinfo.rmode() != RelocInfo::JS_RETURN) {
      rinfo.set_pc(rinfo.pc() + pc_delta);
    }
  }
}
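
// The growth policy above, in isolation (editor's sketch, illustrative
// only): jump straight to 4 KB, double while under 1 MB, then grow linearly
// in 1 MB steps so very large buffers don't keep doubling.
#if 0
static int NextBufferSize(int current_size) {
  if (current_size < 4 * KB) return 4 * KB;            // Minimum useful size.
  if (current_size < 1 * MB) return 2 * current_size;  // Exponential phase.
  return current_size + 1 * MB;                        // Linear phase.
}
#endif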


void Assembler::db(uint8_t data) {
  // No relocation info should be pending while using db. db is used
  // to write pure data with no pointers, and the constant pool should
  // be emitted before using db.
  ASSERT(num_pending_reloc_info_ == 0);
  ASSERT(num_pending_64_bit_reloc_info_ == 0);
  CheckBuffer();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}


void Assembler::dd(uint32_t data) {
  // No relocation info should be pending while using dd. dd is used
  // to write pure data with no pointers, and the constant pool should
  // be emitted before using dd.
  ASSERT(num_pending_reloc_info_ == 0);
  ASSERT(num_pending_64_bit_reloc_info_ == 0);
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}
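
// Usage sketch (editor's illustration): a caller that wants to embed raw
// data words first flushes any pending constants, satisfying the asserts
// above, then emits the table directly into the instruction stream.
#if 0
  assm.CheckConstPool(true, false);  // Force the pool out; no jump needed.
  assm.dd(0x00000010);               // Raw table entries follow.
  assm.dd(0x00000020);
  assm.db(0xFF);                     // A single raw byte.
#endif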


void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data,
                                UseConstantPoolMode mode) {
  // We do not try to reuse pool constants.
  RelocInfo rinfo(pc_, rmode, data, NULL);
  if (((rmode >= RelocInfo::JS_RETURN) &&
       (rmode <= RelocInfo::DEBUG_BREAK_SLOT)) ||
      (rmode == RelocInfo::CONST_POOL) ||
      mode == DONT_USE_CONSTANT_POOL) {
    // Adjust code for new modes.
    ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
           || RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode)
           || RelocInfo::IsConstPool(rmode)
           || mode == DONT_USE_CONSTANT_POOL);
    // These modes do not need an entry in the constant pool.
  } else {
    RecordRelocInfoConstantPoolEntryHelper(rinfo);
  }
  if (!RelocInfo::IsNone(rinfo.rmode())) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
      if (!Serializer::enabled()) {
        Serializer::TooLateToEnableNow();
      }
#endif
      if (!Serializer::enabled() && !emit_debug_code()) {
        return;
      }
    }
    ASSERT(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
      RelocInfo reloc_info_with_ast_id(pc_,
                                       rmode,
                                       RecordedAstId().ToInt(),
                                       NULL);
      ClearRecordedAstId();
      reloc_info_writer.Write(&reloc_info_with_ast_id);
    } else {
      reloc_info_writer.Write(&rinfo);
    }
  }
}


void Assembler::RecordRelocInfo(double data) {
  // We do not try to reuse pool constants.
  RelocInfo rinfo(pc_, data);
  RecordRelocInfoConstantPoolEntryHelper(rinfo);
}


void Assembler::RecordRelocInfoConstantPoolEntryHelper(const RelocInfo& rinfo) {
  ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
  if (num_pending_reloc_info_ == 0) {
    first_const_pool_use_ = pc_offset();
  }
  pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
  if (rinfo.rmode() == RelocInfo::NONE64) {
    ++num_pending_64_bit_reloc_info_;
  }
  ASSERT(num_pending_64_bit_reloc_info_ <= num_pending_reloc_info_);
  // Make sure the constant pool is not emitted in place of the next
  // instruction for which we just recorded relocation info.
  BlockConstPoolFor(1);
}


void Assembler::BlockConstPoolFor(int instructions) {
  int pc_limit = pc_offset() + instructions * kInstrSize;
  if (no_const_pool_before_ < pc_limit) {
    // If there are some pending entries, the constant pool cannot be blocked
    // further than the constant pool instruction's reach.
    ASSERT((num_pending_reloc_info_ == 0) ||
           (pc_limit - first_const_pool_use_ < kMaxDistToIntPool));
    // TODO(jfb) Also check 64-bit entries are in range (requires splitting
    //           them up from 32-bit entries).
    no_const_pool_before_ = pc_limit;
  }

  if (next_buffer_check_ < no_const_pool_before_) {
    next_buffer_check_ = no_const_pool_before_;
  }
}
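
// Usage sketch (editor's illustration): a sequence that must stay
// contiguous, such as a patchable load pair, blocks pool emission for its
// whole extent via the RAII scope rather than calling BlockConstPoolFor()
// directly.
#if 0
  {
    BlockConstPoolScope block_const_pool(&assm);
    assm.ldr(r0, MemOperand(pc, 0));  // No pool may split this pair.
    assm.add(r0, r0, Operand(r1));
  }
#endif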


void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
  // Some short sequences of instructions must not be broken up by constant
  // pool emission; such sequences are protected by calls to BlockConstPoolFor
  // and BlockConstPoolScope.
  if (is_const_pool_blocked()) {
    // Something is wrong if emission is forced and blocked at the same time.
    ASSERT(!force_emit);
    return;
  }

  // There is nothing to do if there are no pending constant pool entries.
  if (num_pending_reloc_info_ == 0) {
    ASSERT(num_pending_64_bit_reloc_info_ == 0);
    // Calculate the offset of the next check.
    next_buffer_check_ = pc_offset() + kCheckPoolInterval;
    return;
  }

  // Check that the code buffer is large enough before emitting the constant
  // pool (this includes the jump over the pool, the constant pool marker and
  // the gap to the relocation information).
  // Note 64-bit values are wider, and the first one needs to be 64-bit aligned.
  int jump_instr = require_jump ? kInstrSize : 0;
  int size_up_to_marker = jump_instr + kInstrSize;
  int size_after_marker = num_pending_reloc_info_ * kPointerSize;
  bool has_fp_values = (num_pending_64_bit_reloc_info_ > 0);
  // 64-bit values must be 64-bit aligned.
  // We'll start emitting at PC: branch+marker, then 32-bit values, then
  // 64-bit values which might need to be aligned.
  bool require_64_bit_align = has_fp_values &&
      (((uintptr_t)pc_ + size_up_to_marker + size_after_marker) & 0x3);
  if (require_64_bit_align) {
    size_after_marker += kInstrSize;
  }
  // num_pending_reloc_info_ also counts the 64-bit entries, so
  // size_after_marker above has already accounted for half of each 64-bit
  // entry's size. Add the remaining half.
  STATIC_ASSERT(kPointerSize == kDoubleSize / 2);
  size_after_marker += num_pending_64_bit_reloc_info_ * (kDoubleSize / 2);

  int size = size_up_to_marker + size_after_marker;
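
  // Worked example (editor's note, assuming kInstrSize == kPointerSize == 4):
  // with three pending entries of which one is 64-bit, and require_jump true,
  // size_up_to_marker == 4 + 4 == 8 and size_after_marker == 3*4 (all
  // entries) + 1*4 (second half of the 64-bit entry) == 16, plus 4 if
  // alignment padding is needed, so size is 24 or 28 bytes.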

  // We emit a constant pool when:
  //  * requested to do so by parameter force_emit (e.g. after each function).
  //  * the distance from the first instruction accessing the constant pool to
  //    any of the constant pool entries will exceed its limit the next time
  //    the pool is checked. This is overly restrictive, but we don't emit
  //    constant pool entries in order, so it is conservatively correct.
  //  * the instruction doesn't require a jump after itself to jump over the
  //    constant pool, and we're getting close to running out of range.
  if (!force_emit) {
    ASSERT((first_const_pool_use_ >= 0) && (num_pending_reloc_info_ > 0));
    int dist = pc_offset() + size - first_const_pool_use_;
    if (has_fp_values) {
      if ((dist < kMaxDistToFPPool - kCheckPoolInterval) &&
          (require_jump || (dist < kMaxDistToFPPool / 2))) {
        return;
      }
    } else {
      if ((dist < kMaxDistToIntPool - kCheckPoolInterval) &&
          (require_jump || (dist < kMaxDistToIntPool / 2))) {
        return;
      }
    }
  }

  int needed_space = size + kGap;
  while (buffer_space() <= needed_space) GrowBuffer();

  {
    // Block recursive calls to CheckConstPool.
    BlockConstPoolScope block_const_pool(this);
    RecordComment("[ Constant Pool");
    RecordConstPool(size);

    // Emit a jump over the constant pool if necessary.
    Label after_pool;
    if (require_jump) {
      b(&after_pool);
    }

    // Put down the constant pool marker, an "undefined instruction".
    // The encoded data size helps the disassembler know what to print.
    emit(kConstantPoolMarker |
         EncodeConstantPoolLength(size_after_marker / kPointerSize));

    if (require_64_bit_align) {
      emit(kConstantPoolMarker);
    }

    // Emit 64-bit constant pool entries first: their range is smaller than
    // that of 32-bit entries.
    for (int i = 0; i < num_pending_reloc_info_; i++) {
      RelocInfo& rinfo = pending_reloc_info_[i];

      if (rinfo.rmode() != RelocInfo::NONE64) {
        // 32-bit values are emitted later.
        continue;
      }

      ASSERT(!((uintptr_t)pc_ & 0x3));  // Check 64-bit alignment.

      Instr instr = instr_at(rinfo.pc());
      // Instruction to patch must be 'vldr rd, [pc, #offset]' with offset == 0.
      ASSERT((IsVldrDPcImmediateOffset(instr) &&
              GetVldrDRegisterImmediateOffset(instr) == 0));

      int delta = pc_ - rinfo.pc() - kPcLoadDelta;
      ASSERT(is_uint10(delta));

      instr_at_put(rinfo.pc(), SetVldrDRegisterImmediateOffset(instr, delta));

      const double double_data = rinfo.data64();
      uint64_t uint_data = 0;
      OS::MemCopy(&uint_data, &double_data, sizeof(double_data));
      emit(uint_data & 0xFFFFFFFF);
      emit(uint_data >> 32);
    }

    // Emit 32-bit constant pool entries.
    for (int i = 0; i < num_pending_reloc_info_; i++) {
      RelocInfo& rinfo = pending_reloc_info_[i];
      ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
             rinfo.rmode() != RelocInfo::POSITION &&
             rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
             rinfo.rmode() != RelocInfo::CONST_POOL);

      if (rinfo.rmode() == RelocInfo::NONE64) {
        // 64-bit values were emitted earlier.
        continue;
      }

      Instr instr = instr_at(rinfo.pc());

      // 64-bit loads shouldn't get here.
      ASSERT(!IsVldrDPcImmediateOffset(instr));

      int delta = pc_ - rinfo.pc() - kPcLoadDelta;
      // 0 is the smallest delta:
      //   ldr rd, [pc, #0]
      //   constant pool marker
      //   data
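      // (Editor's note: kPcLoadDelta is ARM's pc-ahead bias of 8 bytes. In
      // the layout above the data word sits 8 bytes past the ldr, so
      // pc_ - rinfo.pc() == 8 and delta == 0, matching the "#0" offset.)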

      if (IsLdrPcImmediateOffset(instr) &&
          GetLdrRegisterImmediateOffset(instr) == 0) {
        ASSERT(is_uint12(delta));
        instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
        emit(rinfo.data());
      } else {
        ASSERT(IsMovW(instr));
        emit(rinfo.data());
      }
    }

    num_pending_reloc_info_ = 0;
    num_pending_64_bit_reloc_info_ = 0;
    first_const_pool_use_ = -1;

    RecordComment("]");

    if (after_pool.is_linked()) {
      bind(&after_pool);
    }
  }

  // Since a constant pool was just emitted, move the check offset forward by
  // the standard interval.
  next_buffer_check_ = pc_offset() + kCheckPoolInterval;
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_ARM