// Home | History | Annotate | Download | only in arm
      1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
      2 // All Rights Reserved.
      3 //
      4 // Redistribution and use in source and binary forms, with or without
      5 // modification, are permitted provided that the following conditions
      6 // are met:
      7 //
      8 // - Redistributions of source code must retain the above copyright notice,
      9 // this list of conditions and the following disclaimer.
     10 //
     11 // - Redistribution in binary form must reproduce the above copyright
     12 // notice, this list of conditions and the following disclaimer in the
     13 // documentation and/or other materials provided with the
     14 // distribution.
     15 //
     16 // - Neither the name of Sun Microsystems or the names of contributors may
     17 // be used to endorse or promote products derived from this software without
     18 // specific prior written permission.
     19 //
     20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
     23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
     24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
     25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
     26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
     29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
     31 // OF THE POSSIBILITY OF SUCH DAMAGE.
     32 
     33 // The original source code covered by the above license above has been
     34 // modified significantly by Google Inc.
     35 // Copyright 2010 the V8 project authors. All rights reserved.
     36 
     37 #include "v8.h"
     38 
     39 #include "arm/assembler-arm-inl.h"
     40 #include "serialize.h"
     41 
     42 namespace v8 {
     43 namespace internal {
     44 
// Safe default is no features.
// Bitmasks of CpuFeature bits: supported_ is what the target CPU offers,
// enabled_ is what code generation is currently allowed to use, and
// found_by_runtime_probing_ records which supported features came from
// runtime detection (as opposed to being implied statically).
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::enabled_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_ = 0;
     49 
     50 
#ifdef __arm__
// Returns the CpuFeature bits implied by the flags this compiler was built
// with: if the C++ compiler may emit ARMv7/VFP instructions anywhere, our
// generated code can rely on them too, even in snapshots.
static uint64_t CpuFeaturesImpliedByCompiler() {
  uint64_t answer = 0;
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
  answer |= 1u << ARMv7;
#endif  // def CAN_USE_ARMV7_INSTRUCTIONS
  // If the compiler is allowed to use VFP then we can use VFP too in our code
  // generation even when generating snapshots.  This won't work for cross
  // compilation.
#if defined(__VFP_FP__) && !defined(__SOFTFP__)
  answer |= 1u << VFP3;
#endif  // defined(__VFP_FP__) && !defined(__SOFTFP__)
#ifdef CAN_USE_VFP_INSTRUCTIONS
  answer |= 1u << VFP3;
#endif  // def CAN_USE_VFP_INSTRUCTIONS
  return answer;
}
#endif  // def __arm__
     69 
     70 
// Determine the available CPU features, filling in supported_ (and
// found_by_runtime_probing_ on hardware).  In the simulator build the
// features come from flags; on real hardware from OS probing — except
// when serializing, where only statically-implied features may be used
// so the snapshot runs on any target CPU.
void CpuFeatures::Probe() {
#ifndef __arm__
  // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
  if (FLAG_enable_vfp3) {
      supported_ |= 1u << VFP3;
  }
  // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
  if (FLAG_enable_armv7) {
      supported_ |= 1u << ARMv7;
  }
#else  // def __arm__
  if (Serializer::enabled()) {
    supported_ |= OS::CpuFeaturesImpliedByPlatform();
    supported_ |= CpuFeaturesImpliedByCompiler();
    return;  // No features if we might serialize.
  }

  if (OS::ArmCpuHasFeature(VFP3)) {
    // This implementation also sets the VFP flags if
    // runtime detection of VFP returns true.
    supported_ |= 1u << VFP3;
    found_by_runtime_probing_ |= 1u << VFP3;
  }

  if (OS::ArmCpuHasFeature(ARMv7)) {
    supported_ |= 1u << ARMv7;
    found_by_runtime_probing_ |= 1u << ARMv7;
  }
#endif  // def __arm__
}
    101 
    102 
    103 // -----------------------------------------------------------------------------
    104 // Implementation of Register and CRegister
    105 
// General-purpose register singletons.  The struct value is the ARM
// register number used in instruction encodings; -1 means "no register".
Register no_reg = { -1 };

Register r0  = {  0 };
Register r1  = {  1 };
Register r2  = {  2 };
Register r3  = {  3 };
Register r4  = {  4 };
Register r5  = {  5 };
Register r6  = {  6 };
Register r7  = {  7 };
Register r8  = {  8 };  // Used as context register.
Register r9  = {  9 };
Register r10 = { 10 };  // Used as roots register.
Register fp  = { 11 };
Register ip  = { 12 };
Register sp  = { 13 };
Register lr  = { 14 };
Register pc  = { 15 };


// Coprocessor registers cr0-cr15 (used in coprocessor instructions).
CRegister no_creg = { -1 };

CRegister cr0  = {  0 };
CRegister cr1  = {  1 };
CRegister cr2  = {  2 };
CRegister cr3  = {  3 };
CRegister cr4  = {  4 };
CRegister cr5  = {  5 };
CRegister cr6  = {  6 };
CRegister cr7  = {  7 };
CRegister cr8  = {  8 };
CRegister cr9  = {  9 };
CRegister cr10 = { 10 };
CRegister cr11 = { 11 };
CRegister cr12 = { 12 };
CRegister cr13 = { 13 };
CRegister cr14 = { 14 };
CRegister cr15 = { 15 };

// Support for the VFP registers s0 to s31 (d0 to d15).
// Note that "sN:sM" is the same as "dN/2".
SwVfpRegister s0  = {  0 };
SwVfpRegister s1  = {  1 };
SwVfpRegister s2  = {  2 };
SwVfpRegister s3  = {  3 };
SwVfpRegister s4  = {  4 };
SwVfpRegister s5  = {  5 };
SwVfpRegister s6  = {  6 };
SwVfpRegister s7  = {  7 };
SwVfpRegister s8  = {  8 };
SwVfpRegister s9  = {  9 };
SwVfpRegister s10 = { 10 };
SwVfpRegister s11 = { 11 };
SwVfpRegister s12 = { 12 };
SwVfpRegister s13 = { 13 };
SwVfpRegister s14 = { 14 };
SwVfpRegister s15 = { 15 };
SwVfpRegister s16 = { 16 };
SwVfpRegister s17 = { 17 };
SwVfpRegister s18 = { 18 };
SwVfpRegister s19 = { 19 };
SwVfpRegister s20 = { 20 };
SwVfpRegister s21 = { 21 };
SwVfpRegister s22 = { 22 };
SwVfpRegister s23 = { 23 };
SwVfpRegister s24 = { 24 };
SwVfpRegister s25 = { 25 };
SwVfpRegister s26 = { 26 };
SwVfpRegister s27 = { 27 };
SwVfpRegister s28 = { 28 };
SwVfpRegister s29 = { 29 };
SwVfpRegister s30 = { 30 };
SwVfpRegister s31 = { 31 };

// Double-precision VFP registers; dN aliases the single pair s2N:s2N+1.
DwVfpRegister d0  = {  0 };
DwVfpRegister d1  = {  1 };
DwVfpRegister d2  = {  2 };
DwVfpRegister d3  = {  3 };
DwVfpRegister d4  = {  4 };
DwVfpRegister d5  = {  5 };
DwVfpRegister d6  = {  6 };
DwVfpRegister d7  = {  7 };
DwVfpRegister d8  = {  8 };
DwVfpRegister d9  = {  9 };
DwVfpRegister d10 = { 10 };
DwVfpRegister d11 = { 11 };
DwVfpRegister d12 = { 12 };
DwVfpRegister d13 = { 13 };
DwVfpRegister d14 = { 14 };
DwVfpRegister d15 = { 15 };
    196 
    197 // -----------------------------------------------------------------------------
    198 // Implementation of RelocInfo
    199 
// No relocation modes need special patching when code moves on ARM.
const int RelocInfo::kApplyMask = 0;
    201 
    202 
// Overwrite instruction_count instructions at this reloc target (pc_)
// with the supplied sequence, then flush the instruction cache for the
// patched range so the CPU cannot execute stale instructions.
void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
  // Patch the code at the current address with the supplied instructions.
  Instr* pc = reinterpret_cast<Instr*>(pc_);
  Instr* instr = reinterpret_cast<Instr*>(instructions);
  for (int i = 0; i < instruction_count; i++) {
    *(pc + i) = *(instr + i);
  }

  // Indicate that code has changed.
  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
}
    214 
    215 
// Patch the code at the current PC with a call to the target address.
// Additional guard instructions can be added if required.
// Not implemented for ARM in this port; callers must not reach this.
void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
  // Patch the code at the current address with a call to the target.
  UNIMPLEMENTED();
}
    222 
    223 
    224 // -----------------------------------------------------------------------------
    225 // Implementation of Operand and MemOperand
    226 // See assembler-arm-inl.h for inlined constructors
    227 
// Immediate operand from a heap object handle.  Heap objects are
// referenced indirectly through the handle location (so the GC can update
// the pointer) with EMBEDDED_OBJECT relocation; smis are embedded
// directly and need no relocation.
Operand::Operand(Handle<Object> handle) {
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  ASSERT(!Heap::InNewSpace(obj));
  if (obj->IsHeapObject()) {
    imm32_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // no relocation needed
    imm32_ =  reinterpret_cast<intptr_t>(obj);
    rmode_ = RelocInfo::NONE;
  }
}
    242 
    243 
    244 Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
    245   ASSERT(is_uint5(shift_imm));
    246   ASSERT(shift_op != ROR || shift_imm != 0);  // use RRX if you mean it
    247   rm_ = rm;
    248   rs_ = no_reg;
    249   shift_op_ = shift_op;
    250   shift_imm_ = shift_imm & 31;
    251   if (shift_op == RRX) {
    252     // encoded as ROR with shift_imm == 0
    253     ASSERT(shift_imm == 0);
    254     shift_op_ = ROR;
    255     shift_imm_ = 0;
    256   }
    257 }
    258 
    259 
    260 Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
    261   ASSERT(shift_op != RRX);
    262   rm_ = rm;
    263   rs_ = no_reg;
    264   shift_op_ = shift_op;
    265   rs_ = rs;
    266 }
    267 
    268 
    269 MemOperand::MemOperand(Register rn, int32_t offset, AddrMode am) {
    270   rn_ = rn;
    271   rm_ = no_reg;
    272   offset_ = offset;
    273   am_ = am;
    274 }
    275 
    276 MemOperand::MemOperand(Register rn, Register rm, AddrMode am) {
    277   rn_ = rn;
    278   rm_ = rm;
    279   shift_op_ = LSL;
    280   shift_imm_ = 0;
    281   am_ = am;
    282 }
    283 
    284 
// Memory operand with a scaled register offset: [rn, rm SHIFT #shift_imm],
// using addressing mode am.  shift_imm must fit in 5 bits.
MemOperand::MemOperand(Register rn, Register rm,
                       ShiftOp shift_op, int shift_imm, AddrMode am) {
  ASSERT(is_uint5(shift_imm));
  rn_ = rn;
  rm_ = rm;
  shift_op_ = shift_op;
  shift_imm_ = shift_imm & 31;
  am_ = am;
}
    294 
    295 
    296 // -----------------------------------------------------------------------------
    297 // Implementation of Assembler.
    298 
// Instruction encoding bits.
// Mnemonic names for single bits and field masks of the ARM instruction
// formats.  Several names alias the same bit (e.g. L and S are both
// 1 << 20) because the bit's meaning depends on the instruction class.
enum {
  H   = 1 << 5,   // halfword (or byte)
  S6  = 1 << 6,   // signed (or unsigned)
  L   = 1 << 20,  // load (or store)
  S   = 1 << 20,  // set condition code (or leave unchanged)
  W   = 1 << 21,  // writeback base register (or leave unchanged)
  A   = 1 << 21,  // accumulate in multiply instruction (or not)
  B   = 1 << 22,  // unsigned byte (or word)
  N   = 1 << 22,  // long (or short)
  U   = 1 << 23,  // positive (or negative) offset/index
  P   = 1 << 24,  // offset/pre-indexed addressing (or post-indexed addressing)
  I   = 1 << 25,  // immediate shifter operand (or not)

  // Plain positional bits, named BN for bit N.
  B4  = 1 << 4,
  B5  = 1 << 5,
  B6  = 1 << 6,
  B7  = 1 << 7,
  B8  = 1 << 8,
  B9  = 1 << 9,
  B12 = 1 << 12,
  B16 = 1 << 16,
  B18 = 1 << 18,
  B19 = 1 << 19,
  B20 = 1 << 20,
  B21 = 1 << 21,
  B22 = 1 << 22,
  B23 = 1 << 23,
  B24 = 1 << 24,
  B25 = 1 << 25,
  B26 = 1 << 26,
  B27 = 1 << 27,

  // Instruction bit masks.
  RdMask     = 15 << 12,  // in str instruction
  CondMask   = 15 << 28,
  CoprocessorMask = 15 << 8,
  OpCodeMask = 15 << 21,  // in data-processing instructions
  Imm24Mask  = (1 << 24) - 1,
  Off12Mask  = (1 << 12) - 1,
  // Reserved condition.
  nv = 15 << 28
};
    342 
    343 
// Pre-built instruction patterns for push/pop and call sequences.  The
// k*Pattern constants omit the data register so users can match/OR it in.
// add(sp, sp, 4) instruction (aka Pop())
static const Instr kPopInstruction =
    al | 4 * B21 | 4 | LeaveCC | I | sp.code() * B16 | sp.code() * B12;
// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
// register r is not encoded.
static const Instr kPushRegPattern =
    al | B26 | 4 | NegPreIndex | sp.code() * B16;
// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
// register r is not encoded.
static const Instr kPopRegPattern =
    al | B26 | L | 4 | PostIndex | sp.code() * B16;
// mov lr, pc
const Instr kMovLrPc = al | 13*B21 | pc.code() | lr.code() * B12;
// ldr pc, [pc, #XXX]
const Instr kLdrPCPattern = al | B26 | L | pc.code() * B16;
    359 
// Spare buffer.
// A single minimal-sized code buffer cached between Assembler instances
// to avoid repeated allocation (see the constructor and destructor).
static const int kMinimalBufferSize = 4*KB;
static byte* spare_buffer_ = NULL;
    363 
// Construct an assembler over the given buffer.  When buffer is NULL the
// assembler manages its own storage: it reuses the cached spare_buffer_
// for minimal-sized requests, otherwise allocates.  Instructions grow
// forwards from buffer_ while relocation info is written backwards from
// the end of the buffer.
Assembler::Assembler(void* buffer, int buffer_size) {
  if (buffer == NULL) {
    // Do our own buffer management.
    if (buffer_size <= kMinimalBufferSize) {
      buffer_size = kMinimalBufferSize;

      if (spare_buffer_ != NULL) {
        buffer = spare_buffer_;
        spare_buffer_ = NULL;
      }
    }
    if (buffer == NULL) {
      buffer_ = NewArray<byte>(buffer_size);
    } else {
      buffer_ = static_cast<byte*>(buffer);
    }
    buffer_size_ = buffer_size;
    own_buffer_ = true;

  } else {
    // Use externally provided buffer instead.
    ASSERT(buffer_size > 0);
    buffer_ = static_cast<byte*>(buffer);
    buffer_size_ = buffer_size;
    own_buffer_ = false;
  }

  // Setup buffer pointers.
  ASSERT(buffer_ != NULL);
  pc_ = buffer_;
  reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
  num_prinfo_ = 0;  // no pending reloc entries for the constant pool yet
  next_buffer_check_ = 0;
  no_const_pool_before_ = 0;
  last_const_pool_end_ = 0;
  last_bound_pos_ = 0;
  current_statement_position_ = RelocInfo::kNoPosition;
  current_position_ = RelocInfo::kNoPosition;
  written_statement_position_ = current_statement_position_;
  written_position_ = current_position_;
}
    405 
    406 
// Release the code buffer if we own it.  A minimal-sized buffer is cached
// in spare_buffer_ for the next Assembler instead of being freed.
Assembler::~Assembler() {
  if (own_buffer_) {
    if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
      spare_buffer_ = buffer_;
    } else {
      DeleteArray(buffer_);
    }
  }
}
    416 
    417 
// Finalize code generation: flush any pending constant pool, then fill in
// the code descriptor.  Instructions occupy [buffer_, pc_); relocation
// info occupies [reloc_info_writer.pos(), buffer_ + buffer_size_).
void Assembler::GetCode(CodeDesc* desc) {
  // Emit constant pool if necessary.
  CheckConstPool(true, false);
  ASSERT(num_prinfo_ == 0);

  // Setup code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
}
    429 
    430 
    431 void Assembler::Align(int m) {
    432   ASSERT(m >= 4 && IsPowerOf2(m));
    433   while ((pc_offset() & (m - 1)) != 0) {
    434     nop();
    435   }
    436 }
    437 
    438 
    439 // Labels refer to positions in the (to be) generated code.
    440 // There are bound, linked, and unused labels.
    441 //
    442 // Bound labels refer to known positions in the already
    443 // generated code. pos() is the position the label refers to.
    444 //
    445 // Linked labels refer to unknown positions in the code
    446 // to be generated; pos() is the position of the last
    447 // instruction using the label.
    448 
    449 
// The link chain is terminated by a negative code position (must be aligned)
// Sentinel stored where the last link in a label's chain would point.
const int kEndOfChain = -4;
    452 
    453 
// Decode the instruction at pos and return the code position it targets
// (for a branch) or the stored value (for an emitted label constant,
// undoing the Code-header bias applied by target_at_put/label_at_put).
int Assembler::target_at(int pos)  {
  Instr instr = instr_at(pos);
  if ((instr & ~Imm24Mask) == 0) {
    // Emitted label constant, not part of a branch.
    return instr - (Code::kHeaderSize - kHeapObjectTag);
  }
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  // Sign-extend the 24-bit immediate and scale it to a byte offset.
  int imm26 = ((instr & Imm24Mask) << 8) >> 6;
  if ((instr & CondMask) == nv && (instr & B24) != 0)
    // blx uses bit 24 to encode bit 2 of imm26
    imm26 += 2;

  return pos + kPcLoadDelta + imm26;
}
    468 
    469 
// Patch the instruction at pos so that it targets target_pos (branch) or
// stores it (emitted label constant).  Inverse of target_at().
void Assembler::target_at_put(int pos, int target_pos) {
  Instr instr = instr_at(pos);
  if ((instr & ~Imm24Mask) == 0) {
    ASSERT(target_pos == kEndOfChain || target_pos >= 0);
    // Emitted label constant, not part of a branch.
    // Make label relative to Code* of generated Code object.
    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    return;
  }
  int imm26 = target_pos - (pos + kPcLoadDelta);
  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
  if ((instr & CondMask) == nv) {
    // blx uses bit 24 to encode bit 2 of imm26
    ASSERT((imm26 & 1) == 0);
    instr = (instr & ~(B24 | Imm24Mask)) | ((imm26 & 2) >> 1)*B24;
  } else {
    ASSERT((imm26 & 3) == 0);
    instr &= ~Imm24Mask;
  }
  // Re-encode the (word-scaled) offset into the low 24 bits.
  int imm24 = imm26 >> 2;
  ASSERT(is_int24(imm24));
  instr_at_put(pos, instr | (imm24 & Imm24Mask));
}
    493 
    494 
// Debug helper: print the state of label L.  For a linked (unbound)
// label this walks the link chain — on a local copy, so L itself is not
// modified — printing the kind of branch (or label constant) at each
// linked position.
void Assembler::print(Label* L) {
  if (L->is_unused()) {
    PrintF("unused label\n");
  } else if (L->is_bound()) {
    PrintF("bound label to %d\n", L->pos());
  } else if (L->is_linked()) {
    Label l = *L;
    PrintF("unbound label");
    while (l.is_linked()) {
      PrintF("@ %d ", l.pos());
      Instr instr = instr_at(l.pos());
      if ((instr & ~Imm24Mask) == 0) {
        PrintF("value\n");
      } else {
        ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx
        int cond = instr & CondMask;
        const char* b;
        const char* c;
        if (cond == nv) {
          b = "blx";
          c = "";
        } else {
          if ((instr & B24) != 0)
            b = "bl";
          else
            b = "b";

          // Translate the condition field to its mnemonic suffix.
          switch (cond) {
            case eq: c = "eq"; break;
            case ne: c = "ne"; break;
            case hs: c = "hs"; break;
            case lo: c = "lo"; break;
            case mi: c = "mi"; break;
            case pl: c = "pl"; break;
            case vs: c = "vs"; break;
            case vc: c = "vc"; break;
            case hi: c = "hi"; break;
            case ls: c = "ls"; break;
            case ge: c = "ge"; break;
            case lt: c = "lt"; break;
            case gt: c = "gt"; break;
            case le: c = "le"; break;
            case al: c = ""; break;
            default:
              c = "";
              UNREACHABLE();
          }
        }
        PrintF("%s%s\n", b, c);
      }
      next(&l);
    }
  } else {
    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
  }
}
    551 
    552 
// Bind label L to position pos: patch every instruction on L's link
// chain to target pos, then mark the label bound.
void Assembler::bind_to(Label* L, int pos) {
  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    next(L);  // call next before overwriting link with target at fixup_pos
    target_at_put(fixup_pos, pos);
  }
  L->bind_to(pos);

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_)
    last_bound_pos_ = pos;
}
    567 
    568 
// Splice appendix's link chain onto the end of L's chain (or let L take
// over the chain entirely if L was empty), then unuse appendix.
void Assembler::link_to(Label* L, Label* appendix) {
  if (appendix->is_linked()) {
    if (L->is_linked()) {
      // Append appendix to L's list.
      int fixup_pos;
      int link = L->pos();
      // Walk to the last entry of L's chain (terminated by kEndOfChain).
      do {
        fixup_pos = link;
        link = target_at(fixup_pos);
      } while (link > 0);
      ASSERT(link == kEndOfChain);
      target_at_put(fixup_pos, appendix->pos());
    } else {
      // L is empty, simply use appendix.
      *L = *appendix;
    }
  }
  appendix->Unuse();  // appendix should not be used anymore
}
    588 
    589 
// Bind label L to the current code position.
void Assembler::bind(Label* L) {
  ASSERT(!L->is_bound());  // label can only be bound once
  bind_to(L, pc_offset());
}
    594 
    595 
// Advance linked label L to the next entry of its link chain, or mark it
// unused when the chain terminates (kEndOfChain).
void Assembler::next(Label* L) {
  ASSERT(L->is_linked());
  int link = target_at(L->pos());
  if (link > 0) {
    L->link_to(link);
  } else {
    ASSERT(link == kEndOfChain);
    L->Unuse();
  }
}
    606 
    607 
    608 // Low-level code emission routines depending on the addressing mode.
    609 static bool fits_shifter(uint32_t imm32,
    610                          uint32_t* rotate_imm,
    611                          uint32_t* immed_8,
    612                          Instr* instr) {
    613   // imm32 must be unsigned.
    614   for (int rot = 0; rot < 16; rot++) {
    615     uint32_t imm8 = (imm32 << 2*rot) | (imm32 >> (32 - 2*rot));
    616     if ((imm8 <= 0xff)) {
    617       *rotate_imm = rot;
    618       *immed_8 = imm8;
    619       return true;
    620     }
    621   }
    622   // If the opcode is mov or mvn and if ~imm32 fits, change the opcode.
    623   if (instr != NULL && (*instr & 0xd*B21) == 0xd*B21) {
    624     if (fits_shifter(~imm32, rotate_imm, immed_8, NULL)) {
    625       *instr ^= 0x2*B21;
    626       return true;
    627     }
    628   }
    629   return false;
    630 }
    631 
    632 
// We have to use the temporary register for things that can be relocated even
// if they can be encoded in the ARM's 12 bits of immediate-offset instruction
// space.  There is no guarantee that the relocated location can be similarly
// encoded.
static bool MustUseIp(RelocInfo::Mode rmode) {
  if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
    if (!Serializer::enabled()) {
      // Code has already been generated without the serializer, so it is
      // too late to turn it on now; record that fact.
      Serializer::TooLateToEnableNow();
    }
#endif  // def DEBUG
    return Serializer::enabled();
  } else if (rmode == RelocInfo::NONE) {
    return false;
  }
  return true;
}
    650 
    651 
// Addressing mode 1: emit a data-processing instruction with shifter
// operand x.  Immediates that cannot be encoded (or that must be
// relocatable) are first loaded into ip via a pc-relative ldr and the
// instruction is re-emitted against ip.
void Assembler::addrmod1(Instr instr,
                         Register rn,
                         Register rd,
                         const Operand& x) {
  CheckBuffer();
  ASSERT((instr & ~(CondMask | OpCodeMask | S)) == 0);
  if (!x.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (MustUseIp(x.rmode_) ||
        !fits_shifter(x.imm32_, &rotate_imm, &immed_8, &instr)) {
      // The immediate operand cannot be encoded as a shifter operand, so load
      // it first to register ip and change the original instruction to use ip.
      // However, if the original instruction is a 'mov rd, x' (not setting the
      // condition code), then replace it with a 'ldr rd, [pc]'.
      RecordRelocInfo(x.rmode_, x.imm32_);
      CHECK(!rn.is(ip));  // rn should never be ip, or will be trashed
      Condition cond = static_cast<Condition>(instr & CondMask);
      if ((instr & ~CondMask) == 13*B21) {  // mov, S not set
        ldr(rd, MemOperand(pc, 0), cond);
      } else {
        ldr(ip, MemOperand(pc, 0), cond);
        addrmod1(instr, rn, rd, Operand(ip));
      }
      return;
    }
    instr |= I | rotate_imm*B8 | immed_8;
  } else if (!x.rs_.is_valid()) {
    // Immediate shift.
    instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  } else {
    // Register shift.
    ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
    instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
  }
  emit(instr | rn.code()*B16 | rd.code()*B12);
  if (rn.is(pc) || x.rm_.is(pc))
    // Block constant pool emission for one instruction after reading pc.
    BlockConstPoolBefore(pc_offset() + kInstrSize);
}
    693 
    694 
// Addressing mode 2: emit a word/byte load or store with memory operand
// x.  A negative immediate offset is encoded by flipping the U (up) bit;
// offsets that do not fit in 12 bits are materialized into ip first.
void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(CondMask | B | L)) == B26);
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_12 = x.offset_;
    if (offset_12 < 0) {
      offset_12 = -offset_12;
      am ^= U;
    }
    if (!is_uint12(offset_12)) {
      // Immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed.
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC,
          static_cast<Condition>(instr & CondMask));
      addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_12 >= 0);  // no masking needed
    instr |= offset_12;
  } else {
    // Register offset (shift_imm_ and shift_op_ are 0) or scaled
    // register offset the constructors make sure than both shift_imm_
    // and shift_op_ are initialized.
    ASSERT(!x.rm_.is(pc));
    instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
    726 
    727 
// Addressing mode 3: emit a halfword/signed-byte load or store with
// memory operand x.  Only 8-bit immediate offsets and unscaled register
// offsets can be encoded; anything else is materialized into ip first.
void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
  ASSERT((instr & ~(CondMask | L | S6 | H)) == (B4 | B7));
  ASSERT(x.rn_.is_valid());
  int am = x.am_;
  if (!x.rm_.is_valid()) {
    // Immediate offset.
    int offset_8 = x.offset_;
    if (offset_8 < 0) {
      offset_8 = -offset_8;
      am ^= U;
    }
    if (!is_uint8(offset_8)) {
      // Immediate offset cannot be encoded, load it first to register ip
      // rn (and rd in a load) should never be ip, or will be trashed.
      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
      mov(ip, Operand(x.offset_), LeaveCC,
          static_cast<Condition>(instr & CondMask));
      addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
      return;
    }
    ASSERT(offset_8 >= 0);  // no masking needed
    // The 8-bit offset is split into two nibbles around bits 7:4.
    instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
  } else if (x.shift_imm_ != 0) {
    // Scaled register offset not supported, load index first
    // rn (and rd in a load) should never be ip, or will be trashed.
    ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
    mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
        static_cast<Condition>(instr & CondMask));
    addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
    return;
  } else {
    // Register offset.
    ASSERT((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
    instr |= x.rm_.code();
  }
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
  emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
}
    766 
    767 
// Addressing mode 4: emit a load/store-multiple (ldm/stm) with register
// list bitmask rl.  The list must be non-empty and the base must not be pc.
void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
  ASSERT((instr & ~(CondMask | P | U | W | L)) == B27);
  ASSERT(rl != 0);
  ASSERT(!rn.is(pc));
  emit(instr | rn.code()*B16 | rl);
}
    774 
    775 
// Addressing mode 5: emit a coprocessor load/store (ldc/stc) with an
// immediate word offset.  The offset must be a multiple of 4 whose
// word count fits in 8 bits; negative offsets flip the U bit.
void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
  // Unindexed addressing is not encoded by this function.
  ASSERT_EQ((B27 | B26),
            (instr & ~(CondMask | CoprocessorMask | P | U | N | W | L)));
  ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
  int am = x.am_;
  int offset_8 = x.offset_;
  ASSERT((offset_8 & 3) == 0);  // offset must be an aligned word offset
  offset_8 >>= 2;
  if (offset_8 < 0) {
    offset_8 = -offset_8;
    am ^= U;
  }
  ASSERT(is_uint8(offset_8));  // unsigned word offset must fit in a byte
  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback

  // Post-indexed addressing requires W == 1; different than in addrmod2/3.
  if ((am & P) == 0)
    am |= W;

  ASSERT(offset_8 >= 0);  // no masking needed
  emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
}
    799 
    800 
// Compute the byte offset (relative to pc + kPcLoadDelta) from the next
// emitted branch to label L.  If L is unbound, the branch is added to
// L's link chain and the returned offset encodes the previous link.
int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
  int target_pos;
  if (L->is_bound()) {
    target_pos = L->pos();
  } else {
    if (L->is_linked()) {
      target_pos = L->pos();  // L's link
    } else {
      target_pos = kEndOfChain;
    }
    L->link_to(pc_offset());
  }

  // Block the emission of the constant pool, since the branch instruction must
  // be emitted at the pc offset recorded by the label.
  BlockConstPoolBefore(pc_offset() + kInstrSize);
  return target_pos - (pc_offset() + kPcLoadDelta);
}
    819 
    820 
    821 void Assembler::label_at_put(Label* L, int at_offset) {
    822   int target_pos;
    823   if (L->is_bound()) {
    824     target_pos = L->pos();
    825   } else {
    826     if (L->is_linked()) {
    827       target_pos = L->pos();  // L's link
    828     } else {
    829       target_pos = kEndOfChain;
    830     }
    831     L->link_to(at_offset);
    832     instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
    833   }
    834 }
    835 
    836 
    837 // Branch instructions.
// Branch (b): pc-relative branch by branch_offset bytes (relative to
// pc + kPcLoadDelta); the offset must be word aligned.
void Assembler::b(int branch_offset, Condition cond) {
  ASSERT((branch_offset & 3) == 0);
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(cond | B27 | B25 | (imm24 & Imm24Mask));

  if (cond == al)
    // Dead code is a good location to emit the constant pool.
    CheckConstPool(false, false);
}
    848 
    849 
    850 void Assembler::bl(int branch_offset, Condition cond) {
    851   ASSERT((branch_offset & 3) == 0);
    852   int imm24 = branch_offset >> 2;
    853   ASSERT(is_int24(imm24));
    854   emit(cond | B27 | B25 | B24 | (imm24 & Imm24Mask));
    855 }
    856 
    857 
// BLX (immediate form): branch with link and exchange to Thumb state.
// Encoded with the 1111 condition field; the H bit (B24) holds bit 1 of the
// halfword-aligned offset.
void Assembler::blx(int branch_offset) {  // v5 and above
  WriteRecordedPositions();  // call site: flush pending source positions
  ASSERT((branch_offset & 1) == 0);  // target must be halfword-aligned
  int h = ((branch_offset & 2) >> 1)*B24;  // H bit = offset bit 1
  int imm24 = branch_offset >> 2;
  ASSERT(is_int24(imm24));
  emit(15 << 28 | B27 | B25 | h | (imm24 & Imm24Mask));  // cond field = 1111
}
    866 
    867 
    868 void Assembler::blx(Register target, Condition cond) {  // v5 and above
    869   WriteRecordedPositions();
    870   ASSERT(!target.is(pc));
    871   emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | 3*B4 | target.code());
    872 }
    873 
    874 
    875 void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
    876   WriteRecordedPositions();
    877   ASSERT(!target.is(pc));  // use of pc is actually allowed, but discouraged
    878   emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | B4 | target.code());
    879 }
    880 
    881 
    882 // Data-processing instructions.
    883 
    884 // UBFX <Rd>,<Rn>,#<lsb>,#<width - 1>
    885 // Instruction details available in ARM DDI 0406A, A8-464.
    886 // cond(31-28) | 01111(27-23)| 1(22) | 1(21) | widthm1(20-16) |
    887 //  Rd(15-12) | lsb(11-7) | 101(6-4) | Rn(3-0)
void Assembler::ubfx(Register dst, Register src1, const Operand& src2,
                     const Operand& src3, Condition cond) {
  // Unsigned bitfield extract: src2 is the lsb position, src3 is width-1
  // (see the header comment above); both must be plain immediates.
  ASSERT(!src2.rm_.is_valid() && !src3.rm_.is_valid());
  ASSERT(static_cast<uint32_t>(src2.imm32_) <= 0x1f);  // lsb in [0, 31]
  ASSERT(static_cast<uint32_t>(src3.imm32_) <= 0x1f);  // width-1 in [0, 31]
  emit(cond | 0x3F*B21 | src3.imm32_*B16 |
       dst.code()*B12 | src2.imm32_*B7 | 0x5*B4 | src1.code());
}
    896 
    897 
    898 void Assembler::and_(Register dst, Register src1, const Operand& src2,
    899                      SBit s, Condition cond) {
    900   addrmod1(cond | 0*B21 | s, src1, dst, src2);
    901 }
    902 
    903 
    904 void Assembler::eor(Register dst, Register src1, const Operand& src2,
    905                     SBit s, Condition cond) {
    906   addrmod1(cond | 1*B21 | s, src1, dst, src2);
    907 }
    908 
    909 
    910 void Assembler::sub(Register dst, Register src1, const Operand& src2,
    911                     SBit s, Condition cond) {
    912   addrmod1(cond | 2*B21 | s, src1, dst, src2);
    913 }
    914 
    915 
    916 void Assembler::rsb(Register dst, Register src1, const Operand& src2,
    917                     SBit s, Condition cond) {
    918   addrmod1(cond | 3*B21 | s, src1, dst, src2);
    919 }
    920 
    921 
void Assembler::add(Register dst, Register src1, const Operand& src2,
                    SBit s, Condition cond) {
  addrmod1(cond | 4*B21 | s, src1, dst, src2);  // ADD is opcode 4

  // Eliminate pattern: push(r), pop()
  //   str(src, MemOperand(sp, 4, NegPreIndex), al);
  //   add(sp, sp, Operand(kPointerSize));
  // Both instructions can be eliminated.
  int pattern_size = 2 * kInstrSize;
  if (FLAG_push_pop_elimination &&
      // Rewriting is only safe if no bound label and no recorded reloc info
      // points into the two instructions about to be removed.
      last_bound_pos_ <= (pc_offset() - pattern_size) &&
      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
      // Pattern.
      instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
      (instr_at(pc_ - 2 * kInstrSize) & ~RdMask) == kPushRegPattern) {
    pc_ -= 2 * kInstrSize;  // drop both instructions from the buffer
    if (FLAG_print_push_pop_elimination) {
      PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
    }
  }
}
    943 
    944 
    945 void Assembler::adc(Register dst, Register src1, const Operand& src2,
    946                     SBit s, Condition cond) {
    947   addrmod1(cond | 5*B21 | s, src1, dst, src2);
    948 }
    949 
    950 
    951 void Assembler::sbc(Register dst, Register src1, const Operand& src2,
    952                     SBit s, Condition cond) {
    953   addrmod1(cond | 6*B21 | s, src1, dst, src2);
    954 }
    955 
    956 
    957 void Assembler::rsc(Register dst, Register src1, const Operand& src2,
    958                     SBit s, Condition cond) {
    959   addrmod1(cond | 7*B21 | s, src1, dst, src2);
    960 }
    961 
    962 
    963 void Assembler::tst(Register src1, const Operand& src2, Condition cond) {
    964   addrmod1(cond | 8*B21 | S, src1, r0, src2);
    965 }
    966 
    967 
    968 void Assembler::teq(Register src1, const Operand& src2, Condition cond) {
    969   addrmod1(cond | 9*B21 | S, src1, r0, src2);
    970 }
    971 
    972 
    973 void Assembler::cmp(Register src1, const Operand& src2, Condition cond) {
    974   addrmod1(cond | 10*B21 | S, src1, r0, src2);
    975 }
    976 
    977 
    978 void Assembler::cmn(Register src1, const Operand& src2, Condition cond) {
    979   addrmod1(cond | 11*B21 | S, src1, r0, src2);
    980 }
    981 
    982 
    983 void Assembler::orr(Register dst, Register src1, const Operand& src2,
    984                     SBit s, Condition cond) {
    985   addrmod1(cond | 12*B21 | s, src1, dst, src2);
    986 }
    987 
    988 
    989 void Assembler::mov(Register dst, const Operand& src, SBit s, Condition cond) {
    990   if (dst.is(pc)) {
    991     WriteRecordedPositions();
    992   }
    993   addrmod1(cond | 13*B21 | s, r0, dst, src);
    994 }
    995 
    996 
    997 void Assembler::bic(Register dst, Register src1, const Operand& src2,
    998                     SBit s, Condition cond) {
    999   addrmod1(cond | 14*B21 | s, src1, dst, src2);
   1000 }
   1001 
   1002 
   1003 void Assembler::mvn(Register dst, const Operand& src, SBit s, Condition cond) {
   1004   addrmod1(cond | 15*B21 | s, r0, dst, src);
   1005 }
   1006 
   1007 
   1008 // Multiply instructions.
// MLA: 32-bit multiply-accumulate, dst = src1 * src2 + srcA.
void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
                    SBit s, Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
  emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
   1015 
   1016 
// MUL: 32-bit multiply, dst = src1 * src2 (low 32 bits of the product).
void Assembler::mul(Register dst, Register src1, Register src2,
                    SBit s, Condition cond) {
  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
  // dst goes in bits 16-19 for this instruction!
  emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
}
   1023 
   1024 
// SMLAL: signed 64-bit multiply-accumulate, dstH:dstL += src1 * src2.
void Assembler::smlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));  // distinct halves required
  emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
   1036 
   1037 
// SMULL: signed 64-bit multiply, dstH:dstL = src1 * src2.
void Assembler::smull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));  // distinct halves required
  emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
   1049 
   1050 
// UMLAL: unsigned 64-bit multiply-accumulate, dstH:dstL += src1 * src2.
void Assembler::umlal(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));  // distinct halves required
  emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
   1062 
   1063 
// UMULL: unsigned 64-bit multiply, dstH:dstL = src1 * src2.
void Assembler::umull(Register dstL,
                      Register dstH,
                      Register src1,
                      Register src2,
                      SBit s,
                      Condition cond) {
  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
  ASSERT(!dstL.is(dstH));  // distinct halves required
  emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
       src2.code()*B8 | B7 | B4 | src1.code());
}
   1075 
   1076 
   1077 // Miscellaneous arithmetic instructions.
// CLZ: dst = number of leading zero bits in src.
void Assembler::clz(Register dst, Register src, Condition cond) {
  // v5 and above.
  ASSERT(!dst.is(pc) && !src.is(pc));
  emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
       15*B8 | B4 | src.code());
}
   1084 
   1085 
   1086 // Status register access instructions.
// MRS: read the status register selected by s (CPSR or SPSR) into dst.
void Assembler::mrs(Register dst, SRegister s, Condition cond) {
  ASSERT(!dst.is(pc));
  emit(cond | B24 | s | 15*B16 | dst.code()*B12);
}
   1091 
   1092 
// MSR: write src into the status-register fields selected by fields.
// An immediate that cannot be expressed as a rotated 8-bit value is first
// loaded into the scratch register ip (via a pc-relative constant load) and
// the register form is emitted instead.
void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
                    Condition cond) {
  ASSERT(fields >= B16 && fields < B20);  // at least one field set
  Instr instr;
  if (!src.rm_.is_valid()) {
    // Immediate.
    uint32_t rotate_imm;
    uint32_t immed_8;
    if (MustUseIp(src.rmode_) ||
        !fits_shifter(src.imm32_, &rotate_imm, &immed_8, NULL)) {
      // Immediate operand cannot be encoded, load it first to register ip.
      RecordRelocInfo(src.rmode_, src.imm32_);
      ldr(ip, MemOperand(pc, 0), cond);  // pc-relative constant load into ip
      msr(fields, Operand(ip), cond);    // retry with the register form
      return;
    }
    instr = I | rotate_imm*B8 | immed_8;
  } else {
    ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0);  // only rm allowed
    instr = src.rm_.code();
  }
  emit(cond | instr | B24 | B21 | fields | 15*B12);
}
   1116 
   1117 
   1118 // Load/Store instructions.
// LDR: load a word from src into dst (addressing mode 2).
void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
  // A load into pc acts as a branch; flush pending source positions first.
  if (dst.is(pc)) {
    WriteRecordedPositions();
  }
  addrmod2(cond | B26 | L, dst, src);

  // Eliminate pattern: push(r), pop(r)
  //   str(r, MemOperand(sp, 4, NegPreIndex), al)
  //   ldr(r, MemOperand(sp, 4, PostIndex), al)
  // Both instructions can be eliminated.
  int pattern_size = 2 * kInstrSize;
  if (FLAG_push_pop_elimination &&
      // Rewriting is only safe if no bound label and no recorded reloc info
      // points into the two instructions about to be removed.
      last_bound_pos_ <= (pc_offset() - pattern_size) &&
      reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
      // Pattern.
      instr_at(pc_ - 1 * kInstrSize) == (kPopRegPattern | dst.code() * B12) &&
      instr_at(pc_ - 2 * kInstrSize) == (kPushRegPattern | dst.code() * B12)) {
    pc_ -= 2 * kInstrSize;  // drop both instructions from the buffer
    if (FLAG_print_push_pop_elimination) {
      PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
    }
  }
}
   1142 
   1143 
// STR: store the word in src to dst (addressing mode 2).
void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
  addrmod2(cond | B26, src, dst);

  // Eliminate pattern: pop(), push(r)
  //     add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
  // ->  str r, [sp, 0], al
  int pattern_size = 2 * kInstrSize;
  if (FLAG_push_pop_elimination &&
     // Rewriting is only safe if no bound label and no recorded reloc info
     // points into the two instructions about to be replaced.
     last_bound_pos_ <= (pc_offset() - pattern_size) &&
     reloc_info_writer.last_pc() <= (pc_ - pattern_size) &&
     // Pattern.
     instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
     instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
    pc_ -= 2 * kInstrSize;  // remove both, then emit the single replacement
    emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
    if (FLAG_print_push_pop_elimination) {
      PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
    }
  }
}
   1164 
   1165 
   1166 void Assembler::ldrb(Register dst, const MemOperand& src, Condition cond) {
   1167   addrmod2(cond | B26 | B | L, dst, src);
   1168 }
   1169 
   1170 
   1171 void Assembler::strb(Register src, const MemOperand& dst, Condition cond) {
   1172   addrmod2(cond | B26 | B, src, dst);
   1173 }
   1174 
   1175 
   1176 void Assembler::ldrh(Register dst, const MemOperand& src, Condition cond) {
   1177   addrmod3(cond | L | B7 | H | B4, dst, src);
   1178 }
   1179 
   1180 
   1181 void Assembler::strh(Register src, const MemOperand& dst, Condition cond) {
   1182   addrmod3(cond | B7 | H | B4, src, dst);
   1183 }
   1184 
   1185 
   1186 void Assembler::ldrsb(Register dst, const MemOperand& src, Condition cond) {
   1187   addrmod3(cond | L | B7 | S6 | B4, dst, src);
   1188 }
   1189 
   1190 
   1191 void Assembler::ldrsh(Register dst, const MemOperand& src, Condition cond) {
   1192   addrmod3(cond | L | B7 | S6 | H | B4, dst, src);
   1193 }
   1194 
   1195 
   1196 // Load/Store multiple instructions.
// LDM: load multiple registers in dst from memory at base, per block mode am.
void Assembler::ldm(BlockAddrMode am,
                    Register base,
                    RegList dst,
                    Condition cond) {
  // ABI stack constraint: ldmxx base, {..sp..}  base != sp  is not restartable.
  ASSERT(base.is(sp) || (dst & sp.bit()) == 0);

  addrmod4(cond | B27 | am | L, base, dst);

  // Emit the constant pool after a function return implemented by ldm ..{..pc}.
  if (cond == al && (dst & pc.bit()) != 0) {
    // There is a slight chance that the ldm instruction was actually a call,
    // in which case it would be wrong to return into the constant pool; we
    // recognize this case by checking if the emission of the pool was blocked
    // at the pc of the ldm instruction by a mov lr, pc instruction; if this is
    // the case, we emit a jump over the pool.
    CheckConstPool(true, no_const_pool_before_ == pc_offset() - kInstrSize);
  }
}
   1216 
   1217 
   1218 void Assembler::stm(BlockAddrMode am,
   1219                     Register base,
   1220                     RegList src,
   1221                     Condition cond) {
   1222   addrmod4(cond | B27 | am, base, src);
   1223 }
   1224 
   1225 
   1226 // Semaphore instructions.
// SWP: swaps the word at [base] with src; dst receives the old memory value.
void Assembler::swp(Register dst, Register src, Register base, Condition cond) {
  ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
  ASSERT(!dst.is(base) && !src.is(base));  // base must be a distinct register
  emit(cond | P | base.code()*B16 | dst.code()*B12 |
       B7 | B4 | src.code());
}
   1233 
   1234 
// SWPB: byte variant of swp — swaps the byte at [base] with src (B bit set).
void Assembler::swpb(Register dst,
                     Register src,
                     Register base,
                     Condition cond) {
  ASSERT(!dst.is(pc) && !src.is(pc) && !base.is(pc));
  ASSERT(!dst.is(base) && !src.is(base));  // base must be a distinct register
  emit(cond | P | B | base.code()*B16 | dst.code()*B12 |
       B7 | B4 | src.code());
}
   1244 
   1245 
   1246 // Exception-generating instructions and debugging support.
// Stops execution with a message. On hardware this is a breakpoint (or a
// software interrupt pre-v5); when not targeting real ARM hardware the raw
// message pointer is embedded in a never-condition instruction word.
void Assembler::stop(const char* msg) {
#ifndef __arm__
  // The simulator handles these special instructions and stops execution.
  emit(15 << 28 | ((intptr_t) msg));  // cond = 1111; payload is the pointer
#else  // def __arm__
#ifdef CAN_USE_ARMV5_INSTRUCTIONS
  bkpt(0);
#else  // ndef CAN_USE_ARMV5_INSTRUCTIONS
  swi(0x9f0001);
#endif  // ndef CAN_USE_ARMV5_INSTRUCTIONS
#endif  // def __arm__
}
   1259 
   1260 
// BKPT: software breakpoint; always unconditional (al). The 16-bit immediate
// is split: the upper 12 bits go at bit 8, the lower 4 bits at bit 0.
void Assembler::bkpt(uint32_t imm16) {  // v5 and above
  ASSERT(is_uint16(imm16));
  emit(al | B24 | B21 | (imm16 >> 4)*B8 | 7*B4 | (imm16 & 0xf));
}
   1265 
   1266 
   1267 void Assembler::swi(uint32_t imm24, Condition cond) {
   1268   ASSERT(is_uint24(imm24));
   1269   emit(cond | 15*B24 | imm24);
   1270 }
   1271 
   1272 
   1273 // Coprocessor instructions.
// CDP: coprocessor data operation; the opcodes' meaning is defined by the
// target coprocessor.
void Assembler::cdp(Coprocessor coproc,
                    int opcode_1,
                    CRegister crd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));  // field widths: 4 and 3
  emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
       crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
}
   1285 
   1286 
   1287 void Assembler::cdp2(Coprocessor coproc,
   1288                      int opcode_1,
   1289                      CRegister crd,
   1290                      CRegister crn,
   1291                      CRegister crm,
   1292                      int opcode_2) {  // v5 and above
   1293   cdp(coproc, opcode_1, crd, crn, crm, opcode_2, static_cast<Condition>(nv));
   1294 }
   1295 
   1296 
// MCR: move the core register rd to coprocessor registers crn/crm.
void Assembler::mcr(Coprocessor coproc,
                    int opcode_1,
                    Register rd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));  // both fields are 3 bits
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}
   1308 
   1309 
   1310 void Assembler::mcr2(Coprocessor coproc,
   1311                      int opcode_1,
   1312                      Register rd,
   1313                      CRegister crn,
   1314                      CRegister crm,
   1315                      int opcode_2) {  // v5 and above
   1316   mcr(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
   1317 }
   1318 
   1319 
// MRC: move a value from coprocessor registers crn/crm into rd (L bit set).
void Assembler::mrc(Coprocessor coproc,
                    int opcode_1,
                    Register rd,
                    CRegister crn,
                    CRegister crm,
                    int opcode_2,
                    Condition cond) {
  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));  // both fields are 3 bits
  emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
       rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
}
   1331 
   1332 
   1333 void Assembler::mrc2(Coprocessor coproc,
   1334                      int opcode_1,
   1335                      Register rd,
   1336                      CRegister crn,
   1337                      CRegister crm,
   1338                      int opcode_2) {  // v5 and above
   1339   mrc(coproc, opcode_1, rd, crn, crm, opcode_2, static_cast<Condition>(nv));
   1340 }
   1341 
   1342 
   1343 void Assembler::ldc(Coprocessor coproc,
   1344                     CRegister crd,
   1345                     const MemOperand& src,
   1346                     LFlag l,
   1347                     Condition cond) {
   1348   addrmod5(cond | B27 | B26 | l | L | coproc*B8, crd, src);
   1349 }
   1350 
   1351 
// LDC (unindexed form): load coprocessor register crd from [rn]; the 8-bit
// option field is passed through to the coprocessor unchanged.
void Assembler::ldc(Coprocessor coproc,
                    CRegister crd,
                    Register rn,
                    int option,
                    LFlag l,
                    Condition cond) {
  // Unindexed addressing.
  ASSERT(is_uint8(option));  // option must fit the 8-bit field
  emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
       coproc*B8 | (option & 255));
}
   1363 
   1364 
   1365 void Assembler::ldc2(Coprocessor coproc,
   1366                      CRegister crd,
   1367                      const MemOperand& src,
   1368                      LFlag l) {  // v5 and above
   1369   ldc(coproc, crd, src, l, static_cast<Condition>(nv));
   1370 }
   1371 
   1372 
   1373 void Assembler::ldc2(Coprocessor coproc,
   1374                      CRegister crd,
   1375                      Register rn,
   1376                      int option,
   1377                      LFlag l) {  // v5 and above
   1378   ldc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
   1379 }
   1380 
   1381 
   1382 void Assembler::stc(Coprocessor coproc,
   1383                     CRegister crd,
   1384                     const MemOperand& dst,
   1385                     LFlag l,
   1386                     Condition cond) {
   1387   addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst);
   1388 }
   1389 
   1390 
// STC (unindexed form): store coprocessor register crd to [rn]; the 8-bit
// option field is passed through to the coprocessor unchanged.
void Assembler::stc(Coprocessor coproc,
                    CRegister crd,
                    Register rn,
                    int option,
                    LFlag l,
                    Condition cond) {
  // Unindexed addressing.
  ASSERT(is_uint8(option));  // option must fit the 8-bit field
  emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
       coproc*B8 | (option & 255));
}
   1402 
   1403 
   1404 void Assembler::stc2(Coprocessor
   1405                      coproc, CRegister crd,
   1406                      const MemOperand& dst,
   1407                      LFlag l) {  // v5 and above
   1408   stc(coproc, crd, dst, l, static_cast<Condition>(nv));
   1409 }
   1410 
   1411 
   1412 void Assembler::stc2(Coprocessor coproc,
   1413                      CRegister crd,
   1414                      Register rn,
   1415                      int option,
   1416                      LFlag l) {  // v5 and above
   1417   stc(coproc, crd, rn, option, l, static_cast<Condition>(nv));
   1418 }
   1419 
   1420 
   1421 // Support for VFP.
   1422 void Assembler::vldr(const DwVfpRegister dst,
   1423                      const Register base,
   1424                      int offset,
   1425                      const Condition cond) {
   1426   // Ddst = MEM(Rbase + offset).
   1427   // Instruction details available in ARM DDI 0406A, A8-628.
   1428   // cond(31-28) | 1101(27-24)| 1001(23-20) | Rbase(19-16) |
   1429   // Vdst(15-12) | 1011(11-8) | offset
   1430   ASSERT(CpuFeatures::IsEnabled(VFP3));
   1431   ASSERT(offset % 4 == 0);
   1432   emit(cond | 0xD9*B20 | base.code()*B16 | dst.code()*B12 |
   1433        0xB*B8 | ((offset / 4) & 255));
   1434 }
   1435 
   1436 
   1437 void Assembler::vstr(const DwVfpRegister src,
   1438                      const Register base,
   1439                      int offset,
   1440                      const Condition cond) {
   1441   // MEM(Rbase + offset) = Dsrc.
   1442   // Instruction details available in ARM DDI 0406A, A8-786.
   1443   // cond(31-28) | 1101(27-24)| 1000(23-20) | | Rbase(19-16) |
   1444   // Vsrc(15-12) | 1011(11-8) | (offset/4)
   1445   ASSERT(CpuFeatures::IsEnabled(VFP3));
   1446   ASSERT(offset % 4 == 0);
   1447   emit(cond | 0xD8*B20 | base.code()*B16 | src.code()*B12 |
   1448        0xB*B8 | ((offset / 4) & 255));
   1449 }
   1450 
   1451 
void Assembler::vmov(const DwVfpRegister dst,
                     const Register src1,
                     const Register src2,
                     const Condition cond) {
  // Dm = <Rt,Rt2>.
  // Instruction details available in ARM DDI 0406A, A8-646.
  // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!src1.is(pc) && !src2.is(pc));
  // Only the low 4 bits of dst go in the Vm field; the M bit is not encoded.
  emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
       src1.code()*B12 | 0xB*B8 | B4 | dst.code());
}
   1465 
   1466 
void Assembler::vmov(const Register dst1,
                     const Register dst2,
                     const DwVfpRegister src,
                     const Condition cond) {
  // <Rt,Rt2> = Dm.
  // Instruction details available in ARM DDI 0406A, A8-646.
  // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
  // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!dst1.is(pc) && !dst2.is(pc));
  // B20 (op=1) selects the VFP-to-core direction, the reverse of the
  // overload above.
  emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
       dst1.code()*B12 | 0xB*B8 | B4 | src.code());
}
   1480 
   1481 
void Assembler::vmov(const SwVfpRegister dst,
                     const Register src,
                     const Condition cond) {
  // Sn = Rt.
  // Instruction details available in ARM DDI 0406A, A8-642.
  // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!src.is(pc));
  // The single-precision register number is split: high bits in Vn(19-16),
  // the lowest bit in N(7).
  emit(cond | 0xE*B24 | (dst.code() >> 1)*B16 |
       src.code()*B12 | 0xA*B8 | (0x1 & dst.code())*B7 | B4);
}
   1494 
   1495 
void Assembler::vmov(const Register dst,
                     const SwVfpRegister src,
                     const Condition cond) {
  // Rt = Sn.
  // Instruction details available in ARM DDI 0406A, A8-642.
  // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
  // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  ASSERT(!dst.is(pc));
  // B20 (op=1) selects the VFP-to-core direction; the single-precision
  // register number is split across Vn(19-16) and N(7).
  emit(cond | 0xE*B24 | B20 | (src.code() >> 1)*B16 |
       dst.code()*B12 | 0xA*B8 | (0x1 & src.code())*B7 | B4);
}
   1508 
   1509 
void Assembler::vcvt(const DwVfpRegister dst,
                     const SwVfpRegister src,
                     const Condition cond) {
  // Dd = Sm (integer in Sm converted to IEEE 64-bit doubles in Dd).
  // Instruction details available in ARM DDI 0406A, A8-576.
  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=000(18-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=1 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  // The source register number is split across M(5) and Vm(3-0); the
  // destination's D bit is not encoded (only the low 4 bits of dst.code()).
  emit(cond | 0xE*B24 | B23 | 0x3*B20 | B19 |
       dst.code()*B12 | 0x5*B9 | B8 | B7 | B6 |
       (0x1 & src.code())*B5 | (src.code() >> 1));
}
   1522 
   1523 
void Assembler::vcvt(const SwVfpRegister dst,
                     const DwVfpRegister src,
                     const Condition cond) {
  // Sd = Dm (IEEE 64-bit doubles in Dm converted to 32 bit integer in Sd).
  // Instruction details available in ARM DDI 0406A, A8-576.
  // cond(31-28) | 11101(27-23)| D=?(22) | 11(21-20) | 1(19) | opc2=101(18-16)|
  // Vd(15-12) | 101(11-9) | sz(8)=1 | op(7)=? | 1(6) | M=?(5) | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  // The destination register number is split across D(22) and Vd(15-12).
  emit(cond | 0xE*B24 | B23 |(0x1 & dst.code())*B22 |
       0x3*B20 | B19 | 0x5*B16 | (dst.code() >> 1)*B12 |
       0x5*B9 | B8 | B7 | B6 | src.code());
}
   1536 
   1537 
void Assembler::vadd(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vadd(Dn, Dm) double precision floating point addition.
  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
  // Instruction details available in ARM DDI 0406A, A8-536.
  // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  // Only the low 4 register bits are encoded; the D/N/M high bits stay 0.
  emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
   1551 
   1552 
void Assembler::vsub(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vsub(Dn, Dm) double precision floating point subtraction.
  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
  // Instruction details available in ARM DDI 0406A, A8-784.
  // cond(31-28) | 11100(27-23)| D=?(22) | 11(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 1(6) | M=?(5) | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  // Differs from vadd only by B6; D/N/M high register bits stay 0.
  emit(cond | 0xE*B24 | 0x3*B20 | src1.code()*B16 |
       dst.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
}
   1566 
   1567 
void Assembler::vmul(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vmul(Dn, Dm) double precision floating point multiplication.
  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
  // Instruction details available in ARM DDI 0406A, A8-784.
  // cond(31-28) | 11100(27-23)| D=?(22) | 10(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=0 | 0(6) | M=?(5) | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  // Opcode field 10 (0x2*B20) distinguishes multiply from add/sub (11).
  emit(cond | 0xE*B24 | 0x2*B20 | src1.code()*B16 |
       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
   1581 
   1582 
void Assembler::vdiv(const DwVfpRegister dst,
                     const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const Condition cond) {
  // Dd = vdiv(Dn, Dm) double precision floating point division.
  // Dd = D:Vd; Dm=M:Vm; Dn=N:Vm.
  // Instruction details available in ARM DDI 0406A, A8-584.
  // cond(31-28) | 11101(27-23)| D=?(22) | 00(21-20) | Vn(19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | N(7)=? | 0(6) | M=?(5) | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  // B23 with opcode bits 00 selects the divide encoding.
  emit(cond | 0xE*B24 | B23 | src1.code()*B16 |
       dst.code()*B12 | 0x5*B9 | B8 | src2.code());
}
   1596 
   1597 
void Assembler::vcmp(const DwVfpRegister src1,
                     const DwVfpRegister src2,
                     const SBit s,
                     const Condition cond) {
  // vcmp(Dd, Dm) double precision floating point comparison.
  // Instruction details available in ARM DDI 0406A, A8-570.
  // cond(31-28) | 11101 (27-23)| D=?(22) | 11 (21-20) | 0100 (19-16) |
  // Vd(15-12) | 101(11-9) | sz(8)=1 | E(7)=? | 1(6) | M(5)=? | 0(4) | Vm(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  // NOTE(review): the SBit parameter s is not used in the encoding below —
  // confirm whether callers expect it to have any effect.
  emit(cond | 0xE*B24 |B23 | 0x3*B20 | B18 |
       src1.code()*B12 | 0x5*B9 | B8 | B6 | src2.code());
}
   1610 
   1611 
// VMRS: copy the VFP status register into core register dst.
void Assembler::vmrs(Register dst, Condition cond) {
  // Instruction details available in ARM DDI 0406A, A8-652.
  // cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
  // Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
  ASSERT(CpuFeatures::IsEnabled(VFP3));
  emit(cond | 0xE*B24 | 0xF*B20 |  B16 |
       dst.code()*B12 | 0xA*B8 | B4);
}
   1620 
   1621 
   1622 // Pseudo instructions.
   1623 void Assembler::lea(Register dst,
   1624                     const MemOperand& x,
   1625                     SBit s,
   1626                     Condition cond) {
   1627   int am = x.am_;
   1628   if (!x.rm_.is_valid()) {
   1629     // Immediate offset.
   1630     if ((am & P) == 0)  // post indexing
   1631       mov(dst, Operand(x.rn_), s, cond);
   1632     else if ((am & U) == 0)  // negative indexing
   1633       sub(dst, x.rn_, Operand(x.offset_), s, cond);
   1634     else
   1635       add(dst, x.rn_, Operand(x.offset_), s, cond);
   1636   } else {
   1637     // Register offset (shift_imm_ and shift_op_ are 0) or scaled
   1638     // register offset the constructors make sure than both shift_imm_
   1639     // and shift_op_ are initialized.
   1640     ASSERT(!x.rm_.is(pc));
   1641     if ((am & P) == 0)  // post indexing
   1642       mov(dst, Operand(x.rn_), s, cond);
   1643     else if ((am & U) == 0)  // negative indexing
   1644       sub(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
   1645     else
   1646       add(dst, x.rn_, Operand(x.rm_, x.shift_op_, x.shift_imm_), s, cond);
   1647   }
   1648 }
   1649 
   1650 
   1651 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
   1652   uint32_t dummy1;
   1653   uint32_t dummy2;
   1654   return fits_shifter(imm32, &dummy1, &dummy2, NULL);
   1655 }
   1656 
   1657 
   1658 void Assembler::BlockConstPoolFor(int instructions) {
   1659   BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
   1660 }
   1661 
   1662 
   1663 // Debugging.
void Assembler::RecordJSReturn() {
  // Records a JS_RETURN reloc entry at the current pc. Pending source
  // positions are written out first so their entries precede the return
  // entry in the reloc info stream.
  WriteRecordedPositions();
  CheckBuffer();
  RecordRelocInfo(RelocInfo::JS_RETURN);
}
   1669 
   1670 
   1671 void Assembler::RecordComment(const char* msg) {
   1672   if (FLAG_debug_code) {
   1673     CheckBuffer();
   1674     RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
   1675   }
   1676 }
   1677 
   1678 
   1679 void Assembler::RecordPosition(int pos) {
   1680   if (pos == RelocInfo::kNoPosition) return;
   1681   ASSERT(pos >= 0);
   1682   current_position_ = pos;
   1683 }
   1684 
   1685 
   1686 void Assembler::RecordStatementPosition(int pos) {
   1687   if (pos == RelocInfo::kNoPosition) return;
   1688   ASSERT(pos >= 0);
   1689   current_statement_position_ = pos;
   1690 }
   1691 
   1692 
   1693 void Assembler::WriteRecordedPositions() {
   1694   // Write the statement position if it is different from what was written last
   1695   // time.
   1696   if (current_statement_position_ != written_statement_position_) {
   1697     CheckBuffer();
   1698     RecordRelocInfo(RelocInfo::STATEMENT_POSITION, current_statement_position_);
   1699     written_statement_position_ = current_statement_position_;
   1700   }
   1701 
   1702   // Write the position if it is different from what was written last time and
   1703   // also different from the written statement position.
   1704   if (current_position_ != written_position_ &&
   1705       current_position_ != written_statement_position_) {
   1706     CheckBuffer();
   1707     RecordRelocInfo(RelocInfo::POSITION, current_position_);
   1708     written_position_ = current_position_;
   1709   }
   1710 }
   1711 
   1712 
void Assembler::GrowBuffer() {
  // Grows the code buffer. Instructions live at the start of the buffer and
  // relocation info is written backwards from the end, so both areas are
  // copied separately and all internal pointers are shifted by how far each
  // area moved.
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 4*KB) {
    desc.buffer_size = 4*KB;
  } else if (buffer_size_ < 1*MB) {
    // Double up to 1MB, then grow linearly.
    desc.buffer_size = 2*buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1*MB;
  }
  CHECK_GT(desc.buffer_size, 0);  // no overflow

  // Setup new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);

  desc.instr_size = pc_offset();
  // Reloc info occupies the tail of the buffer, from reloc_info_writer.pos()
  // to the buffer end.
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.
  // pc_delta: how far the (start-anchored) instruction area moved;
  // rc_delta: how far the (end-anchored) reloc info area moved.
  int pc_delta = desc.buffer - buffer_;
  int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta,
          reloc_info_writer.pos(), desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // None of our relocation types are pc relative pointing outside the code
  // buffer nor pc absolute pointing inside the code buffer, so there is no need
  // to relocate any emitted relocation entries.

  // Relocate pending relocation entries (their pcs are absolute addresses
  // into the old buffer). JS_RETURN entries are excluded below.
  for (int i = 0; i < num_prinfo_; i++) {
    RelocInfo& rinfo = prinfo_[i];
    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
           rinfo.rmode() != RelocInfo::POSITION);
    if (rinfo.rmode() != RelocInfo::JS_RETURN) {
      rinfo.set_pc(rinfo.pc() + pc_delta);
    }
  }
}
   1762 
   1763 
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  // Records relocation info at the current pc. Modes other than
  // JS_RETURN/COMMENT/POSITION also get a slot in the pending constant
  // pool (prinfo_), to be emitted by CheckConstPool.
  RelocInfo rinfo(pc_, rmode, data);  // we do not try to reuse pool constants
  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::STATEMENT_POSITION) {
    // Adjust code for new modes.
    ASSERT(RelocInfo::IsJSReturn(rmode)
           || RelocInfo::IsComment(rmode)
           || RelocInfo::IsPosition(rmode));
    // These modes do not need an entry in the constant pool.
  } else {
    ASSERT(num_prinfo_ < kMaxNumPRInfo);
    prinfo_[num_prinfo_++] = rinfo;
    // Make sure the constant pool is not emitted in place of the next
    // instruction for which we just recorded relocation info.
    BlockConstPoolBefore(pc_offset() + kInstrSize);
  }
  if (rinfo.rmode() != RelocInfo::NONE) {
    // Don't record external references unless the heap will be serialized.
    if (rmode == RelocInfo::EXTERNAL_REFERENCE) {
#ifdef DEBUG
      // In debug builds, flag the point past which serialization can no
      // longer be turned on.
      if (!Serializer::enabled()) {
        Serializer::TooLateToEnableNow();
      }
#endif
      if (!Serializer::enabled() && !FLAG_debug_code) {
        return;
      }
    }
    ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
    reloc_info_writer.Write(&rinfo);
  }
}
   1795 
   1796 
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
  // Emits the pending constant pool when necessary (or unconditionally when
  // force_emit is set), then back-patches each pending pc-relative ldr/str
  // to point at its pool entry. require_jump means execution can fall
  // through to this point, so a branch over the pool must be emitted.

  // Calculate the offset of the next check. It will be overwritten
  // when a const pool is generated or when const pools are being
  // blocked for a specific range.
  next_buffer_check_ = pc_offset() + kCheckConstInterval;

  // There is nothing to do if there are no pending relocation info entries.
  if (num_prinfo_ == 0) return;

  // We emit a constant pool at regular intervals of about kDistBetweenPools
  // or when requested by parameter force_emit (e.g. after each function).
  // We prefer not to emit a jump unless the max distance is reached or if we
  // are running low on slots, which can happen if a lot of constants are being
  // emitted (e.g. --debug-code and many static references).
  int dist = pc_offset() - last_const_pool_end_;
  if (!force_emit && dist < kMaxDistBetweenPools &&
      (require_jump || dist < kDistBetweenPools) &&
      // TODO(1236125): Cleanup the "magic" number below. We know that
      // the code generation will test every kCheckConstIntervalInst.
      // Thus we are safe as long as we generate less than 7 constant
      // entries per instruction.
      (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
    return;
  }

  // If we did not return by now, we need to emit the constant pool soon.

  // However, some small sequences of instructions must not be broken up by the
  // insertion of a constant pool; such sequences are protected by setting
  // no_const_pool_before_, which is checked here. Also, recursive calls to
  // CheckConstPool are blocked by no_const_pool_before_.
  if (pc_offset() < no_const_pool_before_) {
    // Emission is currently blocked; make sure we try again as soon as
    // possible.
    next_buffer_check_ = no_const_pool_before_;

    // Something is wrong if emission is forced and blocked at the same time.
    ASSERT(!force_emit);
    return;
  }

  int jump_instr = require_jump ? kInstrSize : 0;

  // Check that the code buffer is large enough before emitting the constant
  // pool and relocation information (include the jump over the pool and the
  // constant pool marker).
  int max_needed_space =
      jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
  while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();

  // Block recursive calls to CheckConstPool.
  BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
                       num_prinfo_*kInstrSize);
  // Don't bother to check for the emit calls below.
  next_buffer_check_ = no_const_pool_before_;

  // Emit jump over constant pool if necessary.
  Label after_pool;
  if (require_jump) b(&after_pool);

  RecordComment("[ Constant Pool");

  // Put down constant pool marker "Undefined instruction" as specified by
  // A3.1 Instruction set encoding. The low bits carry the entry count.
  emit(0x03000000 | num_prinfo_);

  // Emit constant pool entries and patch the instructions that refer to them.
  for (int i = 0; i < num_prinfo_; i++) {
    RelocInfo& rinfo = prinfo_[i];
    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
           rinfo.rmode() != RelocInfo::POSITION &&
           rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
    Instr instr = instr_at(rinfo.pc());

    // Instruction to patch must be a ldr/str [pc, #offset].
    // P and U set, B and W clear, Rn == pc, offset12 still 0.
    ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | Off12Mask)) ==
           (2*B25 | P | U | pc.code()*B16));
    // -8: a pc-relative load on ARM reads the pc 8 bytes (2 instructions)
    // ahead of the instruction's own address.
    int delta = pc_ - rinfo.pc() - 8;
    ASSERT(delta >= -4);  // instr could be ldr pc, [pc, #-4] followed by targ32
    if (delta < 0) {
      // Negative offset: clear the U (add) bit and store the magnitude.
      instr &= ~U;
      delta = -delta;
    }
    ASSERT(is_uint12(delta));
    // Write the offset into the instruction's offset12 field, then emit the
    // constant itself as the pool entry.
    instr_at_put(rinfo.pc(), instr + delta);
    emit(rinfo.data());
  }
  num_prinfo_ = 0;
  last_const_pool_end_ = pc_offset();

  RecordComment("]");

  if (after_pool.is_linked()) {
    bind(&after_pool);
  }

  // Since a constant pool was just emitted, move the check offset forward by
  // the standard interval.
  next_buffer_check_ = pc_offset() + kCheckConstInterval;
}
   1898 
   1899 
   1900 } }  // namespace v8::internal
   1901