Home | History | Annotate | Download | only in ppc
      1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
      2 // All Rights Reserved.
      3 //
      4 // Redistribution and use in source and binary forms, with or without
      5 // modification, are permitted provided that the following conditions
      6 // are met:
      7 //
      8 // - Redistributions of source code must retain the above copyright notice,
      9 // this list of conditions and the following disclaimer.
     10 //
     11 // - Redistribution in binary form must reproduce the above copyright
     12 // notice, this list of conditions and the following disclaimer in the
     13 // documentation and/or other materials provided with the
     14 // distribution.
     15 //
     16 // - Neither the name of Sun Microsystems or the names of contributors may
     17 // be used to endorse or promote products derived from this software without
     18 // specific prior written permission.
     19 //
     20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
     23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
     24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
     25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
     26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
     29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
     31 // OF THE POSSIBILITY OF SUCH DAMAGE.
     32 
     33 // The original source code covered by the above license above has been
     34 // modified significantly by Google Inc.
     35 // Copyright 2014 the V8 project authors. All rights reserved.
     36 
     37 #include "src/ppc/assembler-ppc.h"
     38 
     39 #if V8_TARGET_ARCH_PPC
     40 
     41 #include "src/base/bits.h"
     42 #include "src/base/cpu.h"
     43 #include "src/macro-assembler.h"
     44 #include "src/ppc/assembler-ppc-inl.h"
     45 
     46 namespace v8 {
     47 namespace internal {
     48 
     49 // Get the CPU features enabled by the build.
     50 static unsigned CpuFeaturesImpliedByCompiler() {
     51   unsigned answer = 0;
     52   return answer;
     53 }
     54 
     55 
// Probe the host CPU (unless cross-compiling) and record which optional
// features — FPU, LWSYNC, ISELECT, FPR_GPR_MOV — may be used, plus the
// instruction-cache line size used when flushing patched code.
void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();
  // Conservative default; overridden below if the OS reports a real value.
  icache_line_size_ = 128;

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

// Detect whether frim instruction is supported (POWER5+)
// For now we will just check for processors we know do not
// support it
#ifndef USE_SIMULATOR
  // Probe for additional features at runtime.
  base::CPU cpu;
#if V8_TARGET_ARCH_PPC64
  // Direct FPR<->GPR moves are only used on POWER8.
  if (cpu.part() == base::CPU::PPC_POWER8) {
    supported_ |= (1u << FPR_GPR_MOV);
  }
#endif
  if (cpu.part() == base::CPU::PPC_POWER6 ||
      cpu.part() == base::CPU::PPC_POWER7 ||
      cpu.part() == base::CPU::PPC_POWER8) {
    supported_ |= (1u << LWSYNC);
  }
  if (cpu.part() == base::CPU::PPC_POWER7 ||
      cpu.part() == base::CPU::PPC_POWER8) {
    supported_ |= (1u << ISELECT);
  }
#if V8_OS_LINUX
  if (!(cpu.part() == base::CPU::PPC_G5 || cpu.part() == base::CPU::PPC_G4)) {
    // Assume support
    supported_ |= (1u << FPU);
  }
  if (cpu.icache_line_size() != base::CPU::UNKNOWN_CACHE_LINE_SIZE) {
    icache_line_size_ = cpu.icache_line_size();
  }
#elif V8_OS_AIX
  // Assume support FP support and default cache line size
  supported_ |= (1u << FPU);
#endif
#else  // Simulator
  // The simulator models all optional features, so enable everything.
  supported_ |= (1u << FPU);
  supported_ |= (1u << LWSYNC);
  supported_ |= (1u << ISELECT);
#if V8_TARGET_ARCH_PPC64
  supported_ |= (1u << FPR_GPR_MOV);
#endif
#endif
}
    104 
    105 
    106 void CpuFeatures::PrintTarget() {
    107   const char* ppc_arch = NULL;
    108 
    109 #if V8_TARGET_ARCH_PPC64
    110   ppc_arch = "ppc64";
    111 #else
    112   ppc_arch = "ppc";
    113 #endif
    114 
    115   printf("target %s\n", ppc_arch);
    116 }
    117 
    118 
// Print the detected feature flags (currently only FPU) to stdout.
void CpuFeatures::PrintFeatures() {
  printf("FPU=%d\n", CpuFeatures::IsSupported(FPU));
}
    122 
    123 
// Map a numeric register code (0..kNumRegisters-1) to its Register value.
// Note that some slots carry their ABI aliases: 1 -> sp, 12 -> ip, 31 -> fp.
Register ToRegister(int num) {
  DCHECK(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {r0,  sp,  r2,  r3,  r4,  r5,  r6,  r7,
                                 r8,  r9,  r10, r11, ip,  r13, r14, r15,
                                 r16, r17, r18, r19, r20, r21, r22, r23,
                                 r24, r25, r26, r27, r28, r29, r30, fp};
  return kRegisters[num];
}
    132 
    133 
    134 // -----------------------------------------------------------------------------
    135 // Implementation of RelocInfo
    136 
// Only internal references need fixing up when generated code is moved.
const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE |
                                  1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;
    139 
    140 
bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially
  // coded.  Being specially coded on PPC means that it is a lis/ori
  // instruction sequence or is a constant pool entry, and these are
  // always the case inside code objects.
  return true;
}


// Returns true when this relocation's pc points at a constant pool load.
// Always false unless the embedded-constant-pool feature is enabled.
bool RelocInfo::IsInConstantPool() {
  if (FLAG_enable_embedded_constant_pool) {
    Address constant_pool = host_->constant_pool();
    return (constant_pool && Assembler::IsConstantPoolLoadStart(pc_));
  }
  return false;
}
    157 
// Read the wasm memory base address embedded at this relocation's pc.
Address RelocInfo::wasm_memory_reference() {
  DCHECK(IsWasmMemoryReference(rmode_));
  return Assembler::target_address_at(pc_, host_);
}

// Read the wasm memory size embedded at this relocation's pc.  The value is
// stored in an address-sized slot, hence the narrowing cast back to 32 bits.
uint32_t RelocInfo::wasm_memory_size_reference() {
  DCHECK(IsWasmMemorySizeReference(rmode_));
  return static_cast<uint32_t>(
     reinterpret_cast<intptr_t>(Assembler::target_address_at(pc_, host_)));
}

// Read the wasm global address embedded at this relocation's pc.
Address RelocInfo::wasm_global_reference() {
  DCHECK(IsWasmGlobalReference(rmode_));
  return Assembler::target_address_at(pc_, host_);
}


// Patch in a new wasm memory base address; "unchecked" because callers have
// already validated the relocation mode.
void RelocInfo::unchecked_update_wasm_memory_reference(
    Address address, ICacheFlushMode flush_mode) {
  Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
}

// Patch in a new wasm memory size (stored widened to an address-sized value).
void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
                                                  ICacheFlushMode flush_mode) {
  Assembler::set_target_address_at(isolate_, pc_, host_,
                                   reinterpret_cast<Address>(size), flush_mode);
}
    185 
    186 // -----------------------------------------------------------------------------
    187 // Implementation of Operand and MemOperand
    188 // See assembler-ppc-inl.h for inlined constructors
    189 
// Build an immediate operand from a handle: heap objects are embedded via the
// handle location with EMBEDDED_OBJECT relocation; smis are embedded directly
// with no relocation.
// NOTE(review): the comment below says new-space objects are verified, but no
// check is visible here — presumably enforced elsewhere; confirm.
Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    imm_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // no relocation needed
    imm_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = kRelocInfo_NONEPTR;
  }
}
    204 
    205 
    206 MemOperand::MemOperand(Register rn, int32_t offset) {
    207   ra_ = rn;
    208   rb_ = no_reg;
    209   offset_ = offset;
    210 }
    211 
    212 
    213 MemOperand::MemOperand(Register ra, Register rb) {
    214   ra_ = ra;
    215   rb_ = rb;
    216   offset_ = 0;
    217 }
    218 
    219 
    220 // -----------------------------------------------------------------------------
    221 // Specific instructions, constants, and masks.
    222 
// Construct an assembler over the given buffer (owned by AssemblerBase) and
// reset all bookkeeping: relocation writer, trampoline-pool state, constant
// pool sharing, and branch tracking.
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      constant_pool_builder_(kLoadPtrMaxReachBits, kLoadDoubleMaxReachBits) {
  // Reloc info grows downward from the end of the buffer; code grows upward.
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  constant_pool_entry_sharing_blocked_nesting_ = 0;
  next_trampoline_check_ = kMaxInt;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;
  optimizable_cmpi_pos_ = -1;
  // With --force-long-branches we behave as if a trampoline was already
  // emitted, so all branches take the long form.
  trampoline_emitted_ = FLAG_force_long_branches;
  tracked_branch_count_ = 0;
  ClearRecordedAstId();
  relocations_.reserve(128);
}
    241 
    242 
// Finalize code generation: flush the constant pool and relocation entries,
// then fill in the code descriptor for the caller.
void Assembler::GetCode(CodeDesc* desc) {
  // Emit constant pool if necessary.
  int constant_pool_offset = EmitConstantPool();

  EmitRelocations();

  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  // Reloc info was written backwards from the end of the buffer.
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
  // A zero offset means no constant pool was emitted.
  desc->constant_pool_size =
      (constant_pool_offset ? desc->instr_size - constant_pool_offset : 0);
  desc->origin = this;
  desc->unwinding_info_size = 0;
  desc->unwinding_info = nullptr;
}
    260 
    261 
// Pad with nops until the current offset is a multiple of m.  m must be a
// power of two and at least one instruction wide.
void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
  DCHECK((pc_offset() & (kInstrSize - 1)) == 0);
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


// Align for a code target (8 bytes).
void Assembler::CodeTargetAlign() { Align(8); }
    272 
    273 
// Decode the condition encoded in a branch instruction's BO field.
// Only the two forms the patching code generates (BT/BF) are handled.
Condition Assembler::GetCondition(Instr instr) {
  switch (instr & kCondMask) {
    case BT:
      return eq;
    case BF:
      return ne;
    default:
      UNIMPLEMENTED();
  }
  // Unreachable; keeps the compiler happy after UNIMPLEMENTED().
  return al;
}
    285 
    286 
// Instruction-recognition predicates, used when inspecting or patching
// already-emitted code.  lis/li are the addis/addi forms with RA == r0.
bool Assembler::IsLis(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIS) && GetRA(instr).is(r0);
}


bool Assembler::IsLi(Instr instr) {
  return ((instr & kOpcodeMask) == ADDI) && GetRA(instr).is(r0);
}


bool Assembler::IsAddic(Instr instr) { return (instr & kOpcodeMask) == ADDIC; }


bool Assembler::IsOri(Instr instr) { return (instr & kOpcodeMask) == ORI; }


// BCX is the conditional-branch primary opcode.
bool Assembler::IsBranch(Instr instr) { return ((instr & kOpcodeMask) == BCX); }
    304 
    305 
// Extract the RA register operand from an instruction.
Register Assembler::GetRA(Instr instr) {
  Register reg;
  reg.reg_code = Instruction::RAValue(instr);
  return reg;
}


// Extract the RB register operand from an instruction.
Register Assembler::GetRB(Instr instr) {
  Register reg;
  reg.reg_code = Instruction::RBValue(instr);
  return reg;
}


#if V8_TARGET_ARCH_PPC64
// This code assumes a FIXED_SEQUENCE for 64bit loads (lis/ori)
bool Assembler::Is64BitLoadIntoR12(Instr instr1, Instr instr2, Instr instr3,
                                   Instr instr4, Instr instr5) {
  // Check the instructions are indeed a five part load (into r12)
  // 3d800000       lis     r12, 0
  // 618c0000       ori     r12, r12, 0
  // 798c07c6       rldicr  r12, r12, 32, 31
  // 658c00c3       oris    r12, r12, 195
  // 618ccd40       ori     r12, r12, 52544
  // Only the opcode/register halves (upper 16 bits) are compared; the
  // immediate halves hold the address being loaded and may be anything.
  return (((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c) &&
          (instr3 == 0x798c07c6) && ((instr4 >> 16) == 0x658c) &&
          ((instr5 >> 16) == 0x618c));
}
#else
// This code assumes a FIXED_SEQUENCE for 32bit loads (lis/ori)
bool Assembler::Is32BitLoadIntoR12(Instr instr1, Instr instr2) {
  // Check the instruction is indeed a two part load (into r12)
  // 3d802553       lis     r12, 9555
  // 618c5000       ori   r12, r12, 20480
  return (((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c));
}
#endif
    343 
    344 
// More instruction-recognition predicates.  Extended opcodes live in a
// secondary field, so both the primary and extended masks are checked.
bool Assembler::IsCmpRegister(Instr instr) {
  return (((instr & kOpcodeMask) == EXT2) &&
          ((instr & kExt2OpcodeMask) == CMP));
}


bool Assembler::IsRlwinm(Instr instr) {
  return ((instr & kOpcodeMask) == RLWINMX);
}


bool Assembler::IsAndi(Instr instr) { return ((instr & kOpcodeMask) == ANDIx); }


#if V8_TARGET_ARCH_PPC64
bool Assembler::IsRldicl(Instr instr) {
  return (((instr & kOpcodeMask) == EXT5) &&
          ((instr & kExt5OpcodeMask) == RLDICL));
}
#endif


bool Assembler::IsCmpImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == CMPI);
}


// creqv with identical operands is used as a "cr set" marker.
bool Assembler::IsCrSet(Instr instr) {
  return (((instr & kOpcodeMask) == EXT1) &&
          ((instr & kExt1OpcodeMask) == CREQV));
}


// Register operand (RA) of a cmpi instruction.
Register Assembler::GetCmpImmediateRegister(Instr instr) {
  DCHECK(IsCmpImmediate(instr));
  return GetRA(instr);
}


// Raw (not sign-extended) 16-bit immediate of a cmpi instruction.
int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
  DCHECK(IsCmpImmediate(instr));
  return instr & kOff16Mask;
}
    388 
    389 
    390 // Labels refer to positions in the (to be) generated code.
    391 // There are bound, linked, and unused labels.
    392 //
    393 // Bound labels refer to known positions in the already
    394 // generated code. pos() is the position the label refers to.
    395 //
    396 // Linked labels refer to unknown positions in the code
    397 // to be generated; pos() is the position of the last
    398 // instruction using the label.
    399 
    400 
// The link chain is terminated by a negative code position (must be aligned)
const int kEndOfChain = -4;


// Dummy opcodes for unbound label mov instructions or jump table entries.
// They occupy the 6-bit primary-opcode field (<< 26); the low 26 bits of the
// placeholder hold the scaled link offset — see target_at()/target_at_put().
enum {
  kUnboundMovLabelOffsetOpcode = 0 << 26,
  kUnboundAddLabelOffsetOpcode = 1 << 26,
  kUnboundMovLabelAddrOpcode = 2 << 26,
  kUnboundJumpTableEntryOpcode = 3 << 26
};
    412 
    413 
// Return the absolute position the (branch or placeholder) instruction at
// pos links to, or kEndOfChain if the instruction links to itself (offset 0),
// which marks the end of a label's link chain.
int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  // check which type of branch this is 16 or 26 bit offset
  int opcode = instr & kOpcodeMask;
  int link;
  switch (opcode) {
    case BX:
      link = SIGN_EXT_IMM26(instr & kImm26Mask);
      link &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
      break;
    case BCX:
      link = SIGN_EXT_IMM16((instr & kImm16Mask));
      link &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
      break;
    case kUnboundMovLabelOffsetOpcode:
    case kUnboundAddLabelOffsetOpcode:
    case kUnboundMovLabelAddrOpcode:
    case kUnboundJumpTableEntryOpcode:
      // Placeholders store the offset scaled down by 4 so it fits in 26 bits.
      link = SIGN_EXT_IMM26(instr & kImm26Mask);
      link <<= 2;
      break;
    default:
      DCHECK(false);
      return -1;
  }

  if (link == 0) return kEndOfChain;
  return pos + link;
}
    443 
    444 
// Resolve the instruction at pos so it refers to target_pos.  For real
// branches (BX/BCX) the displacement field is patched in place; for the
// unbound-label placeholder opcodes, the placeholder words are overwritten
// with the final instruction sequence via CodePatcher.  If is_branch is
// non-null it is set to whether pos held a real branch.
void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
  Instr instr = instr_at(pos);
  int opcode = instr & kOpcodeMask;

  if (is_branch != nullptr) {
    *is_branch = (opcode == BX || opcode == BCX);
  }

  switch (opcode) {
    case BX: {
      int imm26 = target_pos - pos;
      CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
      if (imm26 == kInstrSize && !(instr & kLKMask)) {
        // Branch to next instr without link.
        instr = ORI;  // nop: ori, 0,0,0
      } else {
        instr &= ((~kImm26Mask) | kAAMask | kLKMask);
        instr |= (imm26 & kImm26Mask);
      }
      instr_at_put(pos, instr);
      break;
    }
    case BCX: {
      int imm16 = target_pos - pos;
      CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
      if (imm16 == kInstrSize && !(instr & kLKMask)) {
        // Branch to next instr without link.
        instr = ORI;  // nop: ori, 0,0,0
      } else {
        instr &= ((~kImm16Mask) | kAAMask | kLKMask);
        instr |= (imm16 & kImm16Mask);
      }
      instr_at_put(pos, instr);
      break;
    }
    case kUnboundMovLabelOffsetOpcode: {
      // Load the position of the label relative to the generated code object
      // pointer in a register.
      // The destination register code was stashed in the following word.
      Register dst = Register::from_code(instr_at(pos + kInstrSize));
      int32_t offset = target_pos + (Code::kHeaderSize - kHeapObjectTag);
      CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos), 2,
                          CodePatcher::DONT_FLUSH);
      patcher.masm()->bitwise_mov32(dst, offset);
      break;
    }
    case kUnboundAddLabelOffsetOpcode: {
      // dst = base + position + immediate
      // Operands were packed into the following word: dst in bits 21-25,
      // base in bits 16-20, immediate in the low 16 bits.
      Instr operands = instr_at(pos + kInstrSize);
      Register dst = Register::from_code((operands >> 21) & 0x1f);
      Register base = Register::from_code((operands >> 16) & 0x1f);
      int32_t offset = target_pos + SIGN_EXT_IMM16(operands & kImm16Mask);
      CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos), 2,
                          CodePatcher::DONT_FLUSH);
      patcher.masm()->bitwise_add32(dst, base, offset);
      break;
    }
    case kUnboundMovLabelAddrOpcode: {
      // Load the address of the label in a register.
      Register dst = Register::from_code(instr_at(pos + kInstrSize));
      CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
                          kMovInstructionsNoConstantPool,
                          CodePatcher::DONT_FLUSH);
      // Keep internal references relative until EmitRelocations.
      patcher.masm()->bitwise_mov(dst, target_pos);
      break;
    }
    case kUnboundJumpTableEntryOpcode: {
      CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
                          kPointerSize / kInstrSize, CodePatcher::DONT_FLUSH);
      // Keep internal references relative until EmitRelocations.
      patcher.masm()->dp(target_pos);
      break;
    }
    default:
      DCHECK(false);
      break;
  }
}
    523 
    524 
// Return the bit width of the instruction's branch displacement at pos:
// 26 bits for unconditional (BX), 16 for conditional (BCX), and 0 (meaning
// unlimited reach) for unbound-label placeholders.
int Assembler::max_reach_from(int pos) {
  Instr instr = instr_at(pos);
  int opcode = instr & kOpcodeMask;

  // check which type of branch this is 16 or 26 bit offset
  switch (opcode) {
    case BX:
      return 26;
    case BCX:
      return 16;
    case kUnboundMovLabelOffsetOpcode:
    case kUnboundAddLabelOffsetOpcode:
    case kUnboundMovLabelAddrOpcode:
    case kUnboundJumpTableEntryOpcode:
      return 0;  // no limit on reach
  }

  DCHECK(false);
  return 0;
}
    545 
    546 
// Bind label L to code position pos, walking its link chain and patching
// every referring instruction.  When a branch cannot reach pos directly, it
// is redirected through a single shared trampoline slot instead.
void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  int32_t trampoline_pos = kInvalidSlotPos;
  bool is_branch = false;
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    int32_t offset = pos - fixup_pos;
    int maxReach = max_reach_from(fixup_pos);
    next(L);  // call next before overwriting link with target at fixup_pos
    if (maxReach && is_intn(offset, maxReach) == false) {
      // Out of reach: allocate (once) a trampoline slot that jumps to pos,
      // and point this instruction at the trampoline instead.
      if (trampoline_pos == kInvalidSlotPos) {
        trampoline_pos = get_trampoline_entry();
        CHECK(trampoline_pos != kInvalidSlotPos);
        target_at_put(trampoline_pos, pos);
      }
      target_at_put(fixup_pos, trampoline_pos);
    } else {
      target_at_put(fixup_pos, pos, &is_branch);
    }
  }
  L->bind_to(pos);

  if (!trampoline_emitted_ && is_branch) {
    UntrackBranch();
  }

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_) last_bound_pos_ = pos;
}
    577 
    578 
// Bind L to the current end of the instruction stream.
void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // label can only be bound once
  bind_to(L, pc_offset());
}


// Advance L one step along its link chain; unuse it at the chain's end.
void Assembler::next(Label* L) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos());
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    DCHECK(link >= 0);
    L->link_to(link);
  }
}


// Whether a branch with condition cond emitted here could reach the bound
// label L directly (26-bit reach when unconditional, 16-bit otherwise).
bool Assembler::is_near(Label* L, Condition cond) {
  DCHECK(L->is_bound());
  // Defensive release-mode guard; the DCHECK above covers debug builds.
  if (L->is_bound() == false) return false;

  int maxReach = ((cond == al) ? 26 : 16);
  int offset = L->pos() - pc_offset();

  return is_intn(offset, maxReach);
}
    606 
    607 
// Instruction-format encoders.  Field positions follow the Power ISA layout:
// the B21/B16/B11/... constants place register codes and immediates at their
// bit offsets within the 32-bit instruction word.

// A-form: floating-point three-operand (frt, fra, frb) plus record bit.
void Assembler::a_form(Instr instr, DoubleRegister frt, DoubleRegister fra,
                       DoubleRegister frb, RCBit r) {
  emit(instr | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 | r);
}


// D-form: register, register, 16-bit immediate.  signed_disp selects whether
// the immediate must fit as int16 or uint16; the PrintF calls are debug aids
// that dump the offending value before the CHECK aborts.
void Assembler::d_form(Instr instr, Register rt, Register ra,
                       const intptr_t val, bool signed_disp) {
  if (signed_disp) {
    if (!is_int16(val)) {
      PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR "\n", val, val);
    }
    CHECK(is_int16(val));
  } else {
    if (!is_uint16(val)) {
      PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR
             ", is_unsigned_imm16(val)=%d, kImm16Mask=0x%x\n",
             val, val, is_uint16(val), kImm16Mask);
    }
    CHECK(is_uint16(val));
  }
  emit(instr | rt.code() * B21 | ra.code() * B16 | (kImm16Mask & val));
}


// X-form: rs, ra, rb register operands plus record bit.
void Assembler::x_form(Instr instr, Register ra, Register rs, Register rb,
                       RCBit r) {
  emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | r);
}


// XO-form: like X-form but with an overflow-enable (OE) bit.
void Assembler::xo_form(Instr instr, Register rt, Register ra, Register rb,
                        OEBit o, RCBit r) {
  emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | o | r);
}


// MD-form: 64-bit rotate with 6-bit shift and mask fields; the high bit of
// each 6-bit value is stored separately (sh5/m5) per the instruction layout.
void Assembler::md_form(Instr instr, Register ra, Register rs, int shift,
                        int maskbit, RCBit r) {
  int sh0_4 = shift & 0x1f;
  int sh5 = (shift >> 5) & 0x1;
  int m0_4 = maskbit & 0x1f;
  int m5 = (maskbit >> 5) & 0x1;

  emit(instr | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 | m0_4 * B6 |
       m5 * B5 | sh5 * B1 | r);
}


// MDS-form: like MD-form but the shift amount comes from register rb.
void Assembler::mds_form(Instr instr, Register ra, Register rs, Register rb,
                         int maskbit, RCBit r) {
  int m0_4 = maskbit & 0x1f;
  int m5 = (maskbit >> 5) & 0x1;

  emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | m0_4 * B6 |
       m5 * B5 | r);
}
    665 
    666 
// Returns the next free trampoline entry.
// Returns kInvalidSlotPos (and latches internal_trampoline_exception_) when
// the trampoline pool is exhausted; once latched, no further slots are taken.
int32_t Assembler::get_trampoline_entry() {
  int32_t trampoline_entry = kInvalidSlotPos;

  if (!internal_trampoline_exception_) {
    trampoline_entry = trampoline_.take_slot();

    if (kInvalidSlotPos == trampoline_entry) {
      internal_trampoline_exception_ = true;
    }
  }
  return trampoline_entry;
}
    680 
    681 
// Return the position a new reference to L should encode, and (for unbound
// labels) thread the current pc into L's link chain.
int Assembler::link(Label* L) {
  int position;
  if (L->is_bound()) {
    position = L->pos();
  } else {
    if (L->is_linked()) {
      position = L->pos();  // L's link
    } else {
      // was: target_pos = kEndOfChain;
      // However, using self to mark the first reference
      // should avoid most instances of branch offset overflow.  See
      // target_at() for where this is converted back to kEndOfChain.
      position = pc_offset();
    }
    L->link_to(pc_offset());
  }

  return position;
}
    701 
    702 
    703 // Branch instructions.
    704 
    705 
// Conditional branch to the link register.
void Assembler::bclr(BOfield bo, int condition_bit, LKBit lk) {
  emit(EXT1 | bo | condition_bit * B16 | BCLRX | lk);
}


// Conditional branch to the count register.
void Assembler::bcctr(BOfield bo, int condition_bit, LKBit lk) {
  emit(EXT1 | bo | condition_bit * B16 | BCCTRX | lk);
}


// Pseudo op - branch to link register
void Assembler::blr() { bclr(BA, 0, LeaveLK); }


// Pseudo op - branch to count register -- used for "jump"
void Assembler::bctr() { bcctr(BA, 0, LeaveLK); }


// Pseudo op - branch to count register and set the link register (call).
void Assembler::bctrl() { bcctr(BA, 0, SetLK); }


// Conditional relative branch; offset must fit in 16 bits and be 4-aligned
// (the low two bits are the AA/LK flags).
void Assembler::bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk) {
  int imm16 = branch_offset;
  CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
  emit(BCX | bo | condition_bit * B16 | (imm16 & kImm16Mask) | lk);
}


// Unconditional relative branch; offset must fit in 26 bits and be 4-aligned.
void Assembler::b(int branch_offset, LKBit lk) {
  int imm26 = branch_offset;
  CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
  emit(BX | (imm26 & kImm26Mask) | lk);
}
    739 
    740 
// Logical operations.  Note the D-form immediate ops pass (src, dst) because
// d_form's first register lands in the RS field and the second in RA.

// XOR with unsigned 16-bit immediate.
void Assembler::xori(Register dst, Register src, const Operand& imm) {
  d_form(XORI, src, dst, imm.imm_, false);
}


// XOR with immediate shifted left 16 bits.
void Assembler::xoris(Register ra, Register rs, const Operand& imm) {
  d_form(XORIS, rs, ra, imm.imm_, false);
}


// Register-register XOR.
void Assembler::xor_(Register dst, Register src1, Register src2, RCBit rc) {
  x_form(EXT2 | XORX, dst, src1, src2, rc);
}


// Count leading zeros (word); rb slot is unused for this encoding.
void Assembler::cntlzw_(Register ra, Register rs, RCBit rc) {
  x_form(EXT2 | CNTLZWX, ra, rs, r0, rc);
}


// Population count (word).
void Assembler::popcntw(Register ra, Register rs) {
  emit(EXT2 | POPCNTW | rs.code() * B21 | ra.code() * B16);
}


// Register-register AND.
void Assembler::and_(Register ra, Register rs, Register rb, RCBit rc) {
  x_form(EXT2 | ANDX, ra, rs, rb, rc);
}
    769 
    770 
// Rotate-left-word instructions.  sh/mb/me are 5-bit fields, so they are
// masked to 0..31 before encoding.

// Rotate left word immediate then AND with mask (mb..me).
void Assembler::rlwinm(Register ra, Register rs, int sh, int mb, int me,
                       RCBit rc) {
  sh &= 0x1f;
  mb &= 0x1f;
  me &= 0x1f;
  emit(RLWINMX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
       me << 1 | rc);
}


// Rotate left word by register amount then AND with mask.
void Assembler::rlwnm(Register ra, Register rs, Register rb, int mb, int me,
                      RCBit rc) {
  mb &= 0x1f;
  me &= 0x1f;
  emit(RLWNMX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | mb * B6 |
       me << 1 | rc);
}


// Rotate left word immediate then insert under mask (read-modify-write of ra).
void Assembler::rlwimi(Register ra, Register rs, int sh, int mb, int me,
                       RCBit rc) {
  sh &= 0x1f;
  mb &= 0x1f;
  me &= 0x1f;
  emit(RLWIMIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
       me << 1 | rc);
}
    798 
    799 
// Shift/clear pseudo-ops, all expressed as rlwinm with the standard masks.

// Shift left word immediate (0 <= val < 32).
void Assembler::slwi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((32 > val.imm_) && (val.imm_ >= 0));
  rlwinm(dst, src, val.imm_, 0, 31 - val.imm_, rc);
}


// Shift right word immediate (logical).
void Assembler::srwi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((32 > val.imm_) && (val.imm_ >= 0));
  rlwinm(dst, src, 32 - val.imm_, val.imm_, 31, rc);
}


// Clear the low-order val bits.
void Assembler::clrrwi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((32 > val.imm_) && (val.imm_ >= 0));
  rlwinm(dst, src, 0, 0, 31 - val.imm_, rc);
}


// Clear the high-order val bits.
void Assembler::clrlwi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((32 > val.imm_) && (val.imm_ >= 0));
  rlwinm(dst, src, 0, val.imm_, 31, rc);
}
    824 
    825 
// Shift right algebraic word immediate.
void Assembler::srawi(Register ra, Register rs, int sh, RCBit r) {
  emit(EXT2 | SRAWIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | r);
}


// Shift right word by register amount (logical).
void Assembler::srw(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | SRWX, dst, src1, src2, r);
}


// Shift left word by register amount.
void Assembler::slw(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | SLWX, dst, src1, src2, r);
}


// Shift right algebraic word by register amount.
void Assembler::sraw(Register ra, Register rs, Register rb, RCBit r) {
  x_form(EXT2 | SRAW, ra, rs, rb, r);
}


// Rotate left word by register amount (full-width mask 0..31).
void Assembler::rotlw(Register ra, Register rs, Register rb, RCBit r) {
  rlwnm(ra, rs, rb, 0, 31, r);
}


// Rotate left word immediate.
void Assembler::rotlwi(Register ra, Register rs, int sh, RCBit r) {
  rlwinm(ra, rs, sh, 0, 31, r);
}


// Rotate right word immediate (as a left rotate by 32 - sh).
void Assembler::rotrwi(Register ra, Register rs, int sh, RCBit r) {
  rlwinm(ra, rs, 32 - sh, 0, 31, r);
}
    859 
    860 
    861 void Assembler::subi(Register dst, Register src, const Operand& imm) {
    862   addi(dst, src, Operand(-(imm.imm_)));
    863 }
    864 
    865 void Assembler::addc(Register dst, Register src1, Register src2, OEBit o,
    866                      RCBit r) {
    867   xo_form(EXT2 | ADDCX, dst, src1, src2, o, r);
    868 }
    869 
// adde: add extended (adds the CA bit).
void Assembler::adde(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | ADDEX, dst, src1, src2, o, r);
}
    874 
// addze: add to zero extended (src1 + CA); encoded by hand because the
// instruction has no second source register.
void Assembler::addze(Register dst, Register src1, OEBit o, RCBit r) {
  // a special xo_form
  emit(EXT2 | ADDZEX | dst.code() * B21 | src1.code() * B16 | o | r);
}
    879 
    880 
// sub: dst = src1 - src2.  Note the operand swap: PPC subf computes
// rb - ra, so src2/src1 are passed reversed to xo_form.
void Assembler::sub(Register dst, Register src1, Register src2, OEBit o,
                    RCBit r) {
  xo_form(EXT2 | SUBFX, dst, src2, src1, o, r);
}
    885 
// subc: subtract carrying; operands swapped as in sub() above.
void Assembler::subc(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | SUBFCX, dst, src2, src1, o, r);
}
    890 
// sube: subtract extended (uses CA); operands swapped as in sub() above.
void Assembler::sube(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | SUBFEX, dst, src2, src1, o, r);
}
    895 
// subfic: subtract from immediate carrying (dst = imm - src).
void Assembler::subfic(Register dst, Register src, const Operand& imm) {
  d_form(SUBFIC, dst, src, imm.imm_, true);
}
    899 
    900 
// add: dst = src1 + src2, with optional overflow (OE) and record (RC) bits.
void Assembler::add(Register dst, Register src1, Register src2, OEBit o,
                    RCBit r) {
  xo_form(EXT2 | ADDX, dst, src1, src2, o, r);
}
    905 
    906 
// Multiply low word
void Assembler::mullw(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | MULLW, dst, src1, src2, o, r);
}
    912 
    913 
// Multiply hi word.  OE is always left clear here (no overflow variant
// is emitted).
void Assembler::mulhw(Register dst, Register src1, Register src2, RCBit r) {
  xo_form(EXT2 | MULHWX, dst, src1, src2, LeaveOE, r);
}
    918 
    919 
// Multiply hi word unsigned.  OE is always left clear here.
void Assembler::mulhwu(Register dst, Register src1, Register src2, RCBit r) {
  xo_form(EXT2 | MULHWUX, dst, src1, src2, LeaveOE, r);
}
    924 
    925 
// Divide word
void Assembler::divw(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | DIVW, dst, src1, src2, o, r);
}
    931 
    932 
// Divide word unsigned
void Assembler::divwu(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | DIVWU, dst, src1, src2, o, r);
}
    938 
    939 
// addi: add immediate.  src must not be r0 because the hardware reads
// r0 as the literal value 0 in this encoding — use li() for that.
void Assembler::addi(Register dst, Register src, const Operand& imm) {
  DCHECK(!src.is(r0));  // use li instead to show intent
  d_form(ADDI, dst, src, imm.imm_, true);
}
    944 
    945 
// addis: add immediate shifted (imm << 16); use lis() when src would be r0.
void Assembler::addis(Register dst, Register src, const Operand& imm) {
  DCHECK(!src.is(r0));  // use lis instead to show intent
  d_form(ADDIS, dst, src, imm.imm_, true);
}
    950 
    951 
// addic: add immediate carrying (sets CA).
void Assembler::addic(Register dst, Register src, const Operand& imm) {
  d_form(ADDIC, dst, src, imm.imm_, true);
}
    955 
    956 
// andi.: AND immediate.  rs/ra are passed swapped because d_form takes
// the target field first and this opcode stores into ra.
void Assembler::andi(Register ra, Register rs, const Operand& imm) {
  d_form(ANDIx, rs, ra, imm.imm_, false);
}
    960 
    961 
// andis.: AND immediate shifted (imm << 16); operand order as in andi().
void Assembler::andis(Register ra, Register rs, const Operand& imm) {
  d_form(ANDISx, rs, ra, imm.imm_, false);
}
    965 
    966 
// nor: dst = ~(src1 | src2).
void Assembler::nor(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | NORX, dst, src1, src2, r);
}
    970 
    971 
// Pseudo op: bitwise NOT, encoded as nor(dst, src, src).
void Assembler::notx(Register dst, Register src, RCBit r) {
  x_form(EXT2 | NORX, dst, src, src, r);
}
    975 
    976 
// ori: OR immediate; operand order swapped for d_form as in andi().
void Assembler::ori(Register ra, Register rs, const Operand& imm) {
  d_form(ORI, rs, ra, imm.imm_, false);
}
    980 
    981 
// oris: OR immediate shifted (imm << 16).
void Assembler::oris(Register dst, Register src, const Operand& imm) {
  d_form(ORIS, src, dst, imm.imm_, false);
}
    985 
    986 
// or: dst = src1 | src2 ("orx" to avoid the C++ keyword).
void Assembler::orx(Register dst, Register src1, Register src2, RCBit rc) {
  x_form(EXT2 | ORX, dst, src1, src2, rc);
}
    990 
    991 
// orc: OR with complement (dst = src1 | ~src2).
void Assembler::orc(Register dst, Register src1, Register src2, RCBit rc) {
  x_form(EXT2 | ORC, dst, src1, src2, rc);
}
    995 
    996 
// cmpi: signed compare of src1 against a 16-bit immediate.
// The L bit selects 64-bit comparison on PPC64 builds.
void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) {
  intptr_t imm16 = src2.imm_;
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(is_int16(imm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  imm16 &= kImm16Mask;
  emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
}
   1009 
   1010 
// cmpli: unsigned (logical) compare of src1 against a 16-bit immediate.
// The L bit selects 64-bit comparison on PPC64 builds.
void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) {
  uintptr_t uimm16 = src2.imm_;
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(is_uint16(uimm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  uimm16 &= kImm16Mask;
  emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
}
   1023 
   1024 
// cmp: signed register-register compare; L bit selects 64-bit on PPC64.
void Assembler::cmp(Register src1, Register src2, CRegister cr) {
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT2 | CMP | cr.code() * B23 | L * B21 | src1.code() * B16 |
       src2.code() * B11);
}
   1035 
   1036 
// cmpl: unsigned register-register compare; L bit selects 64-bit on PPC64.
void Assembler::cmpl(Register src1, Register src2, CRegister cr) {
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT2 | CMPL | cr.code() * B23 | L * B21 | src1.code() * B16 |
       src2.code() * B11);
}
   1047 
   1048 
// cmpwi: 32-bit signed compare against immediate (L bit forced to 0).
void Assembler::cmpwi(Register src1, const Operand& src2, CRegister cr) {
  intptr_t imm16 = src2.imm_;
  int L = 0;
  int pos = pc_offset();
  DCHECK(is_int16(imm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  imm16 &= kImm16Mask;

  // For cmpwi against 0, save position and cr for later examination
  // of potential optimization.
  if (imm16 == 0 && pos > 0 && last_bound_pos_ != pos) {
    optimizable_cmpi_pos_ = pos;
    cmpi_cr_ = cr;
  }
  emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
}
   1065 
   1066 
// cmplwi: 32-bit unsigned compare against immediate (L bit forced to 0).
void Assembler::cmplwi(Register src1, const Operand& src2, CRegister cr) {
  uintptr_t uimm16 = src2.imm_;
  int L = 0;
  DCHECK(is_uint16(uimm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  uimm16 &= kImm16Mask;
  emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
}
   1075 
   1076 
// cmpw: 32-bit signed register-register compare (L bit forced to 0).
void Assembler::cmpw(Register src1, Register src2, CRegister cr) {
  int L = 0;
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT2 | CMP | cr.code() * B23 | L * B21 | src1.code() * B16 |
       src2.code() * B11);
}
   1083 
   1084 
// cmplw: 32-bit unsigned register-register compare (L bit forced to 0).
void Assembler::cmplw(Register src1, Register src2, CRegister cr) {
  int L = 0;
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT2 | CMPL | cr.code() * B23 | L * B21 | src1.code() * B16 |
       src2.code() * B11);
}
   1091 
   1092 
// isel: integer select — rt = (CR bit cb) ? ra : rb.
void Assembler::isel(Register rt, Register ra, Register rb, int cb) {
  emit(EXT2 | ISEL | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       cb * B6);
}
   1097 
   1098 
// Pseudo op - load immediate (addi with ra=r0, which reads as 0).
void Assembler::li(Register dst, const Operand& imm) {
  d_form(ADDI, dst, r0, imm.imm_, true);
}
   1103 
   1104 
// Pseudo op - load immediate shifted (addis with ra=r0).
void Assembler::lis(Register dst, const Operand& imm) {
  d_form(ADDIS, dst, r0, imm.imm_, true);
}
   1108 
   1109 
// Pseudo op - move register
void Assembler::mr(Register dst, Register src) {
  // actually or(dst, src, src)
  orx(dst, src, src);
}
   1115 
   1116 
// lbz: load byte and zero-extend from base+offset.  r0 as base would be
// read as literal 0 by the hardware, hence the DCHECK.
void Assembler::lbz(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(LBZ, dst, src.ra(), src.offset(), true);
}
   1121 
   1122 
// lbzx: load byte and zero-extend, indexed (base ra + index rb).
void Assembler::lbzx(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LBZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}
   1130 
   1131 
// lbzux: load byte and zero-extend, indexed with update of ra.
void Assembler::lbzux(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LBZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}
   1139 
   1140 
// lhz: load halfword and zero-extend from base+offset.
void Assembler::lhz(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(LHZ, dst, src.ra(), src.offset(), true);
}
   1145 
   1146 
// lhzx: load halfword and zero-extend, indexed.
void Assembler::lhzx(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LHZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}
   1154 
   1155 
// lhzux: load halfword and zero-extend, indexed with update of ra.
void Assembler::lhzux(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LHZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}
   1163 
   1164 
// lhax: load halfword algebraic (sign-extend), indexed.
void Assembler::lhax(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LHAX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
}
   1171 
   1172 
// lwz: load word and zero-extend from base+offset.
void Assembler::lwz(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(LWZ, dst, src.ra(), src.offset(), true);
}
   1177 
   1178 
// lwzu: load word and zero-extend with update of the base register.
void Assembler::lwzu(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(LWZU, dst, src.ra(), src.offset(), true);
}
   1183 
   1184 
// lwzx: load word and zero-extend, indexed.
void Assembler::lwzx(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LWZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}
   1192 
   1193 
// lwzux: load word and zero-extend, indexed with update of ra.
void Assembler::lwzux(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LWZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}
   1201 
   1202 
// lha: load halfword algebraic (sign-extend) from base+offset.
void Assembler::lha(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(LHA, dst, src.ra(), src.offset(), true);
}
   1207 
   1208 
// lwa: load word algebraic (sign-extend to 64 bits).  On PPC64 this is a
// DS-form instruction — the offset must be word-aligned and the low two
// bits of the field carry the sub-opcode (2 selects lwa within LD).
// On 32-bit targets it degenerates to a plain lwz.
void Assembler::lwa(Register dst, const MemOperand& src) {
#if V8_TARGET_ARCH_PPC64
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | dst.code() * B21 | src.ra().code() * B16 | offset | 2);
#else
  lwz(dst, src);
#endif
}
   1220 
   1221 
// lwax: load word algebraic, indexed; falls back to lwzx on 32-bit
// targets where sign- and zero-extension of a word are equivalent.
void Assembler::lwax(Register rt, const MemOperand& src) {
#if V8_TARGET_ARCH_PPC64
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LWAX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
#else
  lwzx(rt, src);
#endif
}
   1232 
   1233 
// ldbrx: load doubleword byte-reversed, indexed.  Note the argument
// order passed to x_form here is (ra, dst, rb), unlike the arithmetic
// x_form uses above.
void Assembler::ldbrx(Register dst, const MemOperand& src) {
  x_form(EXT2 | LDBRX, src.ra(), dst, src.rb(), LeaveRC);
}
   1237 
   1238 
// lwbrx: load word byte-reversed, indexed (x_form order as in ldbrx).
void Assembler::lwbrx(Register dst, const MemOperand& src) {
  x_form(EXT2 | LWBRX, src.ra(), dst, src.rb(), LeaveRC);
}
   1242 
   1243 
// lhbrx: load halfword byte-reversed, indexed (x_form order as in ldbrx).
void Assembler::lhbrx(Register dst, const MemOperand& src) {
  x_form(EXT2 | LHBRX, src.ra(), dst, src.rb(), LeaveRC);
}
   1247 
   1248 
// stb: store byte to base+offset.
void Assembler::stb(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(STB, dst, src.ra(), src.offset(), true);
}
   1253 
   1254 
// stbx: store byte, indexed.
void Assembler::stbx(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STBX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}
   1262 
   1263 
// stbux: store byte, indexed with update of ra.
void Assembler::stbux(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STBUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}
   1271 
   1272 
// sth: store halfword to base+offset.
void Assembler::sth(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(STH, dst, src.ra(), src.offset(), true);
}
   1277 
   1278 
// sthx: store halfword, indexed.
void Assembler::sthx(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STHX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}
   1286 
   1287 
// sthux: store halfword, indexed with update of ra.
void Assembler::sthux(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STHUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}
   1295 
   1296 
// stw: store word to base+offset.
void Assembler::stw(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(STW, dst, src.ra(), src.offset(), true);
}
   1301 
   1302 
// stwu: store word with update of the base register.
void Assembler::stwu(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(STWU, dst, src.ra(), src.offset(), true);
}
   1307 
   1308 
// stwx: store word, indexed.
void Assembler::stwx(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STWX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}
   1316 
   1317 
// stwux: store word, indexed with update of ra.
void Assembler::stwux(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STWUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}
   1325 
   1326 
// extsb: sign-extend byte.  Note rs/ra swap in the encoding: the X-form
// places the source in the B21 slot and the target in B16.
void Assembler::extsb(Register rs, Register ra, RCBit rc) {
  emit(EXT2 | EXTSB | ra.code() * B21 | rs.code() * B16 | rc);
}
   1330 
   1331 
// extsh: sign-extend halfword (field placement as in extsb).
void Assembler::extsh(Register rs, Register ra, RCBit rc) {
  emit(EXT2 | EXTSH | ra.code() * B21 | rs.code() * B16 | rc);
}
   1335 
   1336 
// extsw: sign-extend word to 64 bits.  On 32-bit builds this must be a
// no-op, so we only allow the degenerate rs==ra / LeaveRC form there.
void Assembler::extsw(Register rs, Register ra, RCBit rc) {
#if V8_TARGET_ARCH_PPC64
  emit(EXT2 | EXTSW | ra.code() * B21 | rs.code() * B16 | rc);
#else
  // nop on 32-bit
  DCHECK(rs.is(ra) && rc == LeaveRC);
#endif
}
   1345 
   1346 
// neg: two's-complement negate.
void Assembler::neg(Register rt, Register ra, OEBit o, RCBit r) {
  emit(EXT2 | NEGX | rt.code() * B21 | ra.code() * B16 | o | r);
}
   1350 
   1351 
// andc: AND with complement (dst = src1 & ~src2).
void Assembler::andc(Register dst, Register src1, Register src2, RCBit rc) {
  x_form(EXT2 | ANDCX, dst, src1, src2, rc);
}
   1355 
   1356 
   1357 #if V8_TARGET_ARCH_PPC64
   1358 // 64bit specific instructions
// ld: load doubleword (DS-form); offset must be word-aligned because the
// low two bits of the field are the sub-opcode (0 for plain ld).
void Assembler::ld(Register rd, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset);
}
   1366 
   1367 
// ldx: load doubleword, indexed.
void Assembler::ldx(Register rd, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LDX | rd.code() * B21 | ra.code() * B16 | rb.code() * B11);
}
   1374 
   1375 
// ldu: load doubleword with update of ra; the trailing '1' is the DS-form
// sub-opcode selecting the update variant.
void Assembler::ldu(Register rd, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset | 1);
}
   1383 
   1384 
// ldux: load doubleword, indexed with update of ra.
void Assembler::ldux(Register rd, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LDUX | rd.code() * B21 | ra.code() * B16 | rb.code() * B11);
}
   1391 
   1392 
// std: store doubleword (DS-form; offset must be word-aligned).
void Assembler::std(Register rs, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset);
}
   1400 
   1401 
// stdx: store doubleword, indexed.
void Assembler::stdx(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STDX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11);
}
   1408 
   1409 
// stdu: store doubleword with update of ra ('1' is the DS sub-opcode).
void Assembler::stdu(Register rs, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset | 1);
}
   1417 
   1418 
// stdux: store doubleword, indexed with update of ra.
void Assembler::stdux(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STDUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11);
}
   1425 
   1426 
// rldic: rotate left doubleword immediate then clear.
void Assembler::rldic(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIC, ra, rs, sh, mb, r);
}
   1430 
   1431 
// rldicl: rotate left doubleword immediate then clear left.
void Assembler::rldicl(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDICL, ra, rs, sh, mb, r);
}
   1435 
   1436 
// rldcl: rotate left doubleword (amount in rb) then clear left.
void Assembler::rldcl(Register ra, Register rs, Register rb, int mb, RCBit r) {
  mds_form(EXT5 | RLDCL, ra, rs, rb, mb, r);
}
   1440 
   1441 
// rldicr: rotate left doubleword immediate then clear right.
void Assembler::rldicr(Register ra, Register rs, int sh, int me, RCBit r) {
  md_form(EXT5 | RLDICR, ra, rs, sh, me, r);
}
   1445 
   1446 
// Pseudo op: shift left doubleword immediate (rldicr sh, 63-sh).
void Assembler::sldi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
  rldicr(dst, src, val.imm_, 63 - val.imm_, rc);
}
   1451 
   1452 
// Pseudo op: shift right doubleword immediate (rldicl 64-sh, sh).
void Assembler::srdi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
  rldicl(dst, src, 64 - val.imm_, val.imm_, rc);
}
   1457 
   1458 
// Pseudo op: clear the low val.imm_ bits of src (rldicr 0, 63-n).
void Assembler::clrrdi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
  rldicr(dst, src, 0, 63 - val.imm_, rc);
}
   1464 
   1465 
// Pseudo op: clear the high val.imm_ bits of src (rldicl 0, n).
void Assembler::clrldi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
  rldicl(dst, src, 0, val.imm_, rc);
}
   1471 
   1472 
// rldimi: rotate left doubleword immediate then mask insert.
void Assembler::rldimi(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIMI, ra, rs, sh, mb, r);
}
   1476 
   1477 
// sradi: shift right algebraic doubleword immediate.  The 6-bit shift
// amount is split: bits 0-4 go in the sh field, bit 5 in its own slot.
void Assembler::sradi(Register ra, Register rs, int sh, RCBit r) {
  int sh0_4 = sh & 0x1f;
  int sh5 = (sh >> 5) & 0x1;

  emit(EXT2 | SRADIX | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 |
       sh5 * B1 | r);
}
   1485 
   1486 
// srd: shift right doubleword, shift amount taken from src2.
void Assembler::srd(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | SRDX, dst, src1, src2, r);
}
   1490 
   1491 
// sld: shift left doubleword, shift amount taken from src2.
void Assembler::sld(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | SLDX, dst, src1, src2, r);
}
   1495 
   1496 
// srad: shift right algebraic doubleword, shift amount taken from rb.
void Assembler::srad(Register ra, Register rs, Register rb, RCBit r) {
  x_form(EXT2 | SRAD, ra, rs, rb, r);
}
   1500 
   1501 
// Pseudo op: rotate left doubleword by the amount in rb (rldcl, mb=0).
void Assembler::rotld(Register ra, Register rs, Register rb, RCBit r) {
  rldcl(ra, rs, rb, 0, r);
}
   1505 
   1506 
// Pseudo op: rotate left doubleword by immediate sh (rldicl, mb=0).
void Assembler::rotldi(Register ra, Register rs, int sh, RCBit r) {
  rldicl(ra, rs, sh, 0, r);
}
   1510 
   1511 
// Pseudo op: rotate right doubleword by sh, as a left rotate by 64 - sh.
// NOTE(review): sh == 0 would yield a rotate amount of 64 — callers
// presumably never pass 0; confirm.
void Assembler::rotrdi(Register ra, Register rs, int sh, RCBit r) {
  rldicl(ra, rs, 64 - sh, 0, r);
}
   1515 
   1516 
// cntlzd: count leading zeros doubleword.  r0 merely fills the unused
// rb slot of the X-form encoding.
void Assembler::cntlzd_(Register ra, Register rs, RCBit rc) {
  x_form(EXT2 | CNTLZDX, ra, rs, r0, rc);
}
   1520 
   1521 
// popcntd: population count doubleword.
void Assembler::popcntd(Register ra, Register rs) {
  emit(EXT2 | POPCNTD | rs.code() * B21 | ra.code() * B16);
}
   1525 
   1526 
// mulld: multiply low doubleword.
void Assembler::mulld(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | MULLD, dst, src1, src2, o, r);
}
   1531 
   1532 
// divd: divide doubleword (signed).
void Assembler::divd(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | DIVD, dst, src1, src2, o, r);
}
   1537 
   1538 
// divdu: divide doubleword unsigned.
void Assembler::divdu(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | DIVDU, dst, src1, src2, o, r);
}
   1543 #endif
   1544 
   1545 
   1546 // Function descriptor for AIX.
   1547 // Code address skips the function descriptor "header".
   1548 // TOC and static chain are ignored and set to 0.
// Emits an AIX-style function descriptor at the very start of the code:
// [entry address, TOC = 0, static chain = 0], then binds the label so
// the entry word points at the first real instruction.
void Assembler::function_descriptor() {
  if (ABI_USES_FUNCTION_DESCRIPTORS) {
    Label instructions;
    DCHECK(pc_offset() == 0);
    emit_label_addr(&instructions);
    dp(0);
    dp(0);
    bind(&instructions);
  }
}
   1559 
   1560 
// Returns the number of instructions a mov(dst, src) would emit, so
// callers can reserve fixed-size sequences.  Mirrors the decision logic
// in mov(): constant-pool access (possibly with the extra overflow
// instruction) or the full no-constant-pool sequence.
int Assembler::instructions_required_for_mov(Register dst,
                                             const Operand& src) const {
  bool canOptimize =
      !(src.must_output_reloc_info(this) || is_trampoline_pool_blocked());
  if (use_constant_pool_for_mov(dst, src, canOptimize)) {
    if (ConstantPoolAccessIsInOverflow()) {
      return kMovInstructionsConstantPool + 1;
    }
    return kMovInstructionsConstantPool;
  }
  DCHECK(!canOptimize);
  return kMovInstructionsNoConstantPool;
}
   1574 
   1575 
// Decides whether mov(dst, src) should load the value from the constant
// pool rather than synthesize it inline.  |canOptimize| means the value
// is not relocatable and a shorter inline sequence is permitted.
bool Assembler::use_constant_pool_for_mov(Register dst, const Operand& src,
                                          bool canOptimize) const {
  if (!FLAG_enable_embedded_constant_pool || !is_constant_pool_available()) {
    // If there is no constant pool available, we must use a mov
    // immediate sequence.
    return false;
  }

  intptr_t value = src.immediate();
  // Overflow access needs an extra scratch use of dst itself, so it is
  // disallowed when dst is r0 or when a short inline form exists.
#if V8_TARGET_ARCH_PPC64
  bool allowOverflow = !((canOptimize && is_int32(value)) || dst.is(r0));
#else
  bool allowOverflow = !(canOptimize || dst.is(r0));
#endif
  if (canOptimize && is_int16(value)) {
    // Prefer a single-instruction load-immediate.
    return false;
  }
  if (!allowOverflow && ConstantPoolAccessIsInOverflow()) {
    // Prefer non-relocatable two-instruction bitwise-mov32 over
    // overflow sequence.
    return false;
  }

  return true;
}
   1602 
   1603 
   1604 void Assembler::EnsureSpaceFor(int space_needed) {
   1605   if (buffer_space() <= (kGap + space_needed)) {
   1606     GrowBuffer(space_needed);
   1607   }
   1608 }
   1609 
   1610 
   1611 bool Operand::must_output_reloc_info(const Assembler* assembler) const {
   1612   if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
   1613     if (assembler != NULL && assembler->predictable_code_size()) return true;
   1614     return assembler->serializer_enabled();
   1615   } else if (RelocInfo::IsNone(rmode_)) {
   1616     return false;
   1617   }
   1618   return true;
   1619 }
   1620 
   1621 
   1622 // Primarily used for loading constants
   1623 // This should really move to be in macro-assembler as it
   1624 // is really a pseudo instruction
   1625 // Some usages of this intend for a FIXED_SEQUENCE to be used
   1626 // Todo - break this dependency so we can optimize mov() in general
   1627 // and only use the generic version when we require a fixed sequence
// Loads an arbitrary immediate (possibly relocatable) into dst, choosing
// among: a constant-pool load, an optimized variable-length inline
// sequence, or the fixed-length bitwise_mov sequence.  Callers relying on
// a FIXED_SEQUENCE must ensure canOptimize is false (e.g. relocatable).
void Assembler::mov(Register dst, const Operand& src) {
  intptr_t value = src.immediate();
  bool relocatable = src.must_output_reloc_info(this);
  bool canOptimize;

  // Optimization is allowed only for plain values; a blocked trampoline
  // pool additionally forbids multi-instruction variable sequences.
  canOptimize =
      !(relocatable || (is_trampoline_pool_blocked() && !is_int16(value)));

  if (use_constant_pool_for_mov(dst, src, canOptimize)) {
    DCHECK(is_constant_pool_available());
    if (relocatable) {
      RecordRelocInfo(src.rmode_);
    }
    ConstantPoolEntry::Access access = ConstantPoolAddEntry(src.rmode_, value);
    // Offsets are patched later; OVERFLOWED entries need an extra addis
    // to reach beyond the 16-bit displacement.
#if V8_TARGET_ARCH_PPC64
    if (access == ConstantPoolEntry::OVERFLOWED) {
      addis(dst, kConstantPoolRegister, Operand::Zero());
      ld(dst, MemOperand(dst, 0));
    } else {
      ld(dst, MemOperand(kConstantPoolRegister, 0));
    }
#else
    if (access == ConstantPoolEntry::OVERFLOWED) {
      addis(dst, kConstantPoolRegister, Operand::Zero());
      lwz(dst, MemOperand(dst, 0));
    } else {
      lwz(dst, MemOperand(kConstantPoolRegister, 0));
    }
#endif
    return;
  }

  if (canOptimize) {
    // Build the value 16 bits at a time, emitting only the pieces that
    // are non-zero; sequence length varies with the value.
    if (is_int16(value)) {
      li(dst, Operand(value));
    } else {
      uint16_t u16;
#if V8_TARGET_ARCH_PPC64
      if (is_int32(value)) {
#endif
        lis(dst, Operand(value >> 16));
#if V8_TARGET_ARCH_PPC64
      } else {
        if (is_int48(value)) {
          li(dst, Operand(value >> 32));
        } else {
          lis(dst, Operand(value >> 48));
          u16 = ((value >> 32) & 0xffff);
          if (u16) {
            ori(dst, dst, Operand(u16));
          }
        }
        sldi(dst, dst, Operand(32));
        u16 = ((value >> 16) & 0xffff);
        if (u16) {
          oris(dst, dst, Operand(u16));
        }
      }
#endif
      u16 = (value & 0xffff);
      if (u16) {
        ori(dst, dst, Operand(u16));
      }
    }
    return;
  }

  // Fixed-length fallback (required for relocatable values so the
  // sequence can be patched in place).
  DCHECK(!canOptimize);
  if (relocatable) {
    RecordRelocInfo(src.rmode_);
  }
  bitwise_mov(dst, value);
}
   1701 
   1702 
   1703 void Assembler::bitwise_mov(Register dst, intptr_t value) {
   1704     BlockTrampolinePoolScope block_trampoline_pool(this);
   1705 #if V8_TARGET_ARCH_PPC64
   1706     int32_t hi_32 = static_cast<int32_t>(value >> 32);
   1707     int32_t lo_32 = static_cast<int32_t>(value);
   1708     int hi_word = static_cast<int>(hi_32 >> 16);
   1709     int lo_word = static_cast<int>(hi_32 & 0xffff);
   1710     lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
   1711     ori(dst, dst, Operand(lo_word));
   1712     sldi(dst, dst, Operand(32));
   1713     hi_word = static_cast<int>(((lo_32 >> 16) & 0xffff));
   1714     lo_word = static_cast<int>(lo_32 & 0xffff);
   1715     oris(dst, dst, Operand(hi_word));
   1716     ori(dst, dst, Operand(lo_word));
   1717 #else
   1718     int hi_word = static_cast<int>(value >> 16);
   1719     int lo_word = static_cast<int>(value & 0xffff);
   1720     lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
   1721     ori(dst, dst, Operand(lo_word));
   1722 #endif
   1723 }
   1724 
   1725 
// Loads a 32-bit value into dst with a fixed two-instruction sequence
// (lis + ori) regardless of the value.
void Assembler::bitwise_mov32(Register dst, int32_t value) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  int hi_word = static_cast<int>(value >> 16);
  int lo_word = static_cast<int>(value & 0xffff);
  lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
  ori(dst, dst, Operand(lo_word));
}
   1733 
   1734 
// Adds a 32-bit constant to src with a fixed two-instruction sequence.
// Small values pad with a nop so the sequence length is always two.
void Assembler::bitwise_add32(Register dst, Register src, int32_t value) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (is_int16(value)) {
    addi(dst, src, Operand(value));
    nop();
  } else {
    int hi_word = static_cast<int>(value >> 16);
    int lo_word = static_cast<int>(value & 0xffff);
    // The low half is added sign-extended, so pre-increment the high
    // half to compensate when the low half's sign bit is set.
    if (lo_word & 0x8000) hi_word++;
    addis(dst, src, Operand(SIGN_EXT_IMM16(hi_word)));
    addic(dst, dst, Operand(SIGN_EXT_IMM16(lo_word)));
  }
}
   1748 
   1749 
// Loads into dst the offset of |label| within the code object (header
// included).  For unbound labels a dummy two-instruction marker is
// emitted and patched when the label is bound.
void Assembler::mov_label_offset(Register dst, Label* label) {
  int position = link(label);
  if (label->is_bound()) {
    // Load the position of the label relative to the generated code object.
    mov(dst, Operand(position + Code::kHeaderSize - kHeapObjectTag));
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain.  Encode the destination register in the 2nd instruction.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));

    // When the label is bound, these instructions will be patched
    // with a 2 instruction mov sequence that will load the
    // destination register with the position of the label from the
    // beginning of the code.
    //
    // target_at extracts the link and target_at_put patches the instructions.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundMovLabelOffsetOpcode | (link & kImm26Mask));
    emit(dst.code());
  }
}
   1775 
   1776 
// Computes dst = base + (offset of |label|) + delta.  For unbound labels
// a dummy two-instruction marker encodes dst, base and delta for later
// patching.
void Assembler::add_label_offset(Register dst, Register base, Label* label,
                                 int delta) {
  int position = link(label);
  if (label->is_bound()) {
    // dst = base + position + delta
    position += delta;
    bitwise_add32(dst, base, position);
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain.  Encode the operands in the 2nd instruction.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));
    DCHECK(is_int16(delta));

    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundAddLabelOffsetOpcode | (link & kImm26Mask));
    emit(dst.code() * B21 | base.code() * B16 | (delta & kImm16Mask));
  }
}
   1799 
   1800 
// Loads |dst| with the address of |label|.  The value stays buffer-relative
// until EmitRelocations() converts it to an absolute address.  Unbound
// labels get a placeholder padded with nops to the full mov sequence size.
void Assembler::mov_label_addr(Register dst, Label* label) {
  CheckBuffer();
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
  int position = link(label);
  if (label->is_bound()) {
    // Keep internal references relative until EmitRelocations.
    bitwise_mov(dst, position);
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain.  Encode the destination register in the 2nd instruction.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);  // links are instruction-aligned
    link >>= 2;
    DCHECK(is_int26(link));

    // When the label is bound, these instructions will be patched
    // with a multi-instruction mov sequence that will load the
    // destination register with the address of the label.
    //
    // target_at extracts the link and target_at_put patches the instructions.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundMovLabelAddrOpcode | (link & kImm26Mask));
    emit(dst.code());
    // Pad so the placeholder occupies the same space as the final sequence.
    DCHECK(kMovInstructionsNoConstantPool >= 2);
    for (int i = 0; i < kMovInstructionsNoConstantPool - 2; i++) nop();
  }
}
   1829 
   1830 
// Emits a pointer-sized jump-table entry holding |label|'s address.  The
// entry stays buffer-relative until EmitRelocations() makes it absolute.
void Assembler::emit_label_addr(Label* label) {
  CheckBuffer();
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  int position = link(label);
  if (label->is_bound()) {
    // Keep internal references relative until EmitRelocations.
    dp(position);
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);  // links are instruction-aligned
    link >>= 2;
    DCHECK(is_int26(link));

    // When the label is bound, the instruction(s) will be patched
    // as a jump table entry containing the label address.  target_at extracts
    // the link and target_at_put patches the instruction(s).
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundJumpTableEntryOpcode | (link & kImm26Mask));
#if V8_TARGET_ARCH_PPC64
    // A 64-bit entry occupies two instruction slots.
    nop();
#endif
  }
}
   1857 
   1858 
   1859 // Special register instructions
// Special register instructions
// crxor (XL-form): CR bit bt <- CR bit ba XOR CR bit bb.
void Assembler::crxor(int bt, int ba, int bb) {
  emit(EXT1 | CRXOR | bt * B21 | ba * B16 | bb * B11);
}
   1863 
   1864 
// creqv (XL-form): CR bit bt <- NOT(CR bit ba XOR CR bit bb).
void Assembler::creqv(int bt, int ba, int bb) {
  emit(EXT1 | CREQV | bt * B21 | ba * B16 | bb * B11);
}
   1868 
   1869 
// mflr: dst <- LR.  256 << 11 is the split (low5||high5) SPR field
// encoding of SPR 8 (the link register).
void Assembler::mflr(Register dst) {
  emit(EXT2 | MFSPR | dst.code() * B21 | 256 << 11);  // Ignore RC bit
}
   1873 
   1874 
// mtlr: LR <- src (split-field encoding of SPR 8, as in mflr).
void Assembler::mtlr(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 256 << 11);  // Ignore RC bit
}
   1878 
   1879 
// mtctr: CTR <- src.  288 << 11 is the split-field encoding of SPR 9 (CTR).
void Assembler::mtctr(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 288 << 11);  // Ignore RC bit
}
   1883 
   1884 
// mtxer: XER <- src.  32 << 11 is the split-field encoding of SPR 1 (XER).
void Assembler::mtxer(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 32 << 11);
}
   1888 
   1889 
   1890 void Assembler::mcrfs(CRegister cr, FPSCRBit bit) {
   1891   DCHECK(static_cast<int>(bit) < 32);
   1892   int bf = cr.code();
   1893   int bfa = bit / CRWIDTH;
   1894   emit(EXT4 | MCRFS | bf * B23 | bfa * B18);
   1895 }
   1896 
   1897 
   1898 void Assembler::mfcr(Register dst) { emit(EXT2 | MFCR | dst.code() * B21); }
   1899 
   1900 
   1901 #if V8_TARGET_ARCH_PPC64
// mfvsrd: move the 64-bit doubleword of FP/VSR register |src| to GPR |dst|.
void Assembler::mffprd(Register dst, DoubleRegister src) {
  emit(EXT2 | MFVSRD | src.code() * B21 | dst.code() * B16);
}
   1905 
   1906 
// mfvsrwz: move the low word of FP/VSR |src| to GPR |dst|, zero-extended.
void Assembler::mffprwz(Register dst, DoubleRegister src) {
  emit(EXT2 | MFVSRWZ | src.code() * B21 | dst.code() * B16);
}
   1910 
   1911 
// mtvsrd: move GPR |src| (64 bits) into FP/VSR register |dst|.
void Assembler::mtfprd(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRD | dst.code() * B21 | src.code() * B16);
}
   1915 
   1916 
// mtvsrwz: move the low word of GPR |src| into FP/VSR |dst|, zero-extended.
void Assembler::mtfprwz(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRWZ | dst.code() * B21 | src.code() * B16);
}
   1920 
   1921 
// mtvsrwa: move the low word of GPR |src| into FP/VSR |dst|, sign-extended.
void Assembler::mtfprwa(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRWA | dst.code() * B21 | src.code() * B16);
}
   1925 #endif
   1926 
   1927 
   1928 // Exception-generating instructions and debugging support.
   1929 // Stops with a non-negative code less than kNumOfWatchedStops support
   1930 // enabling/disabling and a counter feature. See simulator-ppc.h .
   1931 void Assembler::stop(const char* msg, Condition cond, int32_t code,
   1932                      CRegister cr) {
   1933   if (cond != al) {
   1934     Label skip;
   1935     b(NegateCondition(cond), &skip, cr);
   1936     bkpt(0);
   1937     bind(&skip);
   1938   } else {
   1939     bkpt(0);
   1940   }
   1941 }
   1942 
   1943 
   1944 void Assembler::bkpt(uint32_t imm16) { emit(0x7d821008); }
   1945 
   1946 
// dcbf: flush the data cache block at address ra + rb.
void Assembler::dcbf(Register ra, Register rb) {
  emit(EXT2 | DCBF | ra.code() * B16 | rb.code() * B11);
}
   1950 
   1951 
   1952 void Assembler::sync() { emit(EXT2 | SYNC); }
   1953 
   1954 
   1955 void Assembler::lwsync() { emit(EXT2 | SYNC | 1 * B21); }
   1956 
   1957 
// icbi: invalidate the instruction cache block at address ra + rb.
void Assembler::icbi(Register ra, Register rb) {
  emit(EXT2 | ICBI | ra.code() * B16 | rb.code() * B11);
}
   1961 
   1962 
   1963 void Assembler::isync() { emit(EXT1 | ISYNC); }
   1964 
   1965 
   1966 // Floating point support
   1967 
   1968 void Assembler::lfd(const DoubleRegister frt, const MemOperand& src) {
   1969   int offset = src.offset();
   1970   Register ra = src.ra();
   1971   DCHECK(!ra.is(r0));
   1972   CHECK(is_int16(offset));
   1973   int imm16 = offset & kImm16Mask;
   1974   // could be x_form instruction with some casting magic
   1975   emit(LFD | frt.code() * B21 | ra.code() * B16 | imm16);
   1976 }
   1977 
   1978 
   1979 void Assembler::lfdu(const DoubleRegister frt, const MemOperand& src) {
   1980   int offset = src.offset();
   1981   Register ra = src.ra();
   1982   DCHECK(!ra.is(r0));
   1983   CHECK(is_int16(offset));
   1984   int imm16 = offset & kImm16Mask;
   1985   // could be x_form instruction with some casting magic
   1986   emit(LFDU | frt.code() * B21 | ra.code() * B16 | imm16);
   1987 }
   1988 
   1989 
   1990 void Assembler::lfdx(const DoubleRegister frt, const MemOperand& src) {
   1991   Register ra = src.ra();
   1992   Register rb = src.rb();
   1993   DCHECK(!ra.is(r0));
   1994   emit(EXT2 | LFDX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   1995        LeaveRC);
   1996 }
   1997 
   1998 
   1999 void Assembler::lfdux(const DoubleRegister frt, const MemOperand& src) {
   2000   Register ra = src.ra();
   2001   Register rb = src.rb();
   2002   DCHECK(!ra.is(r0));
   2003   emit(EXT2 | LFDUX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   2004        LeaveRC);
   2005 }
   2006 
   2007 
   2008 void Assembler::lfs(const DoubleRegister frt, const MemOperand& src) {
   2009   int offset = src.offset();
   2010   Register ra = src.ra();
   2011   CHECK(is_int16(offset));
   2012   DCHECK(!ra.is(r0));
   2013   int imm16 = offset & kImm16Mask;
   2014   // could be x_form instruction with some casting magic
   2015   emit(LFS | frt.code() * B21 | ra.code() * B16 | imm16);
   2016 }
   2017 
   2018 
   2019 void Assembler::lfsu(const DoubleRegister frt, const MemOperand& src) {
   2020   int offset = src.offset();
   2021   Register ra = src.ra();
   2022   CHECK(is_int16(offset));
   2023   DCHECK(!ra.is(r0));
   2024   int imm16 = offset & kImm16Mask;
   2025   // could be x_form instruction with some casting magic
   2026   emit(LFSU | frt.code() * B21 | ra.code() * B16 | imm16);
   2027 }
   2028 
   2029 
   2030 void Assembler::lfsx(const DoubleRegister frt, const MemOperand& src) {
   2031   Register ra = src.ra();
   2032   Register rb = src.rb();
   2033   DCHECK(!ra.is(r0));
   2034   emit(EXT2 | LFSX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   2035        LeaveRC);
   2036 }
   2037 
   2038 
   2039 void Assembler::lfsux(const DoubleRegister frt, const MemOperand& src) {
   2040   Register ra = src.ra();
   2041   Register rb = src.rb();
   2042   DCHECK(!ra.is(r0));
   2043   emit(EXT2 | LFSUX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   2044        LeaveRC);
   2045 }
   2046 
   2047 
   2048 void Assembler::stfd(const DoubleRegister frs, const MemOperand& src) {
   2049   int offset = src.offset();
   2050   Register ra = src.ra();
   2051   CHECK(is_int16(offset));
   2052   DCHECK(!ra.is(r0));
   2053   int imm16 = offset & kImm16Mask;
   2054   // could be x_form instruction with some casting magic
   2055   emit(STFD | frs.code() * B21 | ra.code() * B16 | imm16);
   2056 }
   2057 
   2058 
   2059 void Assembler::stfdu(const DoubleRegister frs, const MemOperand& src) {
   2060   int offset = src.offset();
   2061   Register ra = src.ra();
   2062   CHECK(is_int16(offset));
   2063   DCHECK(!ra.is(r0));
   2064   int imm16 = offset & kImm16Mask;
   2065   // could be x_form instruction with some casting magic
   2066   emit(STFDU | frs.code() * B21 | ra.code() * B16 | imm16);
   2067 }
   2068 
   2069 
   2070 void Assembler::stfdx(const DoubleRegister frs, const MemOperand& src) {
   2071   Register ra = src.ra();
   2072   Register rb = src.rb();
   2073   DCHECK(!ra.is(r0));
   2074   emit(EXT2 | STFDX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   2075        LeaveRC);
   2076 }
   2077 
   2078 
   2079 void Assembler::stfdux(const DoubleRegister frs, const MemOperand& src) {
   2080   Register ra = src.ra();
   2081   Register rb = src.rb();
   2082   DCHECK(!ra.is(r0));
   2083   emit(EXT2 | STFDUX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   2084        LeaveRC);
   2085 }
   2086 
   2087 
   2088 void Assembler::stfs(const DoubleRegister frs, const MemOperand& src) {
   2089   int offset = src.offset();
   2090   Register ra = src.ra();
   2091   CHECK(is_int16(offset));
   2092   DCHECK(!ra.is(r0));
   2093   int imm16 = offset & kImm16Mask;
   2094   // could be x_form instruction with some casting magic
   2095   emit(STFS | frs.code() * B21 | ra.code() * B16 | imm16);
   2096 }
   2097 
   2098 
   2099 void Assembler::stfsu(const DoubleRegister frs, const MemOperand& src) {
   2100   int offset = src.offset();
   2101   Register ra = src.ra();
   2102   CHECK(is_int16(offset));
   2103   DCHECK(!ra.is(r0));
   2104   int imm16 = offset & kImm16Mask;
   2105   // could be x_form instruction with some casting magic
   2106   emit(STFSU | frs.code() * B21 | ra.code() * B16 | imm16);
   2107 }
   2108 
   2109 
   2110 void Assembler::stfsx(const DoubleRegister frs, const MemOperand& src) {
   2111   Register ra = src.ra();
   2112   Register rb = src.rb();
   2113   DCHECK(!ra.is(r0));
   2114   emit(EXT2 | STFSX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   2115        LeaveRC);
   2116 }
   2117 
   2118 
   2119 void Assembler::stfsux(const DoubleRegister frs, const MemOperand& src) {
   2120   Register ra = src.ra();
   2121   Register rb = src.rb();
   2122   DCHECK(!ra.is(r0));
   2123   emit(EXT2 | STFSUX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   2124        LeaveRC);
   2125 }
   2126 
   2127 
// fsub (A-form): frt = fra - frb.
void Assembler::fsub(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FSUB, frt, fra, frb, rc);
}
   2132 
   2133 
// fadd (A-form): frt = fra + frb.
void Assembler::fadd(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FADD, frt, fra, frb, rc);
}
   2138 
   2139 
// fmul (A-form): frt = fra * frc.  Note the multiplicand occupies the FRC
// field (B6), not FRB, so a_form() is not applicable here.
void Assembler::fmul(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frc, RCBit rc) {
  emit(EXT4 | FMUL | frt.code() * B21 | fra.code() * B16 | frc.code() * B6 |
       rc);
}
   2145 
   2146 
// fdiv (A-form): frt = fra / frb.
void Assembler::fdiv(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FDIV, frt, fra, frb, rc);
}
   2151 
   2152 
// fcmpu: unordered floating-point compare of fra with frb; the result is
// written to CR field |cr|.
void Assembler::fcmpu(const DoubleRegister fra, const DoubleRegister frb,
                      CRegister cr) {
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT4 | FCMPU | cr.code() * B23 | fra.code() * B16 | frb.code() * B11);
}
   2158 
   2159 
// fmr: register move, frt <- frb.
void Assembler::fmr(const DoubleRegister frt, const DoubleRegister frb,
                    RCBit rc) {
  emit(EXT4 | FMR | frt.code() * B21 | frb.code() * B11 | rc);
}
   2164 
   2165 
// fctiwz: convert frb to a 32-bit integer, rounding toward zero.
void Assembler::fctiwz(const DoubleRegister frt, const DoubleRegister frb) {
  emit(EXT4 | FCTIWZ | frt.code() * B21 | frb.code() * B11);
}
   2169 
   2170 
// fctiw: convert frb to a 32-bit integer using the current rounding mode.
void Assembler::fctiw(const DoubleRegister frt, const DoubleRegister frb) {
  emit(EXT4 | FCTIW | frt.code() * B21 | frb.code() * B11);
}
   2174 
   2175 
// frin: round frb to the nearest integral value (ties away from zero).
void Assembler::frin(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIN | frt.code() * B21 | frb.code() * B11 | rc);
}
   2180 
   2181 
// friz: round frb to an integral value toward zero (truncate).
void Assembler::friz(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIZ | frt.code() * B21 | frb.code() * B11 | rc);
}
   2186 
   2187 
// frip: round frb to an integral value toward +infinity (ceil).
void Assembler::frip(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIP | frt.code() * B21 | frb.code() * B11 | rc);
}
   2192 
   2193 
// frim: round frb to an integral value toward -infinity (floor).
void Assembler::frim(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIM | frt.code() * B21 | frb.code() * B11 | rc);
}
   2198 
   2199 
// frsp: round frb to single precision.
void Assembler::frsp(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRSP | frt.code() * B21 | frb.code() * B11 | rc);
}
   2204 
   2205 
// fcfid: convert a signed 64-bit integer in frb to double precision.
void Assembler::fcfid(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);
}
   2210 
   2211 
// fcfidu: convert an unsigned 64-bit integer in frb to double precision.
void Assembler::fcfidu(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCFIDU | frt.code() * B21 | frb.code() * B11 | rc);
}
   2216 
   2217 
// fcfidus: unsigned 64-bit integer to single precision.  The single-
// precision variants use the EXT3 primary opcode instead of EXT4.
void Assembler::fcfidus(const DoubleRegister frt, const DoubleRegister frb,
                        RCBit rc) {
  emit(EXT3 | FCFIDU | frt.code() * B21 | frb.code() * B11 | rc);
}
   2222 
   2223 
// fcfids: signed 64-bit integer to single precision (EXT3 primary opcode).
void Assembler::fcfids(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT3 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);
}
   2228 
   2229 
// fctid: convert frb to a signed 64-bit integer, current rounding mode.
void Assembler::fctid(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FCTID | frt.code() * B21 | frb.code() * B11 | rc);
}
   2234 
   2235 
// fctidz: convert frb to a signed 64-bit integer, rounding toward zero.
void Assembler::fctidz(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCTIDZ | frt.code() * B21 | frb.code() * B11 | rc);
}
   2240 
   2241 
// fctidu: convert frb to an unsigned 64-bit integer, current rounding mode.
void Assembler::fctidu(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCTIDU | frt.code() * B21 | frb.code() * B11 | rc);
}
   2246 
   2247 
// fctiduz: convert frb to an unsigned 64-bit integer, rounding toward zero.
void Assembler::fctiduz(const DoubleRegister frt, const DoubleRegister frb,
                        RCBit rc) {
  emit(EXT4 | FCTIDUZ | frt.code() * B21 | frb.code() * B11 | rc);
}
   2252 
   2253 
// fsel (A-form): frt = (fra >= 0.0) ? frc : frb.
void Assembler::fsel(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frc, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FSEL | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}
   2260 
   2261 
// fneg: frt = -frb.
void Assembler::fneg(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FNEG | frt.code() * B21 | frb.code() * B11 | rc);
}
   2266 
   2267 
   2268 void Assembler::mtfsb0(FPSCRBit bit, RCBit rc) {
   2269   DCHECK(static_cast<int>(bit) < 32);
   2270   int bt = bit;
   2271   emit(EXT4 | MTFSB0 | bt * B21 | rc);
   2272 }
   2273 
   2274 
   2275 void Assembler::mtfsb1(FPSCRBit bit, RCBit rc) {
   2276   DCHECK(static_cast<int>(bit) < 32);
   2277   int bt = bit;
   2278   emit(EXT4 | MTFSB1 | bt * B21 | rc);
   2279 }
   2280 
   2281 
// mtfsfi: set FPSCR field |bf| to the 4-bit |immediate|.
void Assembler::mtfsfi(int bf, int immediate, RCBit rc) {
  emit(EXT4 | MTFSFI | bf * B23 | immediate * B12 | rc);
}
   2285 
   2286 
// mffs: copy the FPSCR into frt.
void Assembler::mffs(const DoubleRegister frt, RCBit rc) {
  emit(EXT4 | MFFS | frt.code() * B21 | rc);
}
   2290 
   2291 
// mtfsf: copy fields of frb into the FPSCR under the 8-bit mask FLM.
// Per the Power ISA, L=1 updates the entire FPSCR regardless of FLM, and
// W selects the upper word on 64-bit-FPSCR implementations.
void Assembler::mtfsf(const DoubleRegister frb, bool L, int FLM, bool W,
                      RCBit rc) {
  emit(EXT4 | MTFSF | frb.code() * B11 | W * B16 | FLM * B17 | L * B25 | rc);
}
   2296 
   2297 
// fsqrt: frt = square root of frb.
void Assembler::fsqrt(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FSQRT | frt.code() * B21 | frb.code() * B11 | rc);
}
   2302 
   2303 
// fabs: frt = |frb|.
void Assembler::fabs(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FABS | frt.code() * B21 | frb.code() * B11 | rc);
}
   2308 
   2309 
// fmadd (A-form): frt = fra * frc + frb (fused multiply-add).
void Assembler::fmadd(const DoubleRegister frt, const DoubleRegister fra,
                      const DoubleRegister frc, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FMADD | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}
   2316 
   2317 
// fmsub (A-form): frt = fra * frc - frb (fused multiply-subtract).
void Assembler::fmsub(const DoubleRegister frt, const DoubleRegister fra,
                      const DoubleRegister frc, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FMSUB | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}
   2324 
   2325 
   2326 // Pseudo instructions.
   2327 void Assembler::nop(int type) {
   2328   Register reg = r0;
   2329   switch (type) {
   2330     case NON_MARKING_NOP:
   2331       reg = r0;
   2332       break;
   2333     case GROUP_ENDING_NOP:
   2334       reg = r2;
   2335       break;
   2336     case DEBUG_BREAK_NOP:
   2337       reg = r3;
   2338       break;
   2339     default:
   2340       UNIMPLEMENTED();
   2341   }
   2342 
   2343   ori(reg, reg, Operand::Zero());
   2344 }
   2345 
   2346 
   2347 bool Assembler::IsNop(Instr instr, int type) {
   2348   int reg = 0;
   2349   switch (type) {
   2350     case NON_MARKING_NOP:
   2351       reg = 0;
   2352       break;
   2353     case GROUP_ENDING_NOP:
   2354       reg = 2;
   2355       break;
   2356     case DEBUG_BREAK_NOP:
   2357       reg = 3;
   2358       break;
   2359     default:
   2360       UNIMPLEMENTED();
   2361   }
   2362   return instr == (ORI | reg * B21 | reg * B16);
   2363 }
   2364 
   2365 
// Grows the code buffer so at least |needed| more bytes fit: 4KB minimum,
// doubling while under 1MB, +1MB steps afterwards.  Instructions live at
// the low end and reloc info at the high end; both are moved wholesale.
// Internal references are kept buffer-relative, so no pointer fix-ups are
// needed beyond repositioning pc_ and the reloc writer.
void Assembler::GrowBuffer(int needed) {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 4 * KB) {
    desc.buffer_size = 4 * KB;
  } else if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2 * buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1 * MB;
  }
  // Bump further if the default growth step still leaves too little room.
  int space = buffer_space() + (desc.buffer_size - buffer_size_);
  if (space < needed) {
    desc.buffer_size += needed - space;
  }
  CHECK_GT(desc.buffer_size, 0);  // no overflow

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);
  desc.origin = this;

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.
  intptr_t pc_delta = desc.buffer - buffer_;
  intptr_t rc_delta =
      (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Nothing else to do here since we keep all internal references and
  // deferred relocation entries relative to the buffer (until
  // EmitRelocations).
}
   2411 
   2412 
   2413 void Assembler::db(uint8_t data) {
   2414   CheckBuffer();
   2415   *reinterpret_cast<uint8_t*>(pc_) = data;
   2416   pc_ += sizeof(uint8_t);
   2417 }
   2418 
   2419 
   2420 void Assembler::dd(uint32_t data) {
   2421   CheckBuffer();
   2422   *reinterpret_cast<uint32_t*>(pc_) = data;
   2423   pc_ += sizeof(uint32_t);
   2424 }
   2425 
   2426 
   2427 void Assembler::dq(uint64_t value) {
   2428   CheckBuffer();
   2429   *reinterpret_cast<uint64_t*>(pc_) = value;
   2430   pc_ += sizeof(uint64_t);
   2431 }
   2432 
   2433 
   2434 void Assembler::dp(uintptr_t data) {
   2435   CheckBuffer();
   2436   *reinterpret_cast<uintptr_t*>(pc_) = data;
   2437   pc_ += sizeof(uintptr_t);
   2438 }
   2439 
   2440 
// Queues a relocation record for the current pc.  Entries are buffered in
// relocations_ and only written to the reloc stream by EmitRelocations(),
// after all labels are bound.
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  if (RelocInfo::IsNone(rmode) ||
      // Don't record external references unless the heap will be serialized.
      (rmode == RelocInfo::EXTERNAL_REFERENCE && !serializer_enabled() &&
       !emit_debug_code())) {
    return;
  }
  if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
    // The payload for a code target with id is the recorded AST id.
    data = RecordedAstId().ToInt();
    ClearRecordedAstId();
  }
  DeferredRelocInfo rinfo(pc_offset(), rmode, data);
  relocations_.push_back(rinfo);
}
   2455 
   2456 
// Writes all deferred relocation entries to the reloc info writer.  Along
// the way, buffer-relative internal references (kept relative since the
// labels were bound) are converted into absolute buffer addresses.
void Assembler::EmitRelocations() {
  EnsureSpaceFor(relocations_.size() * kMaxRelocSize);

  for (std::vector<DeferredRelocInfo>::iterator it = relocations_.begin();
       it != relocations_.end(); it++) {
    RelocInfo::Mode rmode = it->rmode();
    Address pc = buffer_ + it->position();
    Code* code = NULL;
    RelocInfo rinfo(isolate(), pc, rmode, it->data(), code);

    // Fix up internal references now that they are guaranteed to be bound.
    if (RelocInfo::IsInternalReference(rmode)) {
      // Jump table entry
      intptr_t pos = reinterpret_cast<intptr_t>(Memory::Address_at(pc));
      Memory::Address_at(pc) = buffer_ + pos;
    } else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
      // mov sequence
      intptr_t pos = reinterpret_cast<intptr_t>(target_address_at(pc, code));
      set_target_address_at(isolate(), pc, code, buffer_ + pos,
                            SKIP_ICACHE_FLUSH);
    }

    reloc_info_writer.Write(&rinfo);
  }
}
   2482 
   2483 
// Blocks trampoline-pool emission for the next |instructions| instructions'
// worth of code from the current pc.
void Assembler::BlockTrampolinePoolFor(int instructions) {
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}
   2487 
   2488 
// Emits the trampoline pool (one unconditional branch slot per tracked
// branch) when allowed.  The pool is emitted at most once per Assembler;
// afterwards next_trampoline_check_ is pushed out to kMaxInt.
void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if (trampoline_pool_blocked_nesting_ > 0) return;
  if (pc_offset() < no_trampoline_pool_before_) {
    next_trampoline_check_ = no_trampoline_pool_before_;
    return;
  }

  DCHECK(!trampoline_emitted_);
  if (tracked_branch_count_ > 0) {
    int size = tracked_branch_count_ * kInstrSize;

    // As we are only going to emit trampoline once, we need to prevent any
    // further emission.
    trampoline_emitted_ = true;
    next_trampoline_check_ = kMaxInt;

    // First we emit jump, then we emit trampoline pool.
    b(size + kInstrSize, LeaveLK);
    for (int i = size; i > 0; i -= kInstrSize) {
      b(i, LeaveLK);
    }

    trampoline_ = Trampoline(pc_offset() - size, tracked_branch_count_);
  }
}
   2519 
   2520 
   2521 }  // namespace internal
   2522 }  // namespace v8
   2523 
   2524 #endif  // V8_TARGET_ARCH_PPC
   2525