Home | History | Annotate | Download | only in ppc
      1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
      2 // All Rights Reserved.
      3 //
      4 // Redistribution and use in source and binary forms, with or without
      5 // modification, are permitted provided that the following conditions
      6 // are met:
      7 //
      8 // - Redistributions of source code must retain the above copyright notice,
      9 // this list of conditions and the following disclaimer.
     10 //
     11 // - Redistribution in binary form must reproduce the above copyright
     12 // notice, this list of conditions and the following disclaimer in the
     13 // documentation and/or other materials provided with the
     14 // distribution.
     15 //
     16 // - Neither the name of Sun Microsystems or the names of contributors may
     17 // be used to endorse or promote products derived from this software without
     18 // specific prior written permission.
     19 //
     20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
     23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
     24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
     25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
     26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
     29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
     31 // OF THE POSSIBILITY OF SUCH DAMAGE.
     32 
     33 // The original source code covered by the above license above has been
     34 // modified significantly by Google Inc.
     35 // Copyright 2014 the V8 project authors. All rights reserved.
     36 
     37 #include "src/ppc/assembler-ppc.h"
     38 
     39 #if V8_TARGET_ARCH_PPC
     40 
     41 #include "src/base/bits.h"
     42 #include "src/base/cpu.h"
     43 #include "src/macro-assembler.h"
     44 #include "src/ppc/assembler-ppc-inl.h"
     45 
     46 namespace v8 {
     47 namespace internal {
     48 
     49 // Get the CPU features enabled by the build.
     50 static unsigned CpuFeaturesImpliedByCompiler() {
     51   unsigned answer = 0;
     52   return answer;
     53 }
     54 
     55 
void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();
  // Default instruction-cache line size; may be refined from the OS below.
  icache_line_size_ = 128;

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

// Detect whether frim instruction is supported (POWER5+)
// For now we will just check for processors we know do not
// support it
#ifndef USE_SIMULATOR
  // Probe for additional features at runtime.
  base::CPU cpu;
#if V8_TARGET_ARCH_PPC64
  // Direct FPR<->GPR moves are only enabled when running on POWER8.
  if (cpu.part() == base::CPU::PPC_POWER8) {
    supported_ |= (1u << FPR_GPR_MOV);
  }
#endif
  // Treat POWER6/7/8 as supporting lwsync.
  if (cpu.part() == base::CPU::PPC_POWER6 ||
      cpu.part() == base::CPU::PPC_POWER7 ||
      cpu.part() == base::CPU::PPC_POWER8) {
    supported_ |= (1u << LWSYNC);
  }
  // Treat POWER7/8 as supporting isel.
  if (cpu.part() == base::CPU::PPC_POWER7 ||
      cpu.part() == base::CPU::PPC_POWER8) {
    supported_ |= (1u << ISELECT);
  }
#if V8_OS_LINUX
  if (!(cpu.part() == base::CPU::PPC_G5 || cpu.part() == base::CPU::PPC_G4)) {
    // Assume support
    supported_ |= (1u << FPU);
  }
  // Prefer the OS-reported icache line size when available.
  if (cpu.icache_line_size() != base::CPU::UNKNOWN_CACHE_LINE_SIZE) {
    icache_line_size_ = cpu.icache_line_size();
  }
#elif V8_OS_AIX
  // Assume support FP support and default cache line size
  supported_ |= (1u << FPU);
#endif
#else  // Simulator
  // The simulator implements all probed features.
  supported_ |= (1u << FPU);
  supported_ |= (1u << LWSYNC);
  supported_ |= (1u << ISELECT);
#if V8_TARGET_ARCH_PPC64
  supported_ |= (1u << FPR_GPR_MOV);
#endif
#endif
}
    104 
    105 
    106 void CpuFeatures::PrintTarget() {
    107   const char* ppc_arch = NULL;
    108 
    109 #if V8_TARGET_ARCH_PPC64
    110   ppc_arch = "ppc64";
    111 #else
    112   ppc_arch = "ppc";
    113 #endif
    114 
    115   printf("target %s\n", ppc_arch);
    116 }
    117 
    118 
// Prints the probed feature set; currently only the FPU bit is reported.
void CpuFeatures::PrintFeatures() {
  printf("FPU=%d\n", CpuFeatures::IsSupported(FPU));
}
    122 
    123 
    124 Register ToRegister(int num) {
    125   DCHECK(num >= 0 && num < kNumRegisters);
    126   const Register kRegisters[] = {r0,  sp,  r2,  r3,  r4,  r5,  r6,  r7,
    127                                  r8,  r9,  r10, r11, ip,  r13, r14, r15,
    128                                  r16, r17, r18, r19, r20, r21, r22, r23,
    129                                  r24, r25, r26, r27, r28, r29, r30, fp};
    130   return kRegisters[num];
    131 }
    132 
    133 
    134 // -----------------------------------------------------------------------------
    135 // Implementation of RelocInfo
    136 
// Only internal references need fixing up when generated code is moved.
const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE |
                                  1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;
    139 
    140 
bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially
  // coded.  Being specially coded on PPC means that it is a lis/ori
  // instruction sequence or is a constant pool entry, and these are
  // always the case inside code objects.
  return true;
}


bool RelocInfo::IsInConstantPool() {
  if (FLAG_enable_embedded_constant_pool) {
    // A null constant pool pointer means the host code object has no
    // embedded constant pool, so pc_ cannot point into one.
    Address constant_pool = host_->constant_pool();
    return (constant_pool && Assembler::IsConstantPoolLoadStart(pc_));
  }
  return false;
}
    157 
// Reads the wasm memory base address embedded at this reloc site.
Address RelocInfo::wasm_memory_reference() {
  DCHECK(IsWasmMemoryReference(rmode_));
  return Assembler::target_address_at(pc_, host_);
}

// Reads the wasm memory size embedded at this reloc site.  The value is
// stored via the target-address mechanism, hence the pointer round-trip.
uint32_t RelocInfo::wasm_memory_size_reference() {
  DCHECK(IsWasmMemorySizeReference(rmode_));
  return static_cast<uint32_t>(
     reinterpret_cast<intptr_t>(Assembler::target_address_at(pc_, host_)));
}

// Reads the wasm global address embedded at this reloc site.
Address RelocInfo::wasm_global_reference() {
  DCHECK(IsWasmGlobalReference(rmode_));
  return Assembler::target_address_at(pc_, host_);
}


// Rewrites the embedded memory base address without checking rmode_
// (callers have already validated it).
void RelocInfo::unchecked_update_wasm_memory_reference(
    Address address, ICacheFlushMode flush_mode) {
  Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
}

// Rewrites the embedded memory size; the size travels through the
// target-address mechanism as a pointer-width value.
void RelocInfo::unchecked_update_wasm_memory_size(uint32_t size,
                                                  ICacheFlushMode flush_mode) {
  Assembler::set_target_address_at(isolate_, pc_, host_,
                                   reinterpret_cast<Address>(size), flush_mode);
}
    185 
    186 // -----------------------------------------------------------------------------
    187 // Implementation of Operand and MemOperand
    188 // See assembler-ppc-inl.h for inlined constructors
    189 
// Builds an immediate operand from a handle.  Heap objects are embedded via
// their handle location (and relocated as EMBEDDED_OBJECT); smis are
// embedded directly with no relocation.
Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    imm_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // no relocation needed
    imm_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = kRelocInfo_NONEPTR;
  }
}
    205 
    206 
    207 MemOperand::MemOperand(Register rn, int32_t offset) {
    208   ra_ = rn;
    209   rb_ = no_reg;
    210   offset_ = offset;
    211 }
    212 
    213 
    214 MemOperand::MemOperand(Register ra, Register rb) {
    215   ra_ = ra;
    216   rb_ = rb;
    217   offset_ = 0;
    218 }
    219 
    220 
    221 // -----------------------------------------------------------------------------
    222 // Specific instructions, constants, and masks.
    223 
    224 
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      constant_pool_builder_(kLoadPtrMaxReachBits, kLoadDoubleMaxReachBits),
      positions_recorder_(this) {
  // Relocation info is written backwards from the end of the buffer.
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

  // Trampoline-pool bookkeeping starts in the "nothing blocked" state.
  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  constant_pool_entry_sharing_blocked_nesting_ = 0;
  next_trampoline_check_ = kMaxInt;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;
  optimizable_cmpi_pos_ = -1;
  // With --force-long-branches we behave as if the trampoline pool had
  // already been emitted, so every branch is generated in long form.
  trampoline_emitted_ = FLAG_force_long_branches;
  tracked_branch_count_ = 0;
  ClearRecordedAstId();
  relocations_.reserve(128);
}
    244 
    245 
// Finalizes assembly: flushes the constant pool and relocation info, then
// fills in |desc| describing the generated code.
void Assembler::GetCode(CodeDesc* desc) {
  // Emit constant pool if necessary.
  int constant_pool_offset = EmitConstantPool();

  EmitRelocations();

  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  // Reloc info grows downward from the buffer end.
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
  // A zero offset means no constant pool was emitted.
  desc->constant_pool_size =
      (constant_pool_offset ? desc->instr_size - constant_pool_offset : 0);
  desc->origin = this;
  desc->unwinding_info_size = 0;
  desc->unwinding_info = nullptr;
}
    263 
    264 
// Pads with nops until pc_offset() is a multiple of m.  m must be a power
// of two and at least instruction-sized; the pc must already be
// instruction-aligned.
void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
  DCHECK((pc_offset() & (kInstrSize - 1)) == 0);
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


// Aligns code targets to an 8-byte boundary.
void Assembler::CodeTargetAlign() { Align(8); }
    275 
    276 
// Decodes the condition from a branch instruction's BO/BI field.  Only the
// eq/ne encodings used by this assembler are recognized.
Condition Assembler::GetCondition(Instr instr) {
  switch (instr & kCondMask) {
    case BT:
      return eq;
    case BF:
      return ne;
    default:
      UNIMPLEMENTED();
  }
  return al;  // unreachable; placates the compiler
}
    288 
    289 
// lis is addis with ra == r0 (hardware treats the RA field as literal 0).
bool Assembler::IsLis(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIS) && GetRA(instr).is(r0);
}


// li is addi with ra == r0 (literal 0).
bool Assembler::IsLi(Instr instr) {
  return ((instr & kOpcodeMask) == ADDI) && GetRA(instr).is(r0);
}


bool Assembler::IsAddic(Instr instr) { return (instr & kOpcodeMask) == ADDIC; }


bool Assembler::IsOri(Instr instr) { return (instr & kOpcodeMask) == ORI; }


// True for any conditional branch (bc and variants).
bool Assembler::IsBranch(Instr instr) { return ((instr & kOpcodeMask) == BCX); }


// Extracts the RA field of |instr| as a Register.
Register Assembler::GetRA(Instr instr) {
  Register reg;
  reg.reg_code = Instruction::RAValue(instr);
  return reg;
}


// Extracts the RB field of |instr| as a Register.
Register Assembler::GetRB(Instr instr) {
  Register reg;
  reg.reg_code = Instruction::RBValue(instr);
  return reg;
}
    321 
    322 
#if V8_TARGET_ARCH_PPC64
// This code assumes a FIXED_SEQUENCE for 64bit loads (lis/ori)
bool Assembler::Is64BitLoadIntoR12(Instr instr1, Instr instr2, Instr instr3,
                                   Instr instr4, Instr instr5) {
  // Check the instructions are indeed a five part load (into r12)
  // 3d800000       lis     r12, 0
  // 618c0000       ori     r12, r12, 0
  // 798c07c6       rldicr  r12, r12, 32, 31
  // 658c00c3       oris    r12, r12, 195
  // 618ccd40       ori     r12, r12, 52544
  // Immediate fields are ignored; only opcode + register fields (the high
  // halfwords) are compared, except the rldicr which is fully fixed.
  return (((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c) &&
          (instr3 == 0x798c07c6) && ((instr4 >> 16) == 0x658c) &&
          ((instr5 >> 16) == 0x618c));
}
#else
// This code assumes a FIXED_SEQUENCE for 32bit loads (lis/ori)
bool Assembler::Is32BitLoadIntoR12(Instr instr1, Instr instr2) {
  // Check the instruction is indeed a two part load (into r12)
  // 3d802553       lis     r12, 9555
  // 618c5000       ori   r12, r12, 20480
  return (((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c));
}
#endif
    346 
    347 
// cmp is an EXT2 (opcode 31) instruction; check the extended opcode too.
bool Assembler::IsCmpRegister(Instr instr) {
  return (((instr & kOpcodeMask) == EXT2) &&
          ((instr & kExt2OpcodeMask) == CMP));
}


bool Assembler::IsRlwinm(Instr instr) {
  return ((instr & kOpcodeMask) == RLWINMX);
}


// andi. (always records to CR0, hence ANDIx).
bool Assembler::IsAndi(Instr instr) { return ((instr & kOpcodeMask) == ANDIx); }


#if V8_TARGET_ARCH_PPC64
// rldicl lives in the EXT5 (opcode 30) group.
bool Assembler::IsRldicl(Instr instr) {
  return (((instr & kOpcodeMask) == EXT5) &&
          ((instr & kExt5OpcodeMask) == RLDICL));
}
#endif


bool Assembler::IsCmpImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == CMPI);
}


// crset is emitted as creqv with identical operands; only the EXT1/CREQV
// opcodes are checked here.
bool Assembler::IsCrSet(Instr instr) {
  return (((instr & kOpcodeMask) == EXT1) &&
          ((instr & kExt1OpcodeMask) == CREQV));
}


// Register operand (RA) of a cmpi instruction.
Register Assembler::GetCmpImmediateRegister(Instr instr) {
  DCHECK(IsCmpImmediate(instr));
  return GetRA(instr);
}


// Raw (not sign-extended) 16-bit immediate of a cmpi instruction.
int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
  DCHECK(IsCmpImmediate(instr));
  return instr & kOff16Mask;
}
    391 
    392 
    393 // Labels refer to positions in the (to be) generated code.
    394 // There are bound, linked, and unused labels.
    395 //
    396 // Bound labels refer to known positions in the already
    397 // generated code. pos() is the position the label refers to.
    398 //
    399 // Linked labels refer to unknown positions in the code
    400 // to be generated; pos() is the position of the last
    401 // instruction using the label.
    402 
    403 
// The link chain is terminated by a negative code position (must be aligned)
const int kEndOfChain = -4;


// Dummy opcodes for unbound label mov instructions or jump table entries.
// These occupy the primary (6-bit) opcode field, leaving the low 26 bits
// to carry the link to the next reference in the label's chain.
enum {
  kUnboundMovLabelOffsetOpcode = 0 << 26,
  kUnboundAddLabelOffsetOpcode = 1 << 26,
  kUnboundMovLabelAddrOpcode = 2 << 26,
  kUnboundJumpTableEntryOpcode = 3 << 26
};
    415 
    416 
// Returns the position an unbound-label link at |pos| points to, following
// the label's link chain one step, or kEndOfChain at the chain's end.
int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  // check which type of branch this is 16 or 26 bit offset
  int opcode = instr & kOpcodeMask;
  int link;
  switch (opcode) {
    case BX:
      link = SIGN_EXT_IMM26(instr & kImm26Mask);
      link &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
      break;
    case BCX:
      link = SIGN_EXT_IMM16((instr & kImm16Mask));
      link &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
      break;
    case kUnboundMovLabelOffsetOpcode:
    case kUnboundAddLabelOffsetOpcode:
    case kUnboundMovLabelAddrOpcode:
    case kUnboundJumpTableEntryOpcode:
      // Dummy instructions carry the link in the low 26 bits, scaled down
      // by 4; undo the scaling here.
      link = SIGN_EXT_IMM26(instr & kImm26Mask);
      link <<= 2;
      break;
    default:
      DCHECK(false);
      return -1;
  }

  // A zero link means the instruction references itself: end of chain
  // (see Assembler::link, which uses self-reference as the first link).
  if (link == 0) return kEndOfChain;
  return pos + link;
}
    446 
    447 
// Patches the instruction(s) at |pos| so that they reference |target_pos|.
// For real branches the offset is written in place; for the dummy
// unbound-label opcodes the placeholder is replaced by the final
// instruction sequence.  If |is_branch| is non-null it is set to whether
// the patched instruction was a branch.
void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
  Instr instr = instr_at(pos);
  int opcode = instr & kOpcodeMask;

  if (is_branch != nullptr) {
    *is_branch = (opcode == BX || opcode == BCX);
  }

  switch (opcode) {
    case BX: {
      int imm26 = target_pos - pos;
      CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
      if (imm26 == kInstrSize && !(instr & kLKMask)) {
        // Branch to next instr without link.
        instr = ORI;  // nop: ori, 0,0,0
      } else {
        // Preserve AA/LK, replace the 26-bit offset.
        instr &= ((~kImm26Mask) | kAAMask | kLKMask);
        instr |= (imm26 & kImm26Mask);
      }
      instr_at_put(pos, instr);
      break;
    }
    case BCX: {
      int imm16 = target_pos - pos;
      CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
      if (imm16 == kInstrSize && !(instr & kLKMask)) {
        // Branch to next instr without link.
        instr = ORI;  // nop: ori, 0,0,0
      } else {
        // Preserve AA/LK, replace the 16-bit offset.
        instr &= ((~kImm16Mask) | kAAMask | kLKMask);
        instr |= (imm16 & kImm16Mask);
      }
      instr_at_put(pos, instr);
      break;
    }
    case kUnboundMovLabelOffsetOpcode: {
      // Load the position of the label relative to the generated code object
      // pointer in a register.  The destination register code was stashed in
      // the instruction slot following the placeholder.
      Register dst = Register::from_code(instr_at(pos + kInstrSize));
      int32_t offset = target_pos + (Code::kHeaderSize - kHeapObjectTag);
      CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos), 2,
                          CodePatcher::DONT_FLUSH);
      patcher.masm()->bitwise_mov32(dst, offset);
      break;
    }
    case kUnboundAddLabelOffsetOpcode: {
      // dst = base + position + immediate
      // dst/base register codes and the immediate were packed into the
      // following instruction slot.
      Instr operands = instr_at(pos + kInstrSize);
      Register dst = Register::from_code((operands >> 21) & 0x1f);
      Register base = Register::from_code((operands >> 16) & 0x1f);
      int32_t offset = target_pos + SIGN_EXT_IMM16(operands & kImm16Mask);
      CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos), 2,
                          CodePatcher::DONT_FLUSH);
      patcher.masm()->bitwise_add32(dst, base, offset);
      break;
    }
    case kUnboundMovLabelAddrOpcode: {
      // Load the address of the label in a register.
      Register dst = Register::from_code(instr_at(pos + kInstrSize));
      CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
                          kMovInstructionsNoConstantPool,
                          CodePatcher::DONT_FLUSH);
      // Keep internal references relative until EmitRelocations.
      patcher.masm()->bitwise_mov(dst, target_pos);
      break;
    }
    case kUnboundJumpTableEntryOpcode: {
      // Write a pointer-sized jump table entry.
      CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
                          kPointerSize / kInstrSize, CodePatcher::DONT_FLUSH);
      // Keep internal references relative until EmitRelocations.
      patcher.masm()->dp(target_pos);
      break;
    }
    default:
      DCHECK(false);
      break;
  }
}
    526 
    527 
// Returns the signed-offset bit width reachable by the (placeholder)
// instruction at |pos|, or 0 if its reach is unlimited.
int Assembler::max_reach_from(int pos) {
  Instr instr = instr_at(pos);
  int opcode = instr & kOpcodeMask;

  // check which type of branch this is 16 or 26 bit offset
  switch (opcode) {
    case BX:
      return 26;
    case BCX:
      return 16;
    case kUnboundMovLabelOffsetOpcode:
    case kUnboundAddLabelOffsetOpcode:
    case kUnboundMovLabelAddrOpcode:
    case kUnboundJumpTableEntryOpcode:
      return 0;  // no limit on reach
  }

  DCHECK(false);
  return 0;
}
    548 
    549 
// Binds label L to code position |pos|, patching every instruction on the
// label's link chain.  References whose branch form cannot reach |pos|
// are routed through a (lazily allocated) trampoline slot.
void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  int32_t trampoline_pos = kInvalidSlotPos;
  bool is_branch = false;
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    int32_t offset = pos - fixup_pos;
    int maxReach = max_reach_from(fixup_pos);
    next(L);  // call next before overwriting link with target at fixup_pos
    if (maxReach && is_intn(offset, maxReach) == false) {
      // Out of direct reach: share one trampoline slot targeting |pos|
      // among all out-of-range references on this chain.
      if (trampoline_pos == kInvalidSlotPos) {
        trampoline_pos = get_trampoline_entry();
        CHECK(trampoline_pos != kInvalidSlotPos);
        target_at_put(trampoline_pos, pos);
      }
      target_at_put(fixup_pos, trampoline_pos);
    } else {
      target_at_put(fixup_pos, pos, &is_branch);
    }
  }
  L->bind_to(pos);

  // A resolved branch no longer needs trampoline-pool tracking.
  if (!trampoline_emitted_ && is_branch) {
    UntrackBranch();
  }

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_) last_bound_pos_ = pos;
}
    580 
    581 
// Binds L to the current code position.
void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // label can only be bound once
  bind_to(L, pc_offset());
}


// Advances L one step along its link chain; unuses it at the chain's end.
void Assembler::next(Label* L) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos());
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    DCHECK(link >= 0);
    L->link_to(link);
  }
}
    598 
    599 
    600 bool Assembler::is_near(Label* L, Condition cond) {
    601   DCHECK(L->is_bound());
    602   if (L->is_bound() == false) return false;
    603 
    604   int maxReach = ((cond == al) ? 26 : 16);
    605   int offset = L->pos() - pc_offset();
    606 
    607   return is_intn(offset, maxReach);
    608 }
    609 
    610 
// A-form: FP arithmetic.  Register fields are placed by multiplying with
// the B* bit constants (equivalent to shifting into position).
void Assembler::a_form(Instr instr, DoubleRegister frt, DoubleRegister fra,
                       DoubleRegister frb, RCBit r) {
  emit(instr | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 | r);
}


// D-form: rt/ra plus a 16-bit immediate.  |signed_disp| selects whether
// the immediate is range-checked as signed or unsigned; out-of-range
// values are reported before the CHECK aborts.
void Assembler::d_form(Instr instr, Register rt, Register ra,
                       const intptr_t val, bool signed_disp) {
  if (signed_disp) {
    if (!is_int16(val)) {
      PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR "\n", val, val);
    }
    CHECK(is_int16(val));
  } else {
    if (!is_uint16(val)) {
      PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR
             ", is_unsigned_imm16(val)=%d, kImm16Mask=0x%x\n",
             val, val, is_uint16(val), kImm16Mask);
    }
    CHECK(is_uint16(val));
  }
  emit(instr | rt.code() * B21 | ra.code() * B16 | (kImm16Mask & val));
}


// X-form: rs/ra/rb with a record bit.  Note the source register rs goes
// into the high (B21) field.
void Assembler::x_form(Instr instr, Register ra, Register rs, Register rb,
                       RCBit r) {
  emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | r);
}


// XO-form: rt/ra/rb with overflow-enable and record bits.
void Assembler::xo_form(Instr instr, Register rt, Register ra, Register rb,
                        OEBit o, RCBit r) {
  emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | o | r);
}


// MD-form (64-bit rotates): 6-bit shift and mask values are split into
// their low 5 bits and a separate high bit, per the encoding.
void Assembler::md_form(Instr instr, Register ra, Register rs, int shift,
                        int maskbit, RCBit r) {
  int sh0_4 = shift & 0x1f;
  int sh5 = (shift >> 5) & 0x1;
  int m0_4 = maskbit & 0x1f;
  int m5 = (maskbit >> 5) & 0x1;

  emit(instr | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 | m0_4 * B6 |
       m5 * B5 | sh5 * B1 | r);
}


// MDS-form: like MD-form but the shift amount comes from register rb.
void Assembler::mds_form(Instr instr, Register ra, Register rs, Register rb,
                         int maskbit, RCBit r) {
  int m0_4 = maskbit & 0x1f;
  int m5 = (maskbit >> 5) & 0x1;

  emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | m0_4 * B6 |
       m5 * B5 | r);
}
    668 
    669 
    670 // Returns the next free trampoline entry.
    671 int32_t Assembler::get_trampoline_entry() {
    672   int32_t trampoline_entry = kInvalidSlotPos;
    673 
    674   if (!internal_trampoline_exception_) {
    675     trampoline_entry = trampoline_.take_slot();
    676 
    677     if (kInvalidSlotPos == trampoline_entry) {
    678       internal_trampoline_exception_ = true;
    679     }
    680   }
    681   return trampoline_entry;
    682 }
    683 
    684 
// Returns the position to encode for a reference to label L at the current
// pc, linking the label if it is not yet bound.
int Assembler::link(Label* L) {
  int position;
  if (L->is_bound()) {
    position = L->pos();
  } else {
    if (L->is_linked()) {
      position = L->pos();  // L's link
    } else {
      // was: target_pos = kEndOfChain;
      // However, using self to mark the first reference
      // should avoid most instances of branch offset overflow.  See
      // target_at() for where this is converted back to kEndOfChain.
      position = pc_offset();
    }
    // The new reference becomes the head of the label's link chain.
    L->link_to(pc_offset());
  }

  return position;
}
    704 
    705 
    706 // Branch instructions.
    707 
    708 
// Conditional branch to the link register.
void Assembler::bclr(BOfield bo, int condition_bit, LKBit lk) {
  emit(EXT1 | bo | condition_bit * B16 | BCLRX | lk);
}


// Conditional branch to the count register.
void Assembler::bcctr(BOfield bo, int condition_bit, LKBit lk) {
  emit(EXT1 | bo | condition_bit * B16 | BCCTRX | lk);
}


// Pseudo op - branch to link register
void Assembler::blr() { bclr(BA, 0, LeaveLK); }


// Pseudo op - branch to count register -- used for "jump"
void Assembler::bctr() { bcctr(BA, 0, LeaveLK); }


// Pseudo op - branch to count register, setting the link register.
void Assembler::bctrl() { bcctr(BA, 0, SetLK); }


// Conditional relative branch.  The offset must fit in 16 bits and have
// its low two bits clear (they double as the AA/LK instruction bits).
void Assembler::bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk) {
  int imm16 = branch_offset;
  CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
  emit(BCX | bo | condition_bit * B16 | (imm16 & kImm16Mask) | lk);
}


// Unconditional relative branch; 26-bit offset with clear AA/LK bits.
void Assembler::b(int branch_offset, LKBit lk) {
  int imm26 = branch_offset;
  CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
  emit(BX | (imm26 & kImm26Mask) | lk);
}
    742 
    743 
// xori: in the D-form encoding the source register occupies the RT/RS
// field and the destination the RA field, hence the swapped arguments.
void Assembler::xori(Register dst, Register src, const Operand& imm) {
  d_form(XORI, src, dst, imm.imm_, false);
}


void Assembler::xoris(Register ra, Register rs, const Operand& imm) {
  d_form(XORIS, rs, ra, imm.imm_, false);
}


void Assembler::xor_(Register dst, Register src1, Register src2, RCBit rc) {
  x_form(EXT2 | XORX, dst, src1, src2, rc);
}


// Count leading zeros (32-bit); rb field is unused (r0 placeholder).
void Assembler::cntlzw_(Register ra, Register rs, RCBit rc) {
  x_form(EXT2 | CNTLZWX, ra, rs, r0, rc);
}


// Population count (32-bit); no rb operand, no record bit.
void Assembler::popcntw(Register ra, Register rs) {
  emit(EXT2 | POPCNTW | rs.code() * B21 | ra.code() * B16);
}


void Assembler::and_(Register ra, Register rs, Register rb, RCBit rc) {
  x_form(EXT2 | ANDX, ra, rs, rb, rc);
}
    772 
    773 
// rlwinm: rotate left word by immediate sh, then AND with the mask
// described by bit positions mb..me.  All fields are 5 bits.
void Assembler::rlwinm(Register ra, Register rs, int sh, int mb, int me,
                       RCBit rc) {
  sh &= 0x1f;
  mb &= 0x1f;
  me &= 0x1f;
  emit(RLWINMX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
       me << 1 | rc);
}


// rlwnm: like rlwinm but the rotate amount comes from register rb.
void Assembler::rlwnm(Register ra, Register rs, Register rb, int mb, int me,
                      RCBit rc) {
  mb &= 0x1f;
  me &= 0x1f;
  emit(RLWNMX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | mb * B6 |
       me << 1 | rc);
}


// rlwimi: rotate left word immediate then mask-insert into ra.
void Assembler::rlwimi(Register ra, Register rs, int sh, int mb, int me,
                       RCBit rc) {
  sh &= 0x1f;
  mb &= 0x1f;
  me &= 0x1f;
  emit(RLWIMIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
       me << 1 | rc);
}
    801 
    802 
// Shift left word immediate — expressed as a rotate-and-mask (rlwinm).
void Assembler::slwi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((32 > val.imm_) && (val.imm_ >= 0));
  rlwinm(dst, src, val.imm_, 0, 31 - val.imm_, rc);
}


// Shift right word immediate (logical) via rlwinm.
void Assembler::srwi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((32 > val.imm_) && (val.imm_ >= 0));
  rlwinm(dst, src, 32 - val.imm_, val.imm_, 31, rc);
}


// Clear low-order (rightmost) val bits via rlwinm.
void Assembler::clrrwi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((32 > val.imm_) && (val.imm_ >= 0));
  rlwinm(dst, src, 0, 0, 31 - val.imm_, rc);
}


// Clear high-order (leftmost) val bits via rlwinm.
void Assembler::clrlwi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((32 > val.imm_) && (val.imm_ >= 0));
  rlwinm(dst, src, 0, val.imm_, 31, rc);
}
    827 
    828 
// Shift right algebraic word immediate.
void Assembler::srawi(Register ra, Register rs, int sh, RCBit r) {
  emit(EXT2 | SRAWIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | r);
}


// Shift right word (logical), amount from register.
void Assembler::srw(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | SRWX, dst, src1, src2, r);
}


// Shift left word, amount from register.
void Assembler::slw(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | SLWX, dst, src1, src2, r);
}


// Shift right algebraic word, amount from register.
void Assembler::sraw(Register ra, Register rs, Register rb, RCBit r) {
  x_form(EXT2 | SRAW, ra, rs, rb, r);
}


// Rotate left word by register amount — full-width mask rlwnm.
void Assembler::rotlw(Register ra, Register rs, Register rb, RCBit r) {
  rlwnm(ra, rs, rb, 0, 31, r);
}


// Rotate left word by immediate.
void Assembler::rotlwi(Register ra, Register rs, int sh, RCBit r) {
  rlwinm(ra, rs, sh, 0, 31, r);
}


// Rotate right word by immediate — a left rotate by (32 - sh).
void Assembler::rotrwi(Register ra, Register rs, int sh, RCBit r) {
  rlwinm(ra, rs, 32 - sh, 0, 31, r);
}
    862 
    863 
    864 void Assembler::subi(Register dst, Register src, const Operand& imm) {
    865   addi(dst, src, Operand(-(imm.imm_)));
    866 }
    867 
// Add Carrying (sets the carry bit).
void Assembler::addc(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | ADDCX, dst, src1, src2, o, r);
}
    872 
// Add Extended (adds the carry bit in).
void Assembler::adde(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | ADDEX, dst, src1, src2, o, r);
}
    877 
// Add to Zero Extended: dst = src1 + carry.
void Assembler::addze(Register dst, Register src1, OEBit o, RCBit r) {
  // a special xo_form (no RB field)
  emit(EXT2 | ADDZEX | dst.code() * B21 | src1.code() * B16 | o | r);
}
    882 
    883 
// dst = src1 - src2.  Note the operand swap: PPC subf computes RB - RA.
void Assembler::sub(Register dst, Register src1, Register src2, OEBit o,
                    RCBit r) {
  xo_form(EXT2 | SUBFX, dst, src2, src1, o, r);
}
    888 
// dst = src1 - src2, carrying.  Operands swapped for subf semantics.
void Assembler::subc(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | SUBFCX, dst, src2, src1, o, r);
}
    893 
// dst = src1 - src2, extended with carry.  Operands swapped for subf.
void Assembler::sube(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | SUBFEX, dst, src2, src1, o, r);
}
    898 
// Subtract From Immediate Carrying: dst = imm - src.
void Assembler::subfic(Register dst, Register src, const Operand& imm) {
  d_form(SUBFIC, dst, src, imm.imm_, true);
}
    902 
    903 
// dst = src1 + src2.
void Assembler::add(Register dst, Register src1, Register src2, OEBit o,
                    RCBit r) {
  xo_form(EXT2 | ADDX, dst, src1, src2, o, r);
}
    908 
    909 
// Multiply low word: dst = low 32 bits of src1 * src2.
void Assembler::mullw(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | MULLW, dst, src1, src2, o, r);
}
    915 
    916 
// Multiply hi word (signed).  mulhw has no OE field, so LeaveOE is forced.
void Assembler::mulhw(Register dst, Register src1, Register src2, RCBit r) {
  xo_form(EXT2 | MULHWX, dst, src1, src2, LeaveOE, r);
}
    921 
    922 
// Multiply hi word unsigned.  mulhwu has no OE field, so LeaveOE is forced.
void Assembler::mulhwu(Register dst, Register src1, Register src2, RCBit r) {
  xo_form(EXT2 | MULHWUX, dst, src1, src2, LeaveOE, r);
}
    927 
    928 
// Divide word (signed).
void Assembler::divw(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | DIVW, dst, src1, src2, o, r);
}
    934 
    935 
// Divide word unsigned.
void Assembler::divwu(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | DIVWU, dst, src1, src2, o, r);
}
    941 
    942 
// Add immediate.  r0 as source means literal zero on PPC, hence the DCHECK.
void Assembler::addi(Register dst, Register src, const Operand& imm) {
  DCHECK(!src.is(r0));  // use li instead to show intent
  d_form(ADDI, dst, src, imm.imm_, true);
}
    947 
    948 
// Add immediate shifted (immediate goes into the high 16 bits).
void Assembler::addis(Register dst, Register src, const Operand& imm) {
  DCHECK(!src.is(r0));  // use lis instead to show intent
  d_form(ADDIS, dst, src, imm.imm_, true);
}
    953 
    954 
// Add immediate carrying (sets the carry bit).
void Assembler::addic(Register dst, Register src, const Operand& imm) {
  d_form(ADDIC, dst, src, imm.imm_, true);
}
    958 
    959 
// AND immediate.  Note d_form takes (RS, RA) here, reversed vs. arithmetic
// D-forms, and the immediate is treated as unsigned (signed_disp = false).
void Assembler::andi(Register ra, Register rs, const Operand& imm) {
  d_form(ANDIx, rs, ra, imm.imm_, false);
}
    963 
    964 
// AND immediate shifted (immediate applies to the high 16 bits).
void Assembler::andis(Register ra, Register rs, const Operand& imm) {
  d_form(ANDISx, rs, ra, imm.imm_, false);
}
    968 
    969 
// dst = ~(src1 | src2).
void Assembler::nor(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | NORX, dst, src1, src2, r);
}
    973 
    974 
// Bitwise NOT, expressed as nor(dst, src, src).
void Assembler::notx(Register dst, Register src, RCBit r) {
  x_form(EXT2 | NORX, dst, src, src, r);
}
    978 
    979 
// OR immediate (low 16 bits), unsigned immediate.
void Assembler::ori(Register ra, Register rs, const Operand& imm) {
  d_form(ORI, rs, ra, imm.imm_, false);
}
    983 
    984 
// OR immediate shifted (high 16 bits), unsigned immediate.
void Assembler::oris(Register dst, Register src, const Operand& imm) {
  d_form(ORIS, src, dst, imm.imm_, false);
}
    988 
    989 
// dst = src1 | src2.
void Assembler::orx(Register dst, Register src1, Register src2, RCBit rc) {
  x_form(EXT2 | ORX, dst, src1, src2, rc);
}
    993 
    994 
// OR with complement: dst = src1 | ~src2.
void Assembler::orc(Register dst, Register src1, Register src2, RCBit rc) {
  x_form(EXT2 | ORC, dst, src1, src2, rc);
}
    998 
    999 
// Signed pointer-width compare of src1 against a 16-bit immediate;
// result goes to condition register |cr|.
void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) {
  intptr_t imm16 = src2.imm_;
#if V8_TARGET_ARCH_PPC64
  int L = 1;  // L=1: doubleword comparison
#else
  int L = 0;  // L=0: word comparison
#endif
  DCHECK(is_int16(imm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  imm16 &= kImm16Mask;
  emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
}
   1012 
   1013 
// Unsigned pointer-width compare of src1 against a 16-bit immediate;
// result goes to condition register |cr|.
void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) {
  uintptr_t uimm16 = src2.imm_;
#if V8_TARGET_ARCH_PPC64
  int L = 1;  // L=1: doubleword comparison
#else
  int L = 0;  // L=0: word comparison
#endif
  DCHECK(is_uint16(uimm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  uimm16 &= kImm16Mask;
  emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
}
   1026 
   1027 
// Signed pointer-width register-register compare; result to |cr|.
void Assembler::cmp(Register src1, Register src2, CRegister cr) {
#if V8_TARGET_ARCH_PPC64
  int L = 1;  // L=1: doubleword comparison
#else
  int L = 0;  // L=0: word comparison
#endif
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT2 | CMP | cr.code() * B23 | L * B21 | src1.code() * B16 |
       src2.code() * B11);
}
   1038 
   1039 
// Unsigned pointer-width register-register compare; result to |cr|.
void Assembler::cmpl(Register src1, Register src2, CRegister cr) {
#if V8_TARGET_ARCH_PPC64
  int L = 1;  // L=1: doubleword comparison
#else
  int L = 0;  // L=0: word comparison
#endif
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT2 | CMPL | cr.code() * B23 | L * B21 | src1.code() * B16 |
       src2.code() * B11);
}
   1050 
   1051 
// Signed 32-bit compare of src1 against a 16-bit immediate; result to |cr|.
// Additionally records compare-against-zero sites for peephole optimization.
void Assembler::cmpwi(Register src1, const Operand& src2, CRegister cr) {
  intptr_t imm16 = src2.imm_;
  int L = 0;
  int pos = pc_offset();
  DCHECK(is_int16(imm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  imm16 &= kImm16Mask;

  // For cmpwi against 0, save position and cr for later examination
  // of potential optimization.
  if (imm16 == 0 && pos > 0 && last_bound_pos_ != pos) {
    optimizable_cmpi_pos_ = pos;
    cmpi_cr_ = cr;
  }
  emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
}
   1068 
   1069 
// Unsigned 32-bit compare of src1 against a 16-bit immediate; result to |cr|.
void Assembler::cmplwi(Register src1, const Operand& src2, CRegister cr) {
  uintptr_t uimm16 = src2.imm_;
  int L = 0;  // L=0: word comparison
  DCHECK(is_uint16(uimm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  uimm16 &= kImm16Mask;
  emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
}
   1078 
   1079 
   1080 void Assembler::cmpw(Register src1, Register src2, CRegister cr) {
   1081   int L = 0;
   1082   DCHECK(cr.code() >= 0 && cr.code() <= 7);
   1083   emit(EXT2 | CMP | cr.code() * B23 | L * B21 | src1.code() * B16 |
   1084        src2.code() * B11);
   1085 }
   1086 
   1087 
   1088 void Assembler::cmplw(Register src1, Register src2, CRegister cr) {
   1089   int L = 0;
   1090   DCHECK(cr.code() >= 0 && cr.code() <= 7);
   1091   emit(EXT2 | CMPL | cr.code() * B23 | L * B21 | src1.code() * B16 |
   1092        src2.code() * B11);
   1093 }
   1094 
   1095 
// Integer Select: rt = (CR bit cb set) ? ra : rb.
void Assembler::isel(Register rt, Register ra, Register rb, int cb) {
  emit(EXT2 | ISEL | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       cb * B6);
}
   1100 
   1101 
// Pseudo op - load immediate (addi with RA = r0, which reads as zero).
void Assembler::li(Register dst, const Operand& imm) {
  d_form(ADDI, dst, r0, imm.imm_, true);
}
   1106 
   1107 
// Pseudo op - load immediate shifted (addis with RA = r0).
void Assembler::lis(Register dst, const Operand& imm) {
  d_form(ADDIS, dst, r0, imm.imm_, true);
}
   1111 
   1112 
// Pseudo op - move register
void Assembler::mr(Register dst, Register src) {
  // actually or(dst, src, src)
  orx(dst, src, src);
}
   1118 
   1119 
// Load Byte and Zero, D-form (base + 16-bit displacement).
void Assembler::lbz(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(LBZ, dst, src.ra(), src.offset(), true);
}
   1124 
   1125 
   1126 void Assembler::lbzx(Register rt, const MemOperand& src) {
   1127   Register ra = src.ra();
   1128   Register rb = src.rb();
   1129   DCHECK(!ra.is(r0));
   1130   emit(EXT2 | LBZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   1131        LeaveRC);
   1132 }
   1133 
   1134 
   1135 void Assembler::lbzux(Register rt, const MemOperand& src) {
   1136   Register ra = src.ra();
   1137   Register rb = src.rb();
   1138   DCHECK(!ra.is(r0));
   1139   emit(EXT2 | LBZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   1140        LeaveRC);
   1141 }
   1142 
   1143 
// Load Halfword and Zero, D-form (base + 16-bit displacement).
void Assembler::lhz(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(LHZ, dst, src.ra(), src.offset(), true);
}
   1148 
   1149 
   1150 void Assembler::lhzx(Register rt, const MemOperand& src) {
   1151   Register ra = src.ra();
   1152   Register rb = src.rb();
   1153   DCHECK(!ra.is(r0));
   1154   emit(EXT2 | LHZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   1155        LeaveRC);
   1156 }
   1157 
   1158 
   1159 void Assembler::lhzux(Register rt, const MemOperand& src) {
   1160   Register ra = src.ra();
   1161   Register rb = src.rb();
   1162   DCHECK(!ra.is(r0));
   1163   emit(EXT2 | LHZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   1164        LeaveRC);
   1165 }
   1166 
   1167 
   1168 void Assembler::lhax(Register rt, const MemOperand& src) {
   1169   Register ra = src.ra();
   1170   Register rb = src.rb();
   1171   DCHECK(!ra.is(r0));
   1172   emit(EXT2 | LHAX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
   1173 }
   1174 
   1175 
// Load Word and Zero, D-form (base + 16-bit displacement).
void Assembler::lwz(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(LWZ, dst, src.ra(), src.offset(), true);
}
   1180 
   1181 
// Load Word and Zero with Update (base register is updated).
void Assembler::lwzu(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(LWZU, dst, src.ra(), src.offset(), true);
}
   1186 
   1187 
   1188 void Assembler::lwzx(Register rt, const MemOperand& src) {
   1189   Register ra = src.ra();
   1190   Register rb = src.rb();
   1191   DCHECK(!ra.is(r0));
   1192   emit(EXT2 | LWZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   1193        LeaveRC);
   1194 }
   1195 
   1196 
   1197 void Assembler::lwzux(Register rt, const MemOperand& src) {
   1198   Register ra = src.ra();
   1199   Register rb = src.rb();
   1200   DCHECK(!ra.is(r0));
   1201   emit(EXT2 | LWZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   1202        LeaveRC);
   1203 }
   1204 
   1205 
// Load Halfword Algebraic, D-form (sign-extending halfword load).
void Assembler::lha(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(LHA, dst, src.ra(), src.offset(), true);
}
   1210 
   1211 
// Load Word Algebraic (sign-extending word load).  On PPC64 this is the
// DS-form encoding (LD opcode with low bits = 2), so the offset must be
// word-aligned; on 32-bit it degenerates to lwz.
void Assembler::lwa(Register dst, const MemOperand& src) {
#if V8_TARGET_ARCH_PPC64
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | dst.code() * B21 | src.ra().code() * B16 | offset | 2);
#else
  lwz(dst, src);
#endif
}
   1223 
   1224 
// Load Word Algebraic Indexed; degenerates to lwzx on 32-bit targets.
void Assembler::lwax(Register rt, const MemOperand& src) {
#if V8_TARGET_ARCH_PPC64
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LWAX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
#else
  lwzx(rt, src);
#endif
}
   1235 
   1236 
// Store Byte, D-form (base + 16-bit displacement).
void Assembler::stb(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(STB, dst, src.ra(), src.offset(), true);
}
   1241 
   1242 
   1243 void Assembler::stbx(Register rs, const MemOperand& src) {
   1244   Register ra = src.ra();
   1245   Register rb = src.rb();
   1246   DCHECK(!ra.is(r0));
   1247   emit(EXT2 | STBX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   1248        LeaveRC);
   1249 }
   1250 
   1251 
   1252 void Assembler::stbux(Register rs, const MemOperand& src) {
   1253   Register ra = src.ra();
   1254   Register rb = src.rb();
   1255   DCHECK(!ra.is(r0));
   1256   emit(EXT2 | STBUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   1257        LeaveRC);
   1258 }
   1259 
   1260 
// Store Halfword, D-form (base + 16-bit displacement).
void Assembler::sth(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(STH, dst, src.ra(), src.offset(), true);
}
   1265 
   1266 
   1267 void Assembler::sthx(Register rs, const MemOperand& src) {
   1268   Register ra = src.ra();
   1269   Register rb = src.rb();
   1270   DCHECK(!ra.is(r0));
   1271   emit(EXT2 | STHX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   1272        LeaveRC);
   1273 }
   1274 
   1275 
   1276 void Assembler::sthux(Register rs, const MemOperand& src) {
   1277   Register ra = src.ra();
   1278   Register rb = src.rb();
   1279   DCHECK(!ra.is(r0));
   1280   emit(EXT2 | STHUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   1281        LeaveRC);
   1282 }
   1283 
   1284 
// Store Word, D-form (base + 16-bit displacement).
void Assembler::stw(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(STW, dst, src.ra(), src.offset(), true);
}
   1289 
   1290 
// Store Word with Update (base register is updated).
void Assembler::stwu(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(STWU, dst, src.ra(), src.offset(), true);
}
   1295 
   1296 
   1297 void Assembler::stwx(Register rs, const MemOperand& src) {
   1298   Register ra = src.ra();
   1299   Register rb = src.rb();
   1300   DCHECK(!ra.is(r0));
   1301   emit(EXT2 | STWX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   1302        LeaveRC);
   1303 }
   1304 
   1305 
   1306 void Assembler::stwux(Register rs, const MemOperand& src) {
   1307   Register ra = src.ra();
   1308   Register rb = src.rb();
   1309   DCHECK(!ra.is(r0));
   1310   emit(EXT2 | STWUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   1311        LeaveRC);
   1312 }
   1313 
   1314 
// Extend Sign Byte.  First argument is the destination (encoded in the RA
// field, rs.code() * B16).
void Assembler::extsb(Register rs, Register ra, RCBit rc) {
  emit(EXT2 | EXTSB | ra.code() * B21 | rs.code() * B16 | rc);
}
   1318 
   1319 
// Extend Sign Halfword.  First argument is the destination (RA field).
void Assembler::extsh(Register rs, Register ra, RCBit rc) {
  emit(EXT2 | EXTSH | ra.code() * B21 | rs.code() * B16 | rc);
}
   1323 
   1324 
// Extend Sign Word (64-bit only; callers on 32-bit must pass identical
// registers so it can be elided entirely).
void Assembler::extsw(Register rs, Register ra, RCBit rc) {
#if V8_TARGET_ARCH_PPC64
  emit(EXT2 | EXTSW | ra.code() * B21 | rs.code() * B16 | rc);
#else
  // nop on 32-bit
  DCHECK(rs.is(ra) && rc == LeaveRC);
#endif
}
   1333 
   1334 
// Negate: rt = -ra.
void Assembler::neg(Register rt, Register ra, OEBit o, RCBit r) {
  emit(EXT2 | NEGX | rt.code() * B21 | ra.code() * B16 | o | r);
}
   1338 
   1339 
// AND with complement: dst = src1 & ~src2.
void Assembler::andc(Register dst, Register src1, Register src2, RCBit rc) {
  x_form(EXT2 | ANDCX, dst, src1, src2, rc);
}
   1343 
   1344 
   1345 #if V8_TARGET_ARCH_PPC64
   1346 // 64bit specific instructions
// Load Doubleword, DS-form: offset must be word-aligned and fit in 16 bits.
void Assembler::ld(Register rd, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset);
}
   1354 
   1355 
   1356 void Assembler::ldx(Register rd, const MemOperand& src) {
   1357   Register ra = src.ra();
   1358   Register rb = src.rb();
   1359   DCHECK(!ra.is(r0));
   1360   emit(EXT2 | LDX | rd.code() * B21 | ra.code() * B16 | rb.code() * B11);
   1361 }
   1362 
   1363 
// Load Doubleword with Update, DS-form (LD opcode, low bits = 1); the base
// register is updated.  Offset must be word-aligned.
void Assembler::ldu(Register rd, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset | 1);
}
   1371 
   1372 
   1373 void Assembler::ldux(Register rd, const MemOperand& src) {
   1374   Register ra = src.ra();
   1375   Register rb = src.rb();
   1376   DCHECK(!ra.is(r0));
   1377   emit(EXT2 | LDUX | rd.code() * B21 | ra.code() * B16 | rb.code() * B11);
   1378 }
   1379 
   1380 
// Store Doubleword, DS-form: offset must be word-aligned and fit in 16 bits.
void Assembler::std(Register rs, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset);
}
   1388 
   1389 
   1390 void Assembler::stdx(Register rs, const MemOperand& src) {
   1391   Register ra = src.ra();
   1392   Register rb = src.rb();
   1393   DCHECK(!ra.is(r0));
   1394   emit(EXT2 | STDX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11);
   1395 }
   1396 
   1397 
// Store Doubleword with Update, DS-form (STD opcode, low bits = 1); the
// base register is updated.  Offset must be word-aligned.
void Assembler::stdu(Register rs, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset | 1);
}
   1405 
   1406 
   1407 void Assembler::stdux(Register rs, const MemOperand& src) {
   1408   Register ra = src.ra();
   1409   Register rb = src.rb();
   1410   DCHECK(!ra.is(r0));
   1411   emit(EXT2 | STDUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11);
   1412 }
   1413 
   1414 
// Rotate Left Doubleword Immediate then Clear.
void Assembler::rldic(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIC, ra, rs, sh, mb, r);
}
   1418 
   1419 
// Rotate Left Doubleword Immediate then Clear Left.
void Assembler::rldicl(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDICL, ra, rs, sh, mb, r);
}
   1423 
   1424 
// Rotate Left Doubleword then Clear Left (rotate amount in rb).
void Assembler::rldcl(Register ra, Register rs, Register rb, int mb, RCBit r) {
  mds_form(EXT5 | RLDCL, ra, rs, rb, mb, r);
}
   1428 
   1429 
// Rotate Left Doubleword Immediate then Clear Right.
void Assembler::rldicr(Register ra, Register rs, int sh, int me, RCBit r) {
  md_form(EXT5 | RLDICR, ra, rs, sh, me, r);
}
   1433 
   1434 
// Shift left doubleword by immediate (rldicr with mask [0, 63 - n]).
void Assembler::sldi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
  rldicr(dst, src, val.imm_, 63 - val.imm_, rc);
}
   1439 
   1440 
// Shift right doubleword by immediate (rldicl with rotate 64 - n).
void Assembler::srdi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
  rldicl(dst, src, 64 - val.imm_, val.imm_, rc);
}
   1445 
   1446 
// Clear the right-most val.imm_ bits of src (rldicr, mask [0, 63 - n]).
void Assembler::clrrdi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
  rldicr(dst, src, 0, 63 - val.imm_, rc);
}
   1452 
   1453 
// Clear the left-most val.imm_ bits of src (rldicl, mask [n, 63]).
void Assembler::clrldi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
  rldicl(dst, src, 0, val.imm_, rc);
}
   1459 
   1460 
// Rotate Left Doubleword Immediate then Mask Insert.
void Assembler::rldimi(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIMI, ra, rs, sh, mb, r);
}
   1464 
   1465 
   1466 void Assembler::sradi(Register ra, Register rs, int sh, RCBit r) {
   1467   int sh0_4 = sh & 0x1f;
   1468   int sh5 = (sh >> 5) & 0x1;
   1469 
   1470   emit(EXT2 | SRADIX | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 |
   1471        sh5 * B1 | r);
   1472 }
   1473 
   1474 
// Shift Right Doubleword (logical), shift amount in src2.
void Assembler::srd(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | SRDX, dst, src1, src2, r);
}
   1478 
   1479 
// Shift Left Doubleword, shift amount in src2.
void Assembler::sld(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | SLDX, dst, src1, src2, r);
}
   1483 
   1484 
// Shift Right Algebraic Doubleword, shift amount in rb.
void Assembler::srad(Register ra, Register rs, Register rb, RCBit r) {
  x_form(EXT2 | SRAD, ra, rs, rb, r);
}
   1488 
   1489 
// Rotate left doubleword by the amount in rb (rldcl with mb = 0).
void Assembler::rotld(Register ra, Register rs, Register rb, RCBit r) {
  rldcl(ra, rs, rb, 0, r);
}
   1493 
   1494 
// Rotate left doubleword by immediate sh (rldicl with mb = 0).
void Assembler::rotldi(Register ra, Register rs, int sh, RCBit r) {
  rldicl(ra, rs, sh, 0, r);
}
   1498 
   1499 
// Rotate right doubleword by sh, expressed as a left rotate of 64 - sh.
void Assembler::rotrdi(Register ra, Register rs, int sh, RCBit r) {
  rldicl(ra, rs, 64 - sh, 0, r);
}
   1503 
   1504 
// Count Leading Zeros Doubleword.  The instruction has no RB field; r0
// here only fills the unused slot of the x_form helper.
void Assembler::cntlzd_(Register ra, Register rs, RCBit rc) {
  x_form(EXT2 | CNTLZDX, ra, rs, r0, rc);
}
   1508 
   1509 
// Population Count Doubleword: ra = number of set bits in rs.
void Assembler::popcntd(Register ra, Register rs) {
  emit(EXT2 | POPCNTD | rs.code() * B21 | ra.code() * B16);
}
   1513 
   1514 
// Multiply Low Doubleword.
void Assembler::mulld(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | MULLD, dst, src1, src2, o, r);
}
   1519 
   1520 
// Divide Doubleword (signed).
void Assembler::divd(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | DIVD, dst, src1, src2, o, r);
}
   1525 
   1526 
// Divide Doubleword Unsigned.
void Assembler::divdu(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | DIVDU, dst, src1, src2, o, r);
}
   1531 #endif
   1532 
   1533 
// Function descriptor for AIX.
// Code address skips the function descriptor "header".
// TOC and static chain are ignored and set to 0.
// Emits the 3-slot descriptor {entry address, 0, 0} at the very start of
// the code object; a no-op on ABIs without function descriptors.
void Assembler::function_descriptor() {
  if (ABI_USES_FUNCTION_DESCRIPTORS) {
    Label instructions;
    DCHECK(pc_offset() == 0);  // descriptor must be first in the buffer
    emit_label_addr(&instructions);
    dp(0);  // TOC pointer slot
    dp(0);  // static chain slot
    bind(&instructions);
  }
}
   1547 
   1548 
// Returns the number of instructions a mov(dst, src) would emit, without
// emitting anything.  Mirrors the decision logic in mov().
int Assembler::instructions_required_for_mov(Register dst,
                                             const Operand& src) const {
  bool canOptimize =
      !(src.must_output_reloc_info(this) || is_trampoline_pool_blocked());
  if (use_constant_pool_for_mov(dst, src, canOptimize)) {
    if (ConstantPoolAccessIsInOverflow()) {
      // Overflowed entries need an extra addis to form the pool address.
      return kMovInstructionsConstantPool + 1;
    }
    return kMovInstructionsConstantPool;
  }
  DCHECK(!canOptimize);
  return kMovInstructionsNoConstantPool;
}
   1562 
   1563 
// Decides whether mov(dst, src) should load |src| from the embedded
// constant pool rather than synthesizing it with immediate instructions.
bool Assembler::use_constant_pool_for_mov(Register dst, const Operand& src,
                                          bool canOptimize) const {
  if (!FLAG_enable_embedded_constant_pool || !is_constant_pool_available()) {
    // If there is no constant pool available, we must use a mov
    // immediate sequence.
    return false;
  }

  intptr_t value = src.immediate();
#if V8_TARGET_ARCH_PPC64
  bool allowOverflow = !((canOptimize && is_int32(value)) || dst.is(r0));
#else
  bool allowOverflow = !(canOptimize || dst.is(r0));
#endif
  if (canOptimize && is_int16(value)) {
    // Prefer a single-instruction load-immediate.
    return false;
  }
  if (!allowOverflow && ConstantPoolAccessIsInOverflow()) {
    // Prefer non-relocatable two-instruction bitwise-mov32 over
    // overflow sequence.
    return false;
  }

  return true;
}
   1590 
   1591 
   1592 void Assembler::EnsureSpaceFor(int space_needed) {
   1593   if (buffer_space() <= (kGap + space_needed)) {
   1594     GrowBuffer(space_needed);
   1595   }
   1596 }
   1597 
   1598 
   1599 bool Operand::must_output_reloc_info(const Assembler* assembler) const {
   1600   if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
   1601     if (assembler != NULL && assembler->predictable_code_size()) return true;
   1602     return assembler->serializer_enabled();
   1603   } else if (RelocInfo::IsNone(rmode_)) {
   1604     return false;
   1605   }
   1606   return true;
   1607 }
   1608 
   1609 
// Primarily used for loading constants
// This should really move to be in macro-assembler as it
// is really a pseudo instruction
// Some usages of this intend for a FIXED_SEQUENCE to be used
// Todo - break this dependency so we can optimize mov() in general
// and only use the generic version when we require a fixed sequence
void Assembler::mov(Register dst, const Operand& src) {
  intptr_t value = src.immediate();
  bool relocatable = src.must_output_reloc_info(this);
  bool canOptimize;

  // A value needing relocation, or one emitted while the trampoline pool
  // is blocked (unless it fits in one li), must use a fixed-length form.
  canOptimize =
      !(relocatable || (is_trampoline_pool_blocked() && !is_int16(value)));

  // Preferred path: load the value from the embedded constant pool.
  if (use_constant_pool_for_mov(dst, src, canOptimize)) {
    DCHECK(is_constant_pool_available());
    if (relocatable) {
      RecordRelocInfo(src.rmode_);
    }
    ConstantPoolEntry::Access access = ConstantPoolAddEntry(src.rmode_, value);
#if V8_TARGET_ARCH_PPC64
    if (access == ConstantPoolEntry::OVERFLOWED) {
      // Overflow entries need an addis to extend the pool base's reach;
      // the zero offsets are patched later.
      addis(dst, kConstantPoolRegister, Operand::Zero());
      ld(dst, MemOperand(dst, 0));
    } else {
      ld(dst, MemOperand(kConstantPoolRegister, 0));
    }
#else
    if (access == ConstantPoolEntry::OVERFLOWED) {
      addis(dst, kConstantPoolRegister, Operand::Zero());
      lwz(dst, MemOperand(dst, 0));
    } else {
      lwz(dst, MemOperand(kConstantPoolRegister, 0));
    }
#endif
    return;
  }

  // Variable-length immediate synthesis, emitting only the pieces needed.
  if (canOptimize) {
    if (is_int16(value)) {
      li(dst, Operand(value));
    } else {
      uint16_t u16;
#if V8_TARGET_ARCH_PPC64
      if (is_int32(value)) {
#endif
        lis(dst, Operand(value >> 16));
#if V8_TARGET_ARCH_PPC64
      } else {
        if (is_int48(value)) {
          li(dst, Operand(value >> 32));
        } else {
          lis(dst, Operand(value >> 48));
          u16 = ((value >> 32) & 0xffff);
          if (u16) {
            ori(dst, dst, Operand(u16));
          }
        }
        sldi(dst, dst, Operand(32));
        u16 = ((value >> 16) & 0xffff);
        if (u16) {
          oris(dst, dst, Operand(u16));
        }
      }
#endif
      u16 = (value & 0xffff);
      if (u16) {
        ori(dst, dst, Operand(u16));
      }
    }
    return;
  }

  // Fallback: fixed-length, patchable full-width sequence.
  DCHECK(!canOptimize);
  if (relocatable) {
    RecordRelocInfo(src.rmode_);
  }
  bitwise_mov(dst, value);
}
   1689 
   1690 
// Emits a fixed-length, patchable sequence loading the full-width |value|
// into |dst| (4 instructions on 64-bit: lis/ori/sldi/oris+ori folded to
// five emits; 2 on 32-bit: lis/ori).  Trampoline pool is blocked so the
// sequence stays contiguous for later patching.
void Assembler::bitwise_mov(Register dst, intptr_t value) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
#if V8_TARGET_ARCH_PPC64
    int32_t hi_32 = static_cast<int32_t>(value >> 32);
    int32_t lo_32 = static_cast<int32_t>(value);
    int hi_word = static_cast<int>(hi_32 >> 16);
    int lo_word = static_cast<int>(hi_32 & 0xffff);
    lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
    ori(dst, dst, Operand(lo_word));
    sldi(dst, dst, Operand(32));
    hi_word = static_cast<int>(((lo_32 >> 16) & 0xffff));
    lo_word = static_cast<int>(lo_32 & 0xffff);
    oris(dst, dst, Operand(hi_word));
    ori(dst, dst, Operand(lo_word));
#else
    int hi_word = static_cast<int>(value >> 16);
    int lo_word = static_cast<int>(value & 0xffff);
    lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
    ori(dst, dst, Operand(lo_word));
#endif
}
   1712 
   1713 
   1714 void Assembler::bitwise_mov32(Register dst, int32_t value) {
   1715   BlockTrampolinePoolScope block_trampoline_pool(this);
   1716   int hi_word = static_cast<int>(value >> 16);
   1717   int lo_word = static_cast<int>(value & 0xffff);
   1718   lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
   1719   ori(dst, dst, Operand(lo_word));
   1720 }
   1721 
   1722 
// Emits a fixed two-instruction sequence computing dst = src + value.
void Assembler::bitwise_add32(Register dst, Register src, int32_t value) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (is_int16(value)) {
    addi(dst, src, Operand(value));
    nop();  // pad to keep the sequence length fixed for patching
  } else {
    int hi_word = static_cast<int>(value >> 16);
    int lo_word = static_cast<int>(value & 0xffff);
    // addic sign-extends its immediate; pre-increment the high half to
    // compensate when the low half will be treated as negative.
    if (lo_word & 0x8000) hi_word++;
    addis(dst, src, Operand(SIGN_EXT_IMM16(hi_word)));
    addic(dst, dst, Operand(SIGN_EXT_IMM16(lo_word)));
  }
}
   1736 
   1737 
// Loads into |dst| the offset of |label| within the generated code object.
// For unbound labels a two-word placeholder is emitted and patched later.
void Assembler::mov_label_offset(Register dst, Label* label) {
  int position = link(label);
  if (label->is_bound()) {
    // Load the position of the label relative to the generated code object.
    mov(dst, Operand(position + Code::kHeaderSize - kHeapObjectTag));
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain.  Encode the destination register in the 2nd instruction.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));

    // When the label is bound, these instructions will be patched
    // with a 2 instruction mov sequence that will load the
    // destination register with the position of the label from the
    // beginning of the code.
    //
    // target_at extracts the link and target_at_put patches the instructions.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundMovLabelOffsetOpcode | (link & kImm26Mask));
    emit(dst.code());
  }
}
   1763 
   1764 
// Computes dst = base + offset-of-|label| + delta.  For unbound labels a
// two-word placeholder is emitted and patched when the label is bound.
void Assembler::add_label_offset(Register dst, Register base, Label* label,
                                 int delta) {
  int position = link(label);
  if (label->is_bound()) {
    // dst = base + position + delta
    position += delta;
    bitwise_add32(dst, base, position);
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain.  Encode the operands in the 2nd instruction.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));
    DCHECK(is_int16(delta));

    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundAddLabelOffsetOpcode | (link & kImm26Mask));
    emit(dst.code() * B21 | base.code() * B16 | (delta & kImm16Mask));
  }
}
   1787 
   1788 
// Loads dst with the address of the label.  The value stays buffer-relative
// until EmitRelocations, which is why an INTERNAL_REFERENCE_ENCODED reloc
// entry is recorded up front.
void Assembler::mov_label_addr(Register dst, Label* label) {
  CheckBuffer();
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
  int position = link(label);
  if (label->is_bound()) {
    // Keep internal references relative until EmitRelocations.
    bitwise_mov(dst, position);
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain.  Encode the destination register in the 2nd instruction.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);  // label links are instruction-aligned
    link >>= 2;
    DCHECK(is_int26(link));

    // When the label is bound, these instructions will be patched
    // with a multi-instruction mov sequence that will load the
    // destination register with the address of the label.
    //
    // target_at extracts the link and target_at_put patches the instructions.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundMovLabelAddrOpcode | (link & kImm26Mask));
    emit(dst.code());
    // Pad so the patched full mov sequence always fits in place.
    DCHECK(kMovInstructionsNoConstantPool >= 2);
    for (int i = 0; i < kMovInstructionsNoConstantPool - 2; i++) nop();
  }
}
   1817 
   1818 
// Emits a pointer-sized jump-table entry holding the address of the label
// (kept buffer-relative until EmitRelocations resolves it).
void Assembler::emit_label_addr(Label* label) {
  CheckBuffer();
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  int position = link(label);
  if (label->is_bound()) {
    // Keep internal references relative until EmitRelocations.
    dp(position);
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);  // label links are instruction-aligned
    link >>= 2;
    DCHECK(is_int26(link));

    // When the label is bound, the instruction(s) will be patched
    // as a jump table entry containing the label address.  target_at extracts
    // the link and target_at_put patches the instruction(s).
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundJumpTableEntryOpcode | (link & kImm26Mask));
#if V8_TARGET_ARCH_PPC64
    // A 64-bit entry occupies two instruction slots.
    nop();
#endif
  }
}
   1845 
   1846 
// Special register instructions

// Condition register bit XOR: CR[bt] = CR[ba] ^ CR[bb].
void Assembler::crxor(int bt, int ba, int bb) {
  emit(EXT1 | CRXOR | bt * B21 | ba * B16 | bb * B11);
}


// Condition register bit equivalence: CR[bt] = !(CR[ba] ^ CR[bb]).
void Assembler::creqv(int bt, int ba, int bb) {
  emit(EXT1 | CREQV | bt * B21 | ba * B16 | bb * B11);
}


// The SPR field of mfspr/mtspr holds the SPR number with its two 5-bit
// halves swapped: LR (SPR 8) encodes as 256, CTR (SPR 9) as 288,
// XER (SPR 1) as 32.
void Assembler::mflr(Register dst) {
  emit(EXT2 | MFSPR | dst.code() * B21 | 256 << 11);  // Ignore RC bit
}


void Assembler::mtlr(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 256 << 11);  // Ignore RC bit
}


void Assembler::mtctr(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 288 << 11);  // Ignore RC bit
}


void Assembler::mtxer(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 32 << 11);
}


// Copies the FPSCR field containing |bit| into condition register field |cr|.
void Assembler::mcrfs(CRegister cr, FPSCRBit bit) {
  DCHECK(static_cast<int>(bit) < 32);
  int bf = cr.code();
  int bfa = bit / CRWIDTH;  // index of the FPSCR field holding the bit
  emit(EXT4 | MCRFS | bf * B23 | bfa * B18);
}


// Copies the entire condition register into dst.
void Assembler::mfcr(Register dst) { emit(EXT2 | MFCR | dst.code() * B21); }
   1887 
   1888 
#if V8_TARGET_ARCH_PPC64
// Direct GPR <-> FPR moves via the VSX move instructions (64-bit only).

// dst = the full 64-bit doubleword of src.
void Assembler::mffprd(Register dst, DoubleRegister src) {
  emit(EXT2 | MFVSRD | src.code() * B21 | dst.code() * B16);
}


// dst = low word of src, zero-extended.
void Assembler::mffprwz(Register dst, DoubleRegister src) {
  emit(EXT2 | MFVSRWZ | src.code() * B21 | dst.code() * B16);
}


// dst = the full 64-bit doubleword from src.
void Assembler::mtfprd(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRD | dst.code() * B21 | src.code() * B16);
}


// dst = low word of src, zero-extended.
void Assembler::mtfprwz(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRWZ | dst.code() * B21 | src.code() * B16);
}


// dst = low word of src, sign-extended.
void Assembler::mtfprwa(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRWA | dst.code() * B21 | src.code() * B16);
}
#endif
   1914 
   1915 
   1916 // Exception-generating instructions and debugging support.
   1917 // Stops with a non-negative code less than kNumOfWatchedStops support
   1918 // enabling/disabling and a counter feature. See simulator-ppc.h .
   1919 void Assembler::stop(const char* msg, Condition cond, int32_t code,
   1920                      CRegister cr) {
   1921   if (cond != al) {
   1922     Label skip;
   1923     b(NegateCondition(cond), &skip, cr);
   1924     bkpt(0);
   1925     bind(&skip);
   1926   } else {
   1927     bkpt(0);
   1928   }
   1929 }
   1930 
   1931 
// Breakpoint pseudo-instruction: emits a trap ("twge r2, r2", 0x7d821008).
// imm16 is accepted for interface compatibility but is not encoded.
void Assembler::bkpt(uint32_t imm16) { emit(0x7d821008); }


// Data cache block flush of the block containing ra + rb.
void Assembler::dcbf(Register ra, Register rb) {
  emit(EXT2 | DCBF | ra.code() * B16 | rb.code() * B11);
}


// Heavyweight memory barrier.
void Assembler::sync() { emit(EXT2 | SYNC); }


// Lightweight memory barrier (sync with L=1).
void Assembler::lwsync() { emit(EXT2 | SYNC | 1 * B21); }


// Instruction cache block invalidate of the block containing ra + rb.
void Assembler::icbi(Register ra, Register rb) {
  emit(EXT2 | ICBI | ra.code() * B16 | rb.code() * B11);
}


// Instruction synchronize: discards any prefetched instructions.
void Assembler::isync() { emit(EXT1 | ISYNC); }
   1952 
   1953 
// Floating point support

// FP loads: lfd/lfs load a double/single into frt.  The "u" forms also
// update ra with the effective address; the "x"/"ux" forms are X-form,
// taking a register index (rb) instead of a 16-bit displacement.
// ra must not be r0: a zero register field in these encodings denotes the
// constant zero rather than r0.
void Assembler::lfd(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(!ra.is(r0));
  CHECK(is_int16(offset));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFD | frt.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::lfdu(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  DCHECK(!ra.is(r0));
  CHECK(is_int16(offset));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFDU | frt.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::lfdx(const DoubleRegister frt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LFDX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::lfdux(const DoubleRegister frt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LFDUX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::lfs(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFS | frt.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::lfsu(const DoubleRegister frt, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(LFSU | frt.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::lfsx(const DoubleRegister frt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LFSX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::lfsux(const DoubleRegister frt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LFSUX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}
   2034 
   2035 
// FP stores: mirror images of the loads above — stfd/stfs store a
// double/single from frs; "u" forms update ra, "x"/"ux" forms are X-form.
// ra must not be r0 (register field 0 encodes the constant zero).
void Assembler::stfd(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFD | frs.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::stfdu(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFDU | frs.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::stfdx(const DoubleRegister frs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STFDX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::stfdux(const DoubleRegister frs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STFDUX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::stfs(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFS | frs.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::stfsu(const DoubleRegister frs, const MemOperand& src) {
  int offset = src.offset();
  Register ra = src.ra();
  CHECK(is_int16(offset));
  DCHECK(!ra.is(r0));
  int imm16 = offset & kImm16Mask;
  // could be x_form instruction with some casting magic
  emit(STFSU | frs.code() * B21 | ra.code() * B16 | imm16);
}


void Assembler::stfsx(const DoubleRegister frs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STFSX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


void Assembler::stfsux(const DoubleRegister frs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STFSUX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}
   2114 
   2115 
// Double-precision arithmetic and conversions.  Most are A-form
// (frt, fra, frb) emitted via a_form; fmul and fsel place frc in the C
// field (B6) and are emitted directly.
void Assembler::fsub(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FSUB, frt, fra, frb, rc);
}


void Assembler::fadd(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FADD, frt, fra, frb, rc);
}


void Assembler::fmul(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frc, RCBit rc) {
  emit(EXT4 | FMUL | frt.code() * B21 | fra.code() * B16 | frc.code() * B6 |
       rc);
}


void Assembler::fdiv(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FDIV, frt, fra, frb, rc);
}


// Unordered floating-point compare of fra and frb into CR field cr.
void Assembler::fcmpu(const DoubleRegister fra, const DoubleRegister frb,
                      CRegister cr) {
  DCHECK(cr.code() >= 0 && cr.code() <= 7);  // only CR fields 0-7 exist
  emit(EXT4 | FCMPU | cr.code() * B23 | fra.code() * B16 | frb.code() * B11);
}


// FP register-to-register move.
void Assembler::fmr(const DoubleRegister frt, const DoubleRegister frb,
                    RCBit rc) {
  emit(EXT4 | FMR | frt.code() * B21 | frb.code() * B11 | rc);
}


// Convert to 32-bit integer word; the "z" variant rounds toward zero.
void Assembler::fctiwz(const DoubleRegister frt, const DoubleRegister frb) {
  emit(EXT4 | FCTIWZ | frt.code() * B21 | frb.code() * B11);
}


void Assembler::fctiw(const DoubleRegister frt, const DoubleRegister frb) {
  emit(EXT4 | FCTIW | frt.code() * B21 | frb.code() * B11);
}


// frin/friz/frip/frim: round to integral value
// (nearest / toward zero / toward +inf / toward -inf).
void Assembler::frin(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIN | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::friz(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIZ | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::frip(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIP | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::frim(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIM | frt.code() * B21 | frb.code() * B11 | rc);
}


// Round double to single precision.
void Assembler::frsp(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRSP | frt.code() * B21 | frb.code() * B11 | rc);
}


// fcfid*: convert from 64-bit integer ("u" = unsigned source); the
// EXT3-encoded "s" variants produce single-precision results.
void Assembler::fcfid(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fcfidu(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCFIDU | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fcfidus(const DoubleRegister frt, const DoubleRegister frb,
                        RCBit rc) {
  emit(EXT3 | FCFIDU | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fcfids(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT3 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);
}


// fctid*: convert to 64-bit integer ("u" = unsigned, "z" = round to zero).
void Assembler::fctid(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FCTID | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fctidz(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCTIDZ | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fctidu(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCTIDU | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fctiduz(const DoubleRegister frt, const DoubleRegister frb,
                        RCBit rc) {
  emit(EXT4 | FCTIDUZ | frt.code() * B21 | frb.code() * B11 | rc);
}


// fsel: frt = (fra >= 0) ? frc : frb.
void Assembler::fsel(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frc, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FSEL | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}


void Assembler::fneg(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FNEG | frt.code() * B21 | frb.code() * B11 | rc);
}
   2254 
   2255 
// FPSCR manipulation and fused multiply instructions.

// Clears a single FPSCR bit.
void Assembler::mtfsb0(FPSCRBit bit, RCBit rc) {
  DCHECK(static_cast<int>(bit) < 32);
  int bt = bit;
  emit(EXT4 | MTFSB0 | bt * B21 | rc);
}


// Sets a single FPSCR bit.
void Assembler::mtfsb1(FPSCRBit bit, RCBit rc) {
  DCHECK(static_cast<int>(bit) < 32);
  int bt = bit;
  emit(EXT4 | MTFSB1 | bt * B21 | rc);
}


// Writes a 4-bit immediate into FPSCR field bf.
void Assembler::mtfsfi(int bf, int immediate, RCBit rc) {
  emit(EXT4 | MTFSFI | bf * B23 | immediate * B12 | rc);
}


// Copies the FPSCR into frt.
void Assembler::mffs(const DoubleRegister frt, RCBit rc) {
  emit(EXT4 | MFFS | frt.code() * B21 | rc);
}


// Writes FPSCR fields selected by the FLM mask from frb; L and W are the
// extension bits of the mtfsf encoding.
void Assembler::mtfsf(const DoubleRegister frb, bool L, int FLM, bool W,
                      RCBit rc) {
  emit(EXT4 | MTFSF | frb.code() * B11 | W * B16 | FLM * B17 | L * B25 | rc);
}


void Assembler::fsqrt(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FSQRT | frt.code() * B21 | frb.code() * B11 | rc);
}


void Assembler::fabs(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FABS | frt.code() * B21 | frb.code() * B11 | rc);
}


// Fused multiply-add: frt = fra * frc + frb (single rounding).
void Assembler::fmadd(const DoubleRegister frt, const DoubleRegister fra,
                      const DoubleRegister frc, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FMADD | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}


// Fused multiply-subtract: frt = fra * frc - frb (single rounding).
void Assembler::fmsub(const DoubleRegister frt, const DoubleRegister fra,
                      const DoubleRegister frc, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FMSUB | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}
   2312 
   2313 
   2314 // Pseudo instructions.
   2315 void Assembler::nop(int type) {
   2316   Register reg = r0;
   2317   switch (type) {
   2318     case NON_MARKING_NOP:
   2319       reg = r0;
   2320       break;
   2321     case GROUP_ENDING_NOP:
   2322       reg = r2;
   2323       break;
   2324     case DEBUG_BREAK_NOP:
   2325       reg = r3;
   2326       break;
   2327     default:
   2328       UNIMPLEMENTED();
   2329   }
   2330 
   2331   ori(reg, reg, Operand::Zero());
   2332 }
   2333 
   2334 
   2335 bool Assembler::IsNop(Instr instr, int type) {
   2336   int reg = 0;
   2337   switch (type) {
   2338     case NON_MARKING_NOP:
   2339       reg = 0;
   2340       break;
   2341     case GROUP_ENDING_NOP:
   2342       reg = 2;
   2343       break;
   2344     case DEBUG_BREAK_NOP:
   2345       reg = 3;
   2346       break;
   2347     default:
   2348       UNIMPLEMENTED();
   2349   }
   2350   return instr == (ORI | reg * B21 | reg * B16);
   2351 }
   2352 
   2353 
// Grows the code buffer so at least |needed| more bytes fit, preserving the
// instruction stream (at the front) and the reloc info (which grows down
// from the back of the buffer).
void Assembler::GrowBuffer(int needed) {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size: minimum 4KB, doubling while small, then
  // growing linearly by 1MB.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 4 * KB) {
    desc.buffer_size = 4 * KB;
  } else if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2 * buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1 * MB;
  }
  int space = buffer_space() + (desc.buffer_size - buffer_size_);
  if (space < needed) {
    desc.buffer_size += needed - space;
  }
  CHECK_GT(desc.buffer_size, 0);  // no overflow

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);
  desc.origin = this;

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.  Reloc info sits at the end of the buffer, so it moves
  // by the delta between the two buffer ends, not the buffer starts.
  intptr_t pc_delta = desc.buffer - buffer_;
  intptr_t rc_delta =
      (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Nothing else to do here since we keep all internal references and
  // deferred relocation entries relative to the buffer (until
  // EmitRelocations).
}
   2399 
   2400 
   2401 void Assembler::db(uint8_t data) {
   2402   CheckBuffer();
   2403   *reinterpret_cast<uint8_t*>(pc_) = data;
   2404   pc_ += sizeof(uint8_t);
   2405 }
   2406 
   2407 
   2408 void Assembler::dd(uint32_t data) {
   2409   CheckBuffer();
   2410   *reinterpret_cast<uint32_t*>(pc_) = data;
   2411   pc_ += sizeof(uint32_t);
   2412 }
   2413 
   2414 
   2415 void Assembler::dq(uint64_t value) {
   2416   CheckBuffer();
   2417   *reinterpret_cast<uint64_t*>(pc_) = value;
   2418   pc_ += sizeof(uint64_t);
   2419 }
   2420 
   2421 
   2422 void Assembler::dp(uintptr_t data) {
   2423   CheckBuffer();
   2424   *reinterpret_cast<uintptr_t*>(pc_) = data;
   2425   pc_ += sizeof(uintptr_t);
   2426 }
   2427 
   2428 
// Queues a relocation entry for the current pc.  Entries are buffered in
// relocations_ and written out later by EmitRelocations.
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  if (RelocInfo::IsNone(rmode) ||
      // Don't record external references unless the heap will be serialized.
      (rmode == RelocInfo::EXTERNAL_REFERENCE && !serializer_enabled() &&
       !emit_debug_code())) {
    return;
  }
  if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
    // The recorded AST id becomes the entry's data payload.
    data = RecordedAstId().ToInt();
    ClearRecordedAstId();
  }
  DeferredRelocInfo rinfo(pc_offset(), rmode, data);
  relocations_.push_back(rinfo);
}
   2443 
   2444 
// Writes out all deferred relocation entries and converts internal
// references — kept buffer-relative until now — into absolute addresses.
void Assembler::EmitRelocations() {
  EnsureSpaceFor(relocations_.size() * kMaxRelocSize);

  for (std::vector<DeferredRelocInfo>::iterator it = relocations_.begin();
       it != relocations_.end(); it++) {
    RelocInfo::Mode rmode = it->rmode();
    Address pc = buffer_ + it->position();
    Code* code = NULL;
    RelocInfo rinfo(isolate(), pc, rmode, it->data(), code);

    // Fix up internal references now that they are guaranteed to be bound.
    if (RelocInfo::IsInternalReference(rmode)) {
      // Jump table entry: the buffer-relative offset is stored as a raw
      // pointer-sized value at pc.
      intptr_t pos = reinterpret_cast<intptr_t>(Memory::Address_at(pc));
      Memory::Address_at(pc) = buffer_ + pos;
    } else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
      // mov sequence: the offset is encoded in the instructions at pc.
      intptr_t pos = reinterpret_cast<intptr_t>(target_address_at(pc, code));
      set_target_address_at(isolate(), pc, code, buffer_ + pos,
                            SKIP_ICACHE_FLUSH);
    }

    reloc_info_writer.Write(&rinfo);
  }

  reloc_info_writer.Finish();
}
   2472 
   2473 
// Blocks trampoline pool emission for the next |instructions| instructions.
void Assembler::BlockTrampolinePoolFor(int instructions) {
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}
   2477 
   2478 
// Emits the trampoline pool (one unconditional branch slot per tracked
// branch) if emission is currently allowed.  The pool is emitted at most
// once per Assembler.
void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if (trampoline_pool_blocked_nesting_ > 0) return;
  if (pc_offset() < no_trampoline_pool_before_) {
    // Re-check once the blocked region ends.
    next_trampoline_check_ = no_trampoline_pool_before_;
    return;
  }

  DCHECK(!trampoline_emitted_);
  if (tracked_branch_count_ > 0) {
    int size = tracked_branch_count_ * kInstrSize;

    // As we are only going to emit trampoline once, we need to prevent any
    // further emission.
    trampoline_emitted_ = true;
    next_trampoline_check_ = kMaxInt;

    // First we emit jump, then we emit trampoline pool.
    b(size + kInstrSize, LeaveLK);
    for (int i = size; i > 0; i -= kInstrSize) {
      b(i, LeaveLK);
    }

    trampoline_ = Trampoline(pc_offset() - size, tracked_branch_count_);
  }
}
   2509 
   2510 
   2511 }  // namespace internal
   2512 }  // namespace v8
   2513 
   2514 #endif  // V8_TARGET_ARCH_PPC
   2515