Home | History | Annotate | Download | only in ppc
      1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
      2 // All Rights Reserved.
      3 //
      4 // Redistribution and use in source and binary forms, with or without
      5 // modification, are permitted provided that the following conditions
      6 // are met:
      7 //
      8 // - Redistributions of source code must retain the above copyright notice,
      9 // this list of conditions and the following disclaimer.
     10 //
     11 // - Redistribution in binary form must reproduce the above copyright
     12 // notice, this list of conditions and the following disclaimer in the
     13 // documentation and/or other materials provided with the
     14 // distribution.
     15 //
     16 // - Neither the name of Sun Microsystems or the names of contributors may
     17 // be used to endorse or promote products derived from this software without
     18 // specific prior written permission.
     19 //
     20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
     23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
     24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
     25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
     26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
     29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
     31 // OF THE POSSIBILITY OF SUCH DAMAGE.
     32 
     33 // The original source code covered by the above license above has been
     34 // modified significantly by Google Inc.
     35 // Copyright 2014 the V8 project authors. All rights reserved.
     36 
     37 #include "src/ppc/assembler-ppc.h"
     38 
     39 #if V8_TARGET_ARCH_PPC
     40 
     41 #include "src/base/bits.h"
     42 #include "src/base/cpu.h"
     43 #include "src/macro-assembler.h"
     44 #include "src/ppc/assembler-ppc-inl.h"
     45 
     46 namespace v8 {
     47 namespace internal {
     48 
     49 // Get the CPU features enabled by the build.
     50 static unsigned CpuFeaturesImpliedByCompiler() {
     51   unsigned answer = 0;
     52   return answer;
     53 }
     54 
     55 
// Populates supported_ (bit set of CpuFeature values) and icache_line_size_.
// For cross-compiles only build-time features are used; otherwise features
// are probed at runtime from the detected CPU part, or assumed when running
// under the simulator.
void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();
  icache_line_size_ = 128;  // default; may be refined from the CPU below

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

// Detect whether frim instruction is supported (POWER5+)
// For now we will just check for processors we know do not
// support it
#ifndef USE_SIMULATOR
  // Probe for additional features at runtime.
  base::CPU cpu;
  if (cpu.part() == base::CPU::PPC_POWER9) {
    // Modulo instructions are only enabled on POWER9.
    supported_ |= (1u << MODULO);
  }
#if V8_TARGET_ARCH_PPC64
  if (cpu.part() == base::CPU::PPC_POWER8) {
    // Direct FPR<->GPR moves are enabled on 64-bit POWER8.
    supported_ |= (1u << FPR_GPR_MOV);
  }
#endif
  if (cpu.part() == base::CPU::PPC_POWER6 ||
      cpu.part() == base::CPU::PPC_POWER7 ||
      cpu.part() == base::CPU::PPC_POWER8) {
    supported_ |= (1u << LWSYNC);
  }
  if (cpu.part() == base::CPU::PPC_POWER7 ||
      cpu.part() == base::CPU::PPC_POWER8) {
    supported_ |= (1u << ISELECT);
    supported_ |= (1u << VSX);
  }
#if V8_OS_LINUX
  if (!(cpu.part() == base::CPU::PPC_G5 || cpu.part() == base::CPU::PPC_G4)) {
    // Assume support
    supported_ |= (1u << FPU);
  }
  if (cpu.icache_line_size() != base::CPU::UNKNOWN_CACHE_LINE_SIZE) {
    icache_line_size_ = cpu.icache_line_size();
  }
#elif V8_OS_AIX
  // Assume support FP support and default cache line size
  supported_ |= (1u << FPU);
#endif
#else  // Simulator
  // The simulator implements all of these, so enable them unconditionally.
  supported_ |= (1u << FPU);
  supported_ |= (1u << LWSYNC);
  supported_ |= (1u << ISELECT);
  supported_ |= (1u << VSX);
  supported_ |= (1u << MODULO);
#if V8_TARGET_ARCH_PPC64
  supported_ |= (1u << FPR_GPR_MOV);
#endif
#endif
}
    110 
    111 
    112 void CpuFeatures::PrintTarget() {
    113   const char* ppc_arch = NULL;
    114 
    115 #if V8_TARGET_ARCH_PPC64
    116   ppc_arch = "ppc64";
    117 #else
    118   ppc_arch = "ppc";
    119 #endif
    120 
    121   printf("target %s\n", ppc_arch);
    122 }
    123 
    124 
    125 void CpuFeatures::PrintFeatures() {
    126   printf("FPU=%d\n", CpuFeatures::IsSupported(FPU));
    127 }
    128 
    129 
// Maps a numeric register code (0..kNumRegisters-1) to the corresponding
// Register value.  Note the aliased names in the table: code 1 is sp,
// code 12 is ip, and code 31 is fp.
Register ToRegister(int num) {
  DCHECK(num >= 0 && num < kNumRegisters);
  const Register kRegisters[] = {r0,  sp,  r2,  r3,  r4,  r5,  r6,  r7,
                                 r8,  r9,  r10, r11, ip,  r13, r14, r15,
                                 r16, r17, r18, r19, r20, r21, r22, r23,
                                 r24, r25, r26, r27, r28, r29, r30, fp};
  return kRegisters[num];
}
    138 
    139 
    140 // -----------------------------------------------------------------------------
    141 // Implementation of RelocInfo
    142 
// Relocation modes that need fixing up when code moves: internal references
// point into the code object itself.
const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE |
                                  1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;
    145 
    146 
bool RelocInfo::IsCodedSpecially() {
  // The deserializer needs to know whether a pointer is specially
  // coded.  Being specially coded on PPC means that it is a lis/ori
  // instruction sequence or is a constant pool entry, and these are
  // always the case inside code objects.
  return true;
}


// Returns true if pc_ points at a constant pool load sequence.  This can
// only be the case when the embedded constant pool is enabled and the host
// code object actually has a constant pool.
bool RelocInfo::IsInConstantPool() {
  if (FLAG_enable_embedded_constant_pool) {
    Address constant_pool = host_->constant_pool();
    return (constant_pool && Assembler::IsConstantPoolLoadStart(pc_));
  }
  return false;
}

// Address of the wasm memory embedded in the code at pc_.
Address RelocInfo::wasm_memory_reference() {
  DCHECK(IsWasmMemoryReference(rmode_));
  return Assembler::target_address_at(pc_, host_);
}

// Wasm memory size embedded at pc_; stored as an address-sized value and
// narrowed back to 32 bits here.
uint32_t RelocInfo::wasm_memory_size_reference() {
  DCHECK(IsWasmMemorySizeReference(rmode_));
  return static_cast<uint32_t>(
     reinterpret_cast<intptr_t>(Assembler::target_address_at(pc_, host_)));
}

// Address of the wasm global embedded in the code at pc_.
Address RelocInfo::wasm_global_reference() {
  DCHECK(IsWasmGlobalReference(rmode_));
  return Assembler::target_address_at(pc_, host_);
}

// Wasm function table size embedded at pc_; same narrowing as the memory
// size above.
uint32_t RelocInfo::wasm_function_table_size_reference() {
  DCHECK(IsWasmFunctionTableSizeReference(rmode_));
  return static_cast<uint32_t>(
      reinterpret_cast<intptr_t>(Assembler::target_address_at(pc_, host_)));
}

// Rewrites the embedded wasm memory address without any rmode checks
// (callers have already validated).
void RelocInfo::unchecked_update_wasm_memory_reference(
    Address address, ICacheFlushMode flush_mode) {
  Assembler::set_target_address_at(isolate_, pc_, host_, address, flush_mode);
}

// Rewrites an embedded wasm size value, encoding it as an address.
void RelocInfo::unchecked_update_wasm_size(uint32_t size,
                                           ICacheFlushMode flush_mode) {
  Assembler::set_target_address_at(isolate_, pc_, host_,
                                   reinterpret_cast<Address>(size), flush_mode);
}
    196 
    197 // -----------------------------------------------------------------------------
    198 // Implementation of Operand and MemOperand
    199 // See assembler-ppc-inl.h for inlined constructors
    200 
// Builds an immediate operand from a handle.  Heap objects are embedded via
// the handle location (so the GC can relocate them, with EMBEDDED_OBJECT
// relocation); Smis are embedded directly as their raw bits and need no
// relocation.
Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    imm_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // no relocation needed
    imm_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = kRelocInfo_NONEPTR;
  }
}
    215 
    216 
    217 MemOperand::MemOperand(Register rn, int32_t offset) {
    218   ra_ = rn;
    219   rb_ = no_reg;
    220   offset_ = offset;
    221 }
    222 
    223 
    224 MemOperand::MemOperand(Register ra, Register rb) {
    225   ra_ = ra;
    226   rb_ = rb;
    227   offset_ = 0;
    228 }
    229 
    230 
    231 // -----------------------------------------------------------------------------
    232 // Specific instructions, constants, and masks.
    233 
// Sets up an assembler over the given (or newly allocated) buffer.
// Relocation info is written backwards from the end of the buffer while
// instructions grow forwards from the start.
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      constant_pool_builder_(kLoadPtrMaxReachBits, kLoadDoubleMaxReachBits) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  constant_pool_entry_sharing_blocked_nesting_ = 0;
  next_trampoline_check_ = kMaxInt;  // no trampoline check needed yet
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;
  optimizable_cmpi_pos_ = -1;
  // With --force-long-branches, behave as if a trampoline was already
  // emitted so all branches use the long form.
  trampoline_emitted_ = FLAG_force_long_branches;
  tracked_branch_count_ = 0;
  ClearRecordedAstId();
  relocations_.reserve(128);
}
    252 
    253 
// Finalizes code generation: emits the constant pool and pending
// relocations, then fills in the code descriptor for the caller.
void Assembler::GetCode(CodeDesc* desc) {
  // Emit constant pool if necessary.
  int constant_pool_offset = EmitConstantPool();

  EmitRelocations();

  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  // Reloc info was written backwards from the end of the buffer.
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
  // A zero offset means no constant pool was emitted.
  desc->constant_pool_size =
      (constant_pool_offset ? desc->instr_size - constant_pool_offset : 0);
  desc->origin = this;
  desc->unwinding_info_size = 0;
  desc->unwinding_info = nullptr;
}
    271 
    272 
// Pads with nops until pc_offset() is a multiple of m (m must be a power of
// two and at least the instruction size).
void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
  DCHECK((pc_offset() & (kInstrSize - 1)) == 0);
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}


// Aligns to the preferred boundary for code targets.
void Assembler::CodeTargetAlign() { Align(8); }
    283 
    284 
// Extracts the branch condition from a conditional-branch instruction.
// Only the "branch if true" / "branch if false" BO fields are handled.
Condition Assembler::GetCondition(Instr instr) {
  switch (instr & kCondMask) {
    case BT:
      return eq;
    case BF:
      return ne;
    default:
      UNIMPLEMENTED();
  }
  return al;  // unreachable; keeps the compiler happy
}
    296 
    297 
// lis is encoded as addis with ra == r0.
bool Assembler::IsLis(Instr instr) {
  return ((instr & kOpcodeMask) == ADDIS) && GetRA(instr).is(r0);
}


// li is encoded as addi with ra == r0.
bool Assembler::IsLi(Instr instr) {
  return ((instr & kOpcodeMask) == ADDI) && GetRA(instr).is(r0);
}


bool Assembler::IsAddic(Instr instr) { return (instr & kOpcodeMask) == ADDIC; }


bool Assembler::IsOri(Instr instr) { return (instr & kOpcodeMask) == ORI; }


// Conditional branch (bc and friends).
bool Assembler::IsBranch(Instr instr) { return ((instr & kOpcodeMask) == BCX); }


// Extracts the RA field of an instruction as a Register.
Register Assembler::GetRA(Instr instr) {
  Register reg;
  reg.reg_code = Instruction::RAValue(instr);
  return reg;
}


// Extracts the RB field of an instruction as a Register.
Register Assembler::GetRB(Instr instr) {
  Register reg;
  reg.reg_code = Instruction::RBValue(instr);
  return reg;
}
    329 
    330 
#if V8_TARGET_ARCH_PPC64
// This code assumes a FIXED_SEQUENCE for 64bit loads (lis/ori)
bool Assembler::Is64BitLoadIntoR12(Instr instr1, Instr instr2, Instr instr3,
                                   Instr instr4, Instr instr5) {
  // Check the instructions are indeed a five part load (into r12)
  // 3d800000       lis     r12, 0
  // 618c0000       ori     r12, r12, 0
  // 798c07c6       rldicr  r12, r12, 32, 31
  // 658c00c3       oris    r12, r12, 195
  // 618ccd40       ori     r12, r12, 52544
  // Only the opcode/register halves are compared; the low 16 bits carry the
  // immediate payload and may be anything (except for the fixed rldicr).
  return (((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c) &&
          (instr3 == 0x798c07c6) && ((instr4 >> 16) == 0x658c) &&
          ((instr5 >> 16) == 0x618c));
}
#else
// This code assumes a FIXED_SEQUENCE for 32bit loads (lis/ori)
bool Assembler::Is32BitLoadIntoR12(Instr instr1, Instr instr2) {
  // Check the instruction is indeed a two part load (into r12)
  // 3d802553       lis     r12, 9555
  // 618c5000       ori   r12, r12, 20480
  return (((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c));
}
#endif
    354 
    355 
// cmp is an EXT2 (opcode 31) instruction; reconstruct the full extended
// opcode before comparing.
bool Assembler::IsCmpRegister(Instr instr) {
  return (((instr & kOpcodeMask) == EXT2) &&
          ((EXT2 | (instr & kExt2OpcodeMask)) == CMP));
}


bool Assembler::IsRlwinm(Instr instr) {
  return ((instr & kOpcodeMask) == RLWINMX);
}


bool Assembler::IsAndi(Instr instr) { return ((instr & kOpcodeMask) == ANDIx); }


#if V8_TARGET_ARCH_PPC64
bool Assembler::IsRldicl(Instr instr) {
  return (((instr & kOpcodeMask) == EXT5) &&
          ((EXT5 | (instr & kExt5OpcodeMask)) == RLDICL));
}
#endif


bool Assembler::IsCmpImmediate(Instr instr) {
  return ((instr & kOpcodeMask) == CMPI);
}


// crset is encoded as creqv with identical source/destination bits.
bool Assembler::IsCrSet(Instr instr) {
  return (((instr & kOpcodeMask) == EXT1) &&
          ((EXT1 | (instr & kExt1OpcodeMask)) == CREQV));
}


// Register operand (RA) of a cmpi instruction.
Register Assembler::GetCmpImmediateRegister(Instr instr) {
  DCHECK(IsCmpImmediate(instr));
  return GetRA(instr);
}


// Raw (unsigned, not sign-extended) 16-bit immediate of a cmpi instruction.
int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
  DCHECK(IsCmpImmediate(instr));
  return instr & kOff16Mask;
}
    399 
    400 
    401 // Labels refer to positions in the (to be) generated code.
    402 // There are bound, linked, and unused labels.
    403 //
    404 // Bound labels refer to known positions in the already
    405 // generated code. pos() is the position the label refers to.
    406 //
    407 // Linked labels refer to unknown positions in the code
    408 // to be generated; pos() is the position of the last
    409 // instruction using the label.
    410 
    411 
// The link chain is terminated by a negative code position (must be aligned)
const int kEndOfChain = -4;


// Dummy opcodes for unbound label mov instructions or jump table entries.
// These occupy the top 6 (primary opcode) bits and are recognized by
// target_at()/target_at_put() until the label is bound and the real
// instruction sequence is patched in.
enum {
  kUnboundMovLabelOffsetOpcode = 0 << 26,
  kUnboundAddLabelOffsetOpcode = 1 << 26,
  kUnboundMovLabelAddrOpcode = 2 << 26,
  kUnboundJumpTableEntryOpcode = 3 << 26
};
    423 
    424 
// Returns the code position the instruction at |pos| links to.  For an
// unbound label chain, each instruction's immediate field stores the
// relative offset to the previous link; an offset of zero terminates the
// chain (kEndOfChain).
int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  // check which type of branch this is 16 or 26 bit offset
  uint32_t opcode = instr & kOpcodeMask;
  int link;
  switch (opcode) {
    case BX:
      link = SIGN_EXT_IMM26(instr & kImm26Mask);
      link &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
      break;
    case BCX:
      link = SIGN_EXT_IMM16((instr & kImm16Mask));
      link &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
      break;
    case kUnboundMovLabelOffsetOpcode:
    case kUnboundAddLabelOffsetOpcode:
    case kUnboundMovLabelAddrOpcode:
    case kUnboundJumpTableEntryOpcode:
      // Dummy instructions store the link scaled down by 4 in the 26-bit
      // immediate field; undo the scaling here.
      link = SIGN_EXT_IMM26(instr & kImm26Mask);
      link <<= 2;
      break;
    default:
      DCHECK(false);
      return -1;
  }

  if (link == 0) return kEndOfChain;
  return pos + link;
}
    454 
    455 
// Patches the instruction (or dummy placeholder sequence) at |pos| so it
// refers to |target_pos|.  If |is_branch| is non-null it is set to whether
// the patched instruction was a real branch (used for branch tracking).
void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
  Instr instr = instr_at(pos);
  uint32_t opcode = instr & kOpcodeMask;

  if (is_branch != nullptr) {
    *is_branch = (opcode == BX || opcode == BCX);
  }

  switch (opcode) {
    case BX: {
      int imm26 = target_pos - pos;
      CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
      if (imm26 == kInstrSize && !(instr & kLKMask)) {
        // Branch to next instr without link.
        instr = ORI;  // nop: ori, 0,0,0
      } else {
        // Keep the AA/LK bits, replace the 26-bit offset.
        instr &= ((~kImm26Mask) | kAAMask | kLKMask);
        instr |= (imm26 & kImm26Mask);
      }
      instr_at_put(pos, instr);
      break;
    }
    case BCX: {
      int imm16 = target_pos - pos;
      CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
      if (imm16 == kInstrSize && !(instr & kLKMask)) {
        // Branch to next instr without link.
        instr = ORI;  // nop: ori, 0,0,0
      } else {
        // Keep the AA/LK bits, replace the 16-bit offset.
        instr &= ((~kImm16Mask) | kAAMask | kLKMask);
        instr |= (imm16 & kImm16Mask);
      }
      instr_at_put(pos, instr);
      break;
    }
    case kUnboundMovLabelOffsetOpcode: {
      // Load the position of the label relative to the generated code object
      // pointer in a register.
      // The destination register code was stashed in the following slot.
      Register dst = Register::from_code(instr_at(pos + kInstrSize));
      int32_t offset = target_pos + (Code::kHeaderSize - kHeapObjectTag);
      CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos), 2,
                          CodePatcher::DONT_FLUSH);
      patcher.masm()->bitwise_mov32(dst, offset);
      break;
    }
    case kUnboundAddLabelOffsetOpcode: {
      // dst = base + position + immediate
      // Operands were packed into the following slot: dst in bits 21-25,
      // base in bits 16-20, immediate in the low 16 bits.
      Instr operands = instr_at(pos + kInstrSize);
      Register dst = Register::from_code((operands >> 21) & 0x1f);
      Register base = Register::from_code((operands >> 16) & 0x1f);
      int32_t offset = target_pos + SIGN_EXT_IMM16(operands & kImm16Mask);
      CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos), 2,
                          CodePatcher::DONT_FLUSH);
      patcher.masm()->bitwise_add32(dst, base, offset);
      break;
    }
    case kUnboundMovLabelAddrOpcode: {
      // Load the address of the label in a register.
      Register dst = Register::from_code(instr_at(pos + kInstrSize));
      CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
                          kMovInstructionsNoConstantPool,
                          CodePatcher::DONT_FLUSH);
      // Keep internal references relative until EmitRelocations.
      patcher.masm()->bitwise_mov(dst, target_pos);
      break;
    }
    case kUnboundJumpTableEntryOpcode: {
      CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
                          kPointerSize / kInstrSize, CodePatcher::DONT_FLUSH);
      // Keep internal references relative until EmitRelocations.
      patcher.masm()->dp(target_pos);
      break;
    }
    default:
      DCHECK(false);
      break;
  }
}
    534 
    535 
// Returns the number of significant offset bits the instruction at |pos|
// can encode (26 for unconditional, 16 for conditional branches), or 0 when
// the reach is unlimited (label placeholders patched via full sequences).
int Assembler::max_reach_from(int pos) {
  Instr instr = instr_at(pos);
  uint32_t opcode = instr & kOpcodeMask;

  // check which type of branch this is 16 or 26 bit offset
  switch (opcode) {
    case BX:
      return 26;
    case BCX:
      return 16;
    case kUnboundMovLabelOffsetOpcode:
    case kUnboundAddLabelOffsetOpcode:
    case kUnboundMovLabelAddrOpcode:
    case kUnboundJumpTableEntryOpcode:
      return 0;  // no limit on reach
  }

  DCHECK(false);
  return 0;
}
    556 
    557 
// Binds label L to code position |pos|: walks the label's link chain and
// patches every referring instruction.  Links whose branch reach cannot
// cover the distance are redirected through a shared trampoline slot.
void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  int32_t trampoline_pos = kInvalidSlotPos;
  bool is_branch = false;
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    int32_t offset = pos - fixup_pos;
    int maxReach = max_reach_from(fixup_pos);
    next(L);  // call next before overwriting link with target at fixup_pos
    if (maxReach && is_intn(offset, maxReach) == false) {
      // Out of direct reach: lazily allocate one trampoline slot that jumps
      // to |pos| and point this link at the trampoline instead.
      if (trampoline_pos == kInvalidSlotPos) {
        trampoline_pos = get_trampoline_entry();
        CHECK(trampoline_pos != kInvalidSlotPos);
        target_at_put(trampoline_pos, pos);
      }
      target_at_put(fixup_pos, trampoline_pos);
    } else {
      target_at_put(fixup_pos, pos, &is_branch);
    }
  }
  L->bind_to(pos);

  // A resolved branch no longer needs a future trampoline slot.
  if (!trampoline_emitted_ && is_branch) {
    UntrackBranch();
  }

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_) last_bound_pos_ = pos;
}
    588 
    589 
// Binds the label to the current code position.
void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // label can only be bound once
  bind_to(L, pc_offset());
}


// Advances L to the previous entry in its link chain, or marks it unused
// when the chain is exhausted.
void Assembler::next(Label* L) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos());
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    DCHECK(link >= 0);
    L->link_to(link);
  }
}
    606 
    607 
    608 bool Assembler::is_near(Label* L, Condition cond) {
    609   DCHECK(L->is_bound());
    610   if (L->is_bound() == false) return false;
    611 
    612   int maxReach = ((cond == al) ? 26 : 16);
    613   int offset = L->pos() - pc_offset();
    614 
    615   return is_intn(offset, maxReach);
    616 }
    617 
    618 
// Emits an A-form FP instruction: frt in bits 21-25, fra in 16-20,
// frb in 11-15, plus the record (Rc) bit.
void Assembler::a_form(Instr instr, DoubleRegister frt, DoubleRegister fra,
                       DoubleRegister frb, RCBit r) {
  emit(instr | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 | r);
}


// Emits a D-form instruction: rt, ra and a 16-bit immediate displacement.
// The displacement is range-checked as signed or unsigned per signed_disp;
// out-of-range values are printed before the CHECK fires to aid debugging.
void Assembler::d_form(Instr instr, Register rt, Register ra,
                       const intptr_t val, bool signed_disp) {
  if (signed_disp) {
    if (!is_int16(val)) {
      PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR "\n", val, val);
    }
    CHECK(is_int16(val));
  } else {
    if (!is_uint16(val)) {
      PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR
             ", is_unsigned_imm16(val)=%d, kImm16Mask=0x%x\n",
             val, val, is_uint16(val), kImm16Mask);
    }
    CHECK(is_uint16(val));
  }
  emit(instr | rt.code() * B21 | ra.code() * B16 | (kImm16Mask & val));
}


// Emits an X-form instruction: rs in bits 21-25, ra in 16-20, rb in 11-15.
void Assembler::x_form(Instr instr, Register ra, Register rs, Register rb,
                       RCBit r) {
  emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | r);
}

// Emits an XO-form instruction: like X-form plus the overflow (OE) bit.
void Assembler::xo_form(Instr instr, Register rt, Register ra, Register rb,
                        OEBit o, RCBit r) {
  emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | o | r);
}

// Emits an XX3-form VSX instruction.  Register codes wider than 5 bits
// carry their high bit in the separate AX/BX/TX fields.
void Assembler::xx3_form(Instr instr, DoubleRegister t, DoubleRegister a,
                         DoubleRegister b) {
  int AX = ((a.code() & 0x20) >> 5) & 0x1;
  int BX = ((b.code() & 0x20) >> 5) & 0x1;
  int TX = ((t.code() & 0x20) >> 5) & 0x1;
  emit(instr | (t.code() & 0x1F) * B21 | (a.code() & 0x1F) * B16 | (b.code()
       & 0x1F) * B11 | AX * B2 | BX * B1 | TX);
}

// Emits an MD-form instruction: 6-bit shift and mask values are split into
// a low-5-bit field and a separate high bit.
void Assembler::md_form(Instr instr, Register ra, Register rs, int shift,
                        int maskbit, RCBit r) {
  int sh0_4 = shift & 0x1f;
  int sh5 = (shift >> 5) & 0x1;
  int m0_4 = maskbit & 0x1f;
  int m5 = (maskbit >> 5) & 0x1;

  emit(instr | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 | m0_4 * B6 |
       m5 * B5 | sh5 * B1 | r);
}


// Emits an MDS-form instruction: like MD-form but the shift amount comes
// from register rb instead of an immediate.
void Assembler::mds_form(Instr instr, Register ra, Register rs, Register rb,
                         int maskbit, RCBit r) {
  int m0_4 = maskbit & 0x1f;
  int m5 = (maskbit >> 5) & 0x1;

  emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | m0_4 * B6 |
       m5 * B5 | r);
}
    683 
    684 
    685 // Returns the next free trampoline entry.
    686 int32_t Assembler::get_trampoline_entry() {
    687   int32_t trampoline_entry = kInvalidSlotPos;
    688 
    689   if (!internal_trampoline_exception_) {
    690     trampoline_entry = trampoline_.take_slot();
    691 
    692     if (kInvalidSlotPos == trampoline_entry) {
    693       internal_trampoline_exception_ = true;
    694     }
    695   }
    696   return trampoline_entry;
    697 }
    698 
    699 
// Returns the position a new reference to label L should encode, and links
// the current pc into L's chain when L is still unbound.
int Assembler::link(Label* L) {
  int position;
  if (L->is_bound()) {
    position = L->pos();
  } else {
    if (L->is_linked()) {
      position = L->pos();  // L's link
    } else {
      // was: target_pos = kEndOfChain;
      // However, using self to mark the first reference
      // should avoid most instances of branch offset overflow.  See
      // target_at() for where this is converted back to kEndOfChain.
      position = pc_offset();
    }
    L->link_to(pc_offset());
  }

  return position;
}
    719 
    720 
    721 // Branch instructions.
    722 
    723 
// Conditional branch to the link register.
void Assembler::bclr(BOfield bo, int condition_bit, LKBit lk) {
  emit(EXT1 | bo | condition_bit * B16 | BCLRX | lk);
}


// Conditional branch to the count register.
void Assembler::bcctr(BOfield bo, int condition_bit, LKBit lk) {
  emit(EXT1 | bo | condition_bit * B16 | BCCTRX | lk);
}


// Pseudo op - branch to link register
void Assembler::blr() { bclr(BA, 0, LeaveLK); }


// Pseudo op - branch to count register -- used for "jump"
void Assembler::bctr() { bcctr(BA, 0, LeaveLK); }


// Branch to count register, setting the link register.
void Assembler::bctrl() { bcctr(BA, 0, SetLK); }


// Conditional relative branch.  The offset must fit in 16 signed bits and
// be instruction-aligned (low AA/LK bits clear).
void Assembler::bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk) {
  int imm16 = branch_offset;
  CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
  emit(BCX | bo | condition_bit * B16 | (imm16 & kImm16Mask) | lk);
}


// Unconditional relative branch.  The offset must fit in 26 signed bits and
// be instruction-aligned (low AA/LK bits clear).
void Assembler::b(int branch_offset, LKBit lk) {
  int imm26 = branch_offset;
  CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
  emit(BX | (imm26 & kImm26Mask) | lk);
}
    757 
    758 
// xori: XOR with an (unsigned) 16-bit immediate.  Note D-form encodes the
// source in the rt slot and the destination in the ra slot.
void Assembler::xori(Register dst, Register src, const Operand& imm) {
  d_form(XORI, src, dst, imm.imm_, false);
}


// xoris: XOR immediate shifted (immediate applied to the upper halfword).
void Assembler::xoris(Register ra, Register rs, const Operand& imm) {
  d_form(XORIS, rs, ra, imm.imm_, false);
}


void Assembler::xor_(Register dst, Register src1, Register src2, RCBit rc) {
  x_form(EXT2 | XORX, dst, src1, src2, rc);
}


// Count leading zeros (word).  The rb slot is unused and encoded as r0.
void Assembler::cntlzw_(Register ra, Register rs, RCBit rc) {
  x_form(EXT2 | CNTLZWX, ra, rs, r0, rc);
}


// Population count (word).
void Assembler::popcntw(Register ra, Register rs) {
  emit(EXT2 | POPCNTW | rs.code() * B21 | ra.code() * B16);
}


void Assembler::and_(Register ra, Register rs, Register rb, RCBit rc) {
  x_form(EXT2 | ANDX, ra, rs, rb, rc);
}
    787 
    788 
    789 void Assembler::rlwinm(Register ra, Register rs, int sh, int mb, int me,
    790                        RCBit rc) {
    791   sh &= 0x1f;
    792   mb &= 0x1f;
    793   me &= 0x1f;
    794   emit(RLWINMX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
    795        me << 1 | rc);
    796 }
    797 
    798 
    799 void Assembler::rlwnm(Register ra, Register rs, Register rb, int mb, int me,
    800                       RCBit rc) {
    801   mb &= 0x1f;
    802   me &= 0x1f;
    803   emit(RLWNMX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | mb * B6 |
    804        me << 1 | rc);
    805 }
    806 
    807 
    808 void Assembler::rlwimi(Register ra, Register rs, int sh, int mb, int me,
    809                        RCBit rc) {
    810   sh &= 0x1f;
    811   mb &= 0x1f;
    812   me &= 0x1f;
    813   emit(RLWIMIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
    814        me << 1 | rc);
    815 }
    816 
    817 
    818 void Assembler::slwi(Register dst, Register src, const Operand& val, RCBit rc) {
    819   DCHECK((32 > val.imm_) && (val.imm_ >= 0));
    820   rlwinm(dst, src, val.imm_, 0, 31 - val.imm_, rc);
    821 }
    822 
    823 
    824 void Assembler::srwi(Register dst, Register src, const Operand& val, RCBit rc) {
    825   DCHECK((32 > val.imm_) && (val.imm_ >= 0));
    826   rlwinm(dst, src, 32 - val.imm_, val.imm_, 31, rc);
    827 }
    828 
    829 
    830 void Assembler::clrrwi(Register dst, Register src, const Operand& val,
    831                        RCBit rc) {
    832   DCHECK((32 > val.imm_) && (val.imm_ >= 0));
    833   rlwinm(dst, src, 0, 0, 31 - val.imm_, rc);
    834 }
    835 
    836 
    837 void Assembler::clrlwi(Register dst, Register src, const Operand& val,
    838                        RCBit rc) {
    839   DCHECK((32 > val.imm_) && (val.imm_ >= 0));
    840   rlwinm(dst, src, 0, val.imm_, 31, rc);
    841 }
    842 
    843 
    844 void Assembler::srawi(Register ra, Register rs, int sh, RCBit r) {
    845   emit(EXT2 | SRAWIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | r);
    846 }
    847 
    848 
    849 void Assembler::srw(Register dst, Register src1, Register src2, RCBit r) {
    850   x_form(EXT2 | SRWX, dst, src1, src2, r);
    851 }
    852 
    853 
    854 void Assembler::slw(Register dst, Register src1, Register src2, RCBit r) {
    855   x_form(EXT2 | SLWX, dst, src1, src2, r);
    856 }
    857 
    858 
// sraw: Shift Right Algebraic Word by register amount, X-form.
void Assembler::sraw(Register ra, Register rs, Register rb, RCBit r) {
  x_form(EXT2 | SRAW, ra, rs, rb, r);
}
    862 
    863 
// rotlw: Rotate Left Word by register amount — pseudo op: rlwnm with the
// full 0..31 mask selects the entire rotated word.
void Assembler::rotlw(Register ra, Register rs, Register rb, RCBit r) {
  rlwnm(ra, rs, rb, 0, 31, r);
}
    867 
    868 
// rotlwi: Rotate Left Word Immediate — pseudo op via rlwinm, full mask.
void Assembler::rotlwi(Register ra, Register rs, int sh, RCBit r) {
  rlwinm(ra, rs, sh, 0, 31, r);
}
    872 
    873 
// rotrwi: Rotate Right Word Immediate — implemented as a left rotate by
// the complementary amount (32 - sh).
void Assembler::rotrwi(Register ra, Register rs, int sh, RCBit r) {
  rlwinm(ra, rs, 32 - sh, 0, 31, r);
}
    877 
    878 
    879 void Assembler::subi(Register dst, Register src, const Operand& imm) {
    880   addi(dst, src, Operand(-(imm.imm_)));
    881 }
    882 
// addc: Add Carrying — sets CA; XO-form with optional OE/Rc.
void Assembler::addc(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | ADDCX, dst, src1, src2, o, r);
}
    887 
// adde: Add Extended — adds src1 + src2 + CA; XO-form.
void Assembler::adde(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | ADDEX, dst, src1, src2, o, r);
}
    892 
// addze: Add to Zero Extended — dst = src1 + CA.  Encoded directly since
// this XO-form variant has no rb field.
void Assembler::addze(Register dst, Register src1, OEBit o, RCBit r) {
  // a special xo_form
  emit(EXT2 | ADDZEX | dst.code() * B21 | src1.code() * B16 | o | r);
}
    897 
    898 
// sub: dst = src1 - src2.  PPC subf computes rb - ra, so the source
// operands are swapped when forwarded to xo_form.
void Assembler::sub(Register dst, Register src1, Register src2, OEBit o,
                    RCBit r) {
  xo_form(EXT2 | SUBFX, dst, src2, src1, o, r);
}
    903 
// subc: Subtract Carrying (subfc) — operands swapped as in sub() above
// because subf computes rb - ra.
void Assembler::subc(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | SUBFCX, dst, src2, src1, o, r);
}
    908 
// sube: Subtract Extended (subfe, consumes CA) — operands swapped because
// subf computes rb - ra.
void Assembler::sube(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | SUBFEX, dst, src2, src1, o, r);
}
    913 
// subfic: Subtract From Immediate Carrying — dst = imm - src; D-form
// with a signed immediate.
void Assembler::subfic(Register dst, Register src, const Operand& imm) {
  d_form(SUBFIC, dst, src, imm.imm_, true);
}
    917 
    918 
// add: dst = src1 + src2; XO-form with optional OE/Rc.
void Assembler::add(Register dst, Register src1, Register src2, OEBit o,
                    RCBit r) {
  xo_form(EXT2 | ADDX, dst, src1, src2, o, r);
}
    923 
    924 
// Multiply low word: dst = low 32 bits of src1 * src2.
void Assembler::mullw(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | MULLW, dst, src1, src2, o, r);
}
    930 
    931 
// Multiply hi word (signed): dst = high 32 bits of src1 * src2.
// mulhw has no OE bit, so LeaveOE is forced here.
void Assembler::mulhw(Register dst, Register src1, Register src2, RCBit r) {
  xo_form(EXT2 | MULHWX, dst, src1, src2, LeaveOE, r);
}
    936 
    937 
// Multiply hi word unsigned: dst = high 32 bits of unsigned src1 * src2.
// mulhwu has no OE bit, so LeaveOE is forced here.
void Assembler::mulhwu(Register dst, Register src1, Register src2, RCBit r) {
  xo_form(EXT2 | MULHWUX, dst, src1, src2, LeaveOE, r);
}
    942 
    943 
// Divide word (signed 32-bit): dst = src1 / src2.
void Assembler::divw(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | DIVW, dst, src1, src2, o, r);
}
    949 
    950 
// Divide word unsigned (32-bit): dst = src1 / src2.
void Assembler::divwu(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | DIVWU, dst, src1, src2, o, r);
}
    956 
// modsw: Modulo Signed Word (POWER9) — rt = ra mod rb.  Operands are
// reordered to match x_form's field layout (rt must land in the B21 field).
void Assembler::modsw(Register rt, Register ra, Register rb) {
  x_form(EXT2 | MODSW, ra, rt, rb, LeaveRC);
}
    960 
// moduw: Modulo Unsigned Word (POWER9) — rt = ra mod rb.  Same operand
// reordering as modsw to fit x_form's field layout.
void Assembler::moduw(Register rt, Register ra, Register rb) {
  x_form(EXT2 | MODUW, ra, rt, rb, LeaveRC);
}
    964 
// addi: Add Immediate (signed 16-bit).  ra = r0 encodes the literal zero,
// not register r0 — callers wanting that must use li().
void Assembler::addi(Register dst, Register src, const Operand& imm) {
  DCHECK(!src.is(r0));  // use li instead to show intent
  d_form(ADDI, dst, src, imm.imm_, true);
}
    969 
    970 
// addis: Add Immediate Shifted (immediate << 16).  ra = r0 encodes the
// literal zero — callers wanting that must use lis().
void Assembler::addis(Register dst, Register src, const Operand& imm) {
  DCHECK(!src.is(r0));  // use lis instead to show intent
  d_form(ADDIS, dst, src, imm.imm_, true);
}
    975 
    976 
// addic: Add Immediate Carrying — like addi but also sets CA.
void Assembler::addic(Register dst, Register src, const Operand& imm) {
  d_form(ADDIC, dst, src, imm.imm_, true);
}
    980 
    981 
// andi: AND Immediate (unsigned 16-bit).  Note rs/ra are swapped in the
// d_form call: for logical ops the D-form target field holds rs.
// andi. always updates CR0 on real hardware.
void Assembler::andi(Register ra, Register rs, const Operand& imm) {
  d_form(ANDIx, rs, ra, imm.imm_, false);
}
    985 
    986 
// andis: AND Immediate Shifted (immediate << 16); operand swap as in andi.
void Assembler::andis(Register ra, Register rs, const Operand& imm) {
  d_form(ANDISx, rs, ra, imm.imm_, false);
}
    990 
    991 
// nor: dst = ~(src1 | src2); X-form.
void Assembler::nor(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | NORX, dst, src1, src2, r);
}
    995 
    996 
// notx: bitwise complement — pseudo op: nor(dst, src, src).
void Assembler::notx(Register dst, Register src, RCBit r) {
  x_form(EXT2 | NORX, dst, src, src, r);
}
   1000 
   1001 
// ori: OR Immediate (unsigned 16-bit); rs/ra swapped for the logical-op
// D-form field layout.
void Assembler::ori(Register ra, Register rs, const Operand& imm) {
  d_form(ORI, rs, ra, imm.imm_, false);
}
   1005 
   1006 
// oris: OR Immediate Shifted (immediate << 16); operand swap as in ori.
void Assembler::oris(Register dst, Register src, const Operand& imm) {
  d_form(ORIS, src, dst, imm.imm_, false);
}
   1010 
   1011 
// orx: dst = src1 | src2; X-form ("or" is a C++ keyword, hence the name).
void Assembler::orx(Register dst, Register src1, Register src2, RCBit rc) {
  x_form(EXT2 | ORX, dst, src1, src2, rc);
}
   1015 
   1016 
// orc: dst = src1 | ~src2 (OR with Complement); X-form.
void Assembler::orc(Register dst, Register src1, Register src2, RCBit rc) {
  x_form(EXT2 | ORC, dst, src1, src2, rc);
}
   1020 
   1021 
// cmpi: compare register with signed 16-bit immediate at the native word
// width.  The L bit selects a 64-bit compare on PPC64; use cmpwi for an
// explicit 32-bit compare.
void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) {
  intptr_t imm16 = src2.imm_;
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(is_int16(imm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  imm16 &= kImm16Mask;  // truncate after the range check above
  emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
}
   1034 
   1035 
// cmpli: compare logical (unsigned) with a 16-bit immediate at the native
// word width; L bit selects 64-bit compare on PPC64.
void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) {
  uintptr_t uimm16 = src2.imm_;
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(is_uint16(uimm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  uimm16 &= kImm16Mask;  // truncate after the range check above
  emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
}
   1048 
   1049 
// cmp: signed register-register compare at the native word width;
// L bit selects 64-bit compare on PPC64.
void Assembler::cmp(Register src1, Register src2, CRegister cr) {
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT2 | CMP | cr.code() * B23 | L * B21 | src1.code() * B16 |
       src2.code() * B11);
}
   1060 
   1061 
// cmpl: unsigned (logical) register-register compare at the native word
// width; L bit selects 64-bit compare on PPC64.
void Assembler::cmpl(Register src1, Register src2, CRegister cr) {
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT2 | CMPL | cr.code() * B23 | L * B21 | src1.code() * B16 |
       src2.code() * B11);
}
   1072 
   1073 
// cmpwi: explicit 32-bit signed compare with immediate (L = 0 even on
// PPC64).  Compares against zero are additionally recorded so a later
// peephole can fold them away.
void Assembler::cmpwi(Register src1, const Operand& src2, CRegister cr) {
  intptr_t imm16 = src2.imm_;
  int L = 0;  // force 32-bit compare
  int pos = pc_offset();
  DCHECK(is_int16(imm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  imm16 &= kImm16Mask;

  // For cmpwi against 0, save position and cr for later examination
  // of potential optimization.
  if (imm16 == 0 && pos > 0 && last_bound_pos_ != pos) {
    optimizable_cmpi_pos_ = pos;
    cmpi_cr_ = cr;
  }
  emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
}
   1090 
   1091 
// cmplwi: explicit 32-bit unsigned compare with immediate (L = 0).
void Assembler::cmplwi(Register src1, const Operand& src2, CRegister cr) {
  uintptr_t uimm16 = src2.imm_;
  int L = 0;  // force 32-bit compare
  DCHECK(is_uint16(uimm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  uimm16 &= kImm16Mask;
  emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
}
   1100 
   1101 
// cmpw: explicit 32-bit signed register-register compare (L = 0).
void Assembler::cmpw(Register src1, Register src2, CRegister cr) {
  int L = 0;  // force 32-bit compare
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT2 | CMP | cr.code() * B23 | L * B21 | src1.code() * B16 |
       src2.code() * B11);
}
   1108 
   1109 
// cmplw: explicit 32-bit unsigned register-register compare (L = 0).
void Assembler::cmplw(Register src1, Register src2, CRegister cr) {
  int L = 0;  // force 32-bit compare
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT2 | CMPL | cr.code() * B23 | L * B21 | src1.code() * B16 |
       src2.code() * B11);
}
   1116 
   1117 
// isel: Integer Select — rt = (CR bit cb set) ? ra : rb.
void Assembler::isel(Register rt, Register ra, Register rb, int cb) {
  emit(EXT2 | ISEL | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       cb * B6);
}
   1122 
   1123 
// Pseudo op - load immediate: addi with ra = 0, which the hardware treats
// as the literal zero rather than register r0.
void Assembler::li(Register dst, const Operand& imm) {
  d_form(ADDI, dst, r0, imm.imm_, true);
}
   1128 
   1129 
// lis: load immediate shifted (imm << 16); ra = 0 means literal zero.
void Assembler::lis(Register dst, const Operand& imm) {
  d_form(ADDIS, dst, r0, imm.imm_, true);
}
   1133 
   1134 
// Pseudo op - move register: mr dst, src is the canonical or dst, src, src.
void Assembler::mr(Register dst, Register src) {
  // actually or(dst, src, src)
  orx(dst, src, src);
}
   1140 
   1141 
// lbz: Load Byte and Zero, D-form (base + signed 16-bit displacement).
// ra must not be r0: that encoding means base == literal 0.
void Assembler::lbz(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(LBZ, dst, src.ra(), src.offset(), true);
}
   1146 
   1147 
// lbzx: Load Byte and Zero Indexed (base register + index register).
void Assembler::lbzx(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));  // ra = 0 would mean literal-zero base
  emit(EXT2 | LBZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}
   1155 
   1156 
// lbzux: Load Byte and Zero with Update Indexed — also writes the
// effective address back into ra.
void Assembler::lbzux(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LBZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}
   1164 
   1165 
// lhz: Load Halfword and Zero, D-form.
void Assembler::lhz(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(LHZ, dst, src.ra(), src.offset(), true);
}
   1170 
   1171 
// lhzx: Load Halfword and Zero Indexed.
void Assembler::lhzx(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LHZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}
   1179 
   1180 
// lhzux: Load Halfword and Zero with Update Indexed (ra receives the EA).
void Assembler::lhzux(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LHZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}
   1188 
   1189 
// lhax: Load Halfword Algebraic (sign-extending) Indexed.
void Assembler::lhax(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LHAX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
}
   1196 
   1197 
// lwz: Load Word and Zero, D-form.
void Assembler::lwz(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(LWZ, dst, src.ra(), src.offset(), true);
}
   1202 
   1203 
// lwzu: Load Word and Zero with Update — ra receives the effective address.
void Assembler::lwzu(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(LWZU, dst, src.ra(), src.offset(), true);
}
   1208 
   1209 
// lwzx: Load Word and Zero Indexed.
void Assembler::lwzx(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LWZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}
   1217 
   1218 
// lwzux: Load Word and Zero with Update Indexed (ra receives the EA).
void Assembler::lwzux(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LWZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}
   1226 
   1227 
// lha: Load Halfword Algebraic (sign-extending), D-form.
void Assembler::lha(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(LHA, dst, src.ra(), src.offset(), true);
}
   1232 
   1233 
// lwa: Load Word Algebraic (sign-extending).  On PPC64 this is a DS-form
// instruction (LD opcode with sub-opcode 2), so the offset must be a
// multiple of 4.  On 32-bit targets it degenerates to lwz.
void Assembler::lwa(Register dst, const MemOperand& src) {
#if V8_TARGET_ARCH_PPC64
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  CHECK(!(offset & 3) && is_int16(offset));  // DS-form alignment/range
  offset = kImm16Mask & offset;
  emit(LD | dst.code() * B21 | src.ra().code() * B16 | offset | 2);
#else
  lwz(dst, src);
#endif
}
   1245 
   1246 
// lwax: Load Word Algebraic Indexed (PPC64); plain lwzx on 32-bit where
// word loads need no sign extension.
void Assembler::lwax(Register rt, const MemOperand& src) {
#if V8_TARGET_ARCH_PPC64
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LWAX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
#else
  lwzx(rt, src);
#endif
}
   1257 
   1258 
// ldbrx: Load Doubleword Byte-Reversed Indexed.  Operand order matches
// x_form's field layout with dst in the middle slot.
void Assembler::ldbrx(Register dst, const MemOperand& src) {
  x_form(EXT2 | LDBRX, src.ra(), dst, src.rb(), LeaveRC);
}
   1262 
   1263 
// lwbrx: Load Word Byte-Reversed Indexed.
void Assembler::lwbrx(Register dst, const MemOperand& src) {
  x_form(EXT2 | LWBRX, src.ra(), dst, src.rb(), LeaveRC);
}
   1267 
   1268 
// lhbrx: Load Halfword Byte-Reversed Indexed.
void Assembler::lhbrx(Register dst, const MemOperand& src) {
  x_form(EXT2 | LHBRX, src.ra(), dst, src.rb(), LeaveRC);
}
   1272 
   1273 
// stb: Store Byte, D-form.
void Assembler::stb(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(STB, dst, src.ra(), src.offset(), true);
}
   1278 
   1279 
// stbx: Store Byte Indexed.
void Assembler::stbx(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STBX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}
   1287 
   1288 
// stbux: Store Byte with Update Indexed (ra receives the EA).
void Assembler::stbux(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STBUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}
   1296 
   1297 
// sth: Store Halfword, D-form.
void Assembler::sth(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(STH, dst, src.ra(), src.offset(), true);
}
   1302 
   1303 
// sthx: Store Halfword Indexed.
void Assembler::sthx(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STHX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}
   1311 
   1312 
// sthux: Store Halfword with Update Indexed (ra receives the EA).
void Assembler::sthux(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STHUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}
   1320 
   1321 
// stw: Store Word, D-form.
void Assembler::stw(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(STW, dst, src.ra(), src.offset(), true);
}
   1326 
   1327 
// stwu: Store Word with Update — ra receives the effective address
// (commonly used for stack-frame pushes).
void Assembler::stwu(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(STWU, dst, src.ra(), src.offset(), true);
}
   1332 
   1333 
// stwx: Store Word Indexed.
void Assembler::stwx(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STWX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}
   1341 
   1342 
// stwux: Store Word with Update Indexed (ra receives the EA).
void Assembler::stwux(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STWUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}
   1350 
   1351 
// extsb: Extend Sign Byte — ra = sign-extended low byte of rs.  Note the
// X-form source/target swap: rs goes in the B21 field.
void Assembler::extsb(Register rs, Register ra, RCBit rc) {
  emit(EXT2 | EXTSB | ra.code() * B21 | rs.code() * B16 | rc);
}
   1355 
   1356 
// extsh: Extend Sign Halfword; same field swap as extsb.
void Assembler::extsh(Register rs, Register ra, RCBit rc) {
  emit(EXT2 | EXTSH | ra.code() * B21 | rs.code() * B16 | rc);
}
   1360 
   1361 
// extsw: Extend Sign Word — only meaningful on 64-bit targets.  On 32-bit
// it must be a no-op, which the DCHECK enforces (in-place, no CR update).
void Assembler::extsw(Register rs, Register ra, RCBit rc) {
#if V8_TARGET_ARCH_PPC64
  emit(EXT2 | EXTSW | ra.code() * B21 | rs.code() * B16 | rc);
#else
  // nop on 32-bit
  DCHECK(rs.is(ra) && rc == LeaveRC);
#endif
}
   1370 
   1371 
// neg: rt = -ra (two's complement negate); XO-form without rb.
void Assembler::neg(Register rt, Register ra, OEBit o, RCBit r) {
  emit(EXT2 | NEGX | rt.code() * B21 | ra.code() * B16 | o | r);
}
   1375 
   1376 
// andc: dst = src1 & ~src2 (AND with Complement); X-form.
void Assembler::andc(Register dst, Register src1, Register src2, RCBit rc) {
  x_form(EXT2 | ANDCX, dst, src1, src2, rc);
}
   1380 
   1381 
   1382 #if V8_TARGET_ARCH_PPC64
   1383 // 64bit specific instructions
// ld: Load Doubleword, DS-form — the offset must be a multiple of 4
// because its low two bits encode the sub-opcode (0 here).
void Assembler::ld(Register rd, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset);
}
   1391 
   1392 
// ldx: Load Doubleword Indexed.
void Assembler::ldx(Register rd, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LDX | rd.code() * B21 | ra.code() * B16 | rb.code() * B11);
}
   1399 
   1400 
// ldu: Load Doubleword with Update — DS-form with sub-opcode 1; ra
// receives the effective address.
void Assembler::ldu(Register rd, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  CHECK(!(offset & 3) && is_int16(offset));  // DS-form alignment/range
  offset = kImm16Mask & offset;
  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset | 1);
}
   1408 
   1409 
// ldux: Load Doubleword with Update Indexed (ra receives the EA).
void Assembler::ldux(Register rd, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LDUX | rd.code() * B21 | ra.code() * B16 | rb.code() * B11);
}
   1416 
   1417 
// std: Store Doubleword, DS-form — offset must be a multiple of 4.
void Assembler::std(Register rs, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset);
}
   1425 
   1426 
// stdx: Store Doubleword Indexed.
void Assembler::stdx(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STDX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11);
}
   1433 
   1434 
// stdu: Store Doubleword with Update — DS-form with sub-opcode 1; ra
// receives the effective address (stack pushes on 64-bit).
void Assembler::stdu(Register rs, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  CHECK(!(offset & 3) && is_int16(offset));  // DS-form alignment/range
  offset = kImm16Mask & offset;
  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset | 1);
}
   1442 
   1443 
// stdux: Store Doubleword with Update Indexed (ra receives the EA).
void Assembler::stdux(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STDUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11);
}
   1450 
   1451 
// rldic: Rotate Left Doubleword Immediate then Clear, MD-form.
void Assembler::rldic(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIC, ra, rs, sh, mb, r);
}
   1455 
   1456 
// rldicl: Rotate Left Doubleword Immediate then Clear Left, MD-form.
void Assembler::rldicl(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDICL, ra, rs, sh, mb, r);
}
   1460 
   1461 
// rldcl: Rotate Left Doubleword (by register) then Clear Left, MDS-form.
void Assembler::rldcl(Register ra, Register rs, Register rb, int mb, RCBit r) {
  mds_form(EXT5 | RLDCL, ra, rs, rb, mb, r);
}
   1465 
   1466 
// rldicr: Rotate Left Doubleword Immediate then Clear Right, MD-form.
void Assembler::rldicr(Register ra, Register rs, int sh, int me, RCBit r) {
  md_form(EXT5 | RLDICR, ra, rs, sh, me, r);
}
   1470 
   1471 
// sldi: Shift Left Doubleword Immediate — pseudo op via rldicr.
void Assembler::sldi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));  // shift amount must be 0..63
  rldicr(dst, src, val.imm_, 63 - val.imm_, rc);
}
   1476 
   1477 
// srdi: Shift Right Doubleword Immediate (logical) — pseudo op via rldicl.
void Assembler::srdi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));  // shift amount must be 0..63
  rldicl(dst, src, 64 - val.imm_, val.imm_, rc);
}
   1482 
   1483 
// clrrdi: Clear Right (low-order) val bits of a doubleword — via rldicr.
void Assembler::clrrdi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));  // bit count must be 0..63
  rldicr(dst, src, 0, 63 - val.imm_, rc);
}
   1489 
   1490 
// clrldi: Clear Left (high-order) val bits of a doubleword — via rldicl.
void Assembler::clrldi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));  // bit count must be 0..63
  rldicl(dst, src, 0, val.imm_, rc);
}
   1496 
   1497 
// rldimi: Rotate Left Doubleword Immediate then Mask Insert, MD-form.
void Assembler::rldimi(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIMI, ra, rs, sh, mb, r);
}
   1501 
   1502 
// sradi: Shift Right Algebraic Doubleword Immediate.  The 6-bit shift
// amount is split across the encoding: low 5 bits at B11, bit 5 at B1.
void Assembler::sradi(Register ra, Register rs, int sh, RCBit r) {
  int sh0_4 = sh & 0x1f;       // low five bits of the shift amount
  int sh5 = (sh >> 5) & 0x1;   // sixth bit, encoded separately

  emit(EXT2 | SRADIX | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 |
       sh5 * B1 | r);
}
   1510 
   1511 
// srd: Shift Right Doubleword (logical) by register amount, X-form.
void Assembler::srd(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | SRDX, dst, src1, src2, r);
}
   1515 
   1516 
// sld: Shift Left Doubleword by register amount, X-form.
void Assembler::sld(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | SLDX, dst, src1, src2, r);
}
   1520 
   1521 
// srad: Shift Right Algebraic Doubleword by register amount, X-form.
void Assembler::srad(Register ra, Register rs, Register rb, RCBit r) {
  x_form(EXT2 | SRAD, ra, rs, rb, r);
}
   1525 
   1526 
// rotld: Rotate Left Doubleword by register — pseudo op: rldcl, mb = 0.
void Assembler::rotld(Register ra, Register rs, Register rb, RCBit r) {
  rldcl(ra, rs, rb, 0, r);
}
   1530 
   1531 
// rotldi: Rotate Left Doubleword Immediate — pseudo op via rldicl.
void Assembler::rotldi(Register ra, Register rs, int sh, RCBit r) {
  rldicl(ra, rs, sh, 0, r);
}
   1535 
   1536 
// rotrdi: Rotate Right Doubleword Immediate — left rotate by 64 - sh.
void Assembler::rotrdi(Register ra, Register rs, int sh, RCBit r) {
  rldicl(ra, rs, 64 - sh, 0, r);
}
   1540 
   1541 
// cntlzd_: Count Leading Zeros Doubleword.  The instruction has no rb
// operand; r0 merely fills x_form's unused field (encoded as zero).
void Assembler::cntlzd_(Register ra, Register rs, RCBit rc) {
  x_form(EXT2 | CNTLZDX, ra, rs, r0, rc);
}
   1545 
   1546 
// popcntd: Population Count Doubleword — ra = number of set bits in rs.
void Assembler::popcntd(Register ra, Register rs) {
  emit(EXT2 | POPCNTD | rs.code() * B21 | ra.code() * B16);
}
   1550 
   1551 
// mulld: Multiply Low Doubleword — dst = low 64 bits of src1 * src2.
void Assembler::mulld(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | MULLD, dst, src1, src2, o, r);
}
   1556 
   1557 
// divd: Divide Doubleword (signed).
void Assembler::divd(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | DIVD, dst, src1, src2, o, r);
}
   1562 
   1563 
// divdu: Divide Doubleword Unsigned.
void Assembler::divdu(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | DIVDU, dst, src1, src2, o, r);
}
   1568 
// modsd: Modulo Signed Doubleword (POWER9); operand reorder as in modsw.
void Assembler::modsd(Register rt, Register ra, Register rb) {
  x_form(EXT2 | MODSD, ra, rt, rb, LeaveRC);
}
   1572 
// modud: Modulo Unsigned Doubleword (POWER9); operand reorder as in moduw.
void Assembler::modud(Register rt, Register ra, Register rb) {
  x_form(EXT2 | MODUD, ra, rt, rb, LeaveRC);
}
   1576 #endif
   1577 
   1578 
// Function descriptor for AIX.
// Code address skips the function descriptor "header".
// TOC and static chain are ignored and set to 0.
void Assembler::function_descriptor() {
  if (ABI_USES_FUNCTION_DESCRIPTORS) {
    Label instructions;
    DCHECK(pc_offset() == 0);  // descriptor must be the very first thing emitted
    emit_label_addr(&instructions);  // entry point: first real instruction
    dp(0);  // TOC pointer slot (unused)
    dp(0);  // static chain slot (unused)
    bind(&instructions);
  }
}
   1592 
   1593 
// Returns the number of instructions a mov(dst, src) will emit, so callers
// can reserve buffer space / patch sites.  Must stay in sync with mov().
int Assembler::instructions_required_for_mov(Register dst,
                                             const Operand& src) const {
  bool canOptimize =
      !(src.must_output_reloc_info(this) || is_trampoline_pool_blocked());
  if (use_constant_pool_for_mov(dst, src, canOptimize)) {
    if (ConstantPoolAccessIsInOverflow()) {
      // Overflow access needs an extra addis to form the pool address.
      return kMovInstructionsConstantPool + 1;
    }
    return kMovInstructionsConstantPool;
  }
  DCHECK(!canOptimize);  // optimizable movs never take the fixed sequence
  return kMovInstructionsNoConstantPool;
}
   1607 
   1608 
// Decides whether mov(dst, src) should load the value from the embedded
// constant pool instead of synthesizing it with immediates.
// |canOptimize| means the caller may emit a shorter-than-fixed sequence.
bool Assembler::use_constant_pool_for_mov(Register dst, const Operand& src,
                                          bool canOptimize) const {
  if (!FLAG_enable_embedded_constant_pool || !is_constant_pool_available()) {
    // If there is no constant pool available, we must use a mov
    // immediate sequence.
    return false;
  }

  intptr_t value = src.immediate();
#if V8_TARGET_ARCH_PPC64
  // r0 cannot serve as the pool base scratch, and small optimizable
  // values are cheaper as immediates, so those cases must not overflow.
  bool allowOverflow = !((canOptimize && is_int32(value)) || dst.is(r0));
#else
  bool allowOverflow = !(canOptimize || dst.is(r0));
#endif
  if (canOptimize && is_int16(value)) {
    // Prefer a single-instruction load-immediate.
    return false;
  }
  if (!allowOverflow && ConstantPoolAccessIsInOverflow()) {
    // Prefer non-relocatable two-instruction bitwise-mov32 over
    // overflow sequence.
    return false;
  }

  return true;
}
   1635 
   1636 
   1637 void Assembler::EnsureSpaceFor(int space_needed) {
   1638   if (buffer_space() <= (kGap + space_needed)) {
   1639     GrowBuffer(space_needed);
   1640   }
   1641 }
   1642 
   1643 
   1644 bool Operand::must_output_reloc_info(const Assembler* assembler) const {
   1645   if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
   1646     if (assembler != NULL && assembler->predictable_code_size()) return true;
   1647     return assembler->serializer_enabled();
   1648   } else if (RelocInfo::IsNone(rmode_)) {
   1649     return false;
   1650   }
   1651   return true;
   1652 }
   1653 
   1654 
// Primarily used for loading constants
// This should really move to be in macro-assembler as it
// is really a pseudo instruction
// Some usages of this intend for a FIXED_SEQUENCE to be used
// Todo - break this dependency so we can optimize mov() in general
// and only use the generic version when we require a fixed sequence
//
// Emission strategy, in priority order:
//   1. constant pool load (1-2 instructions),
//   2. shortest immediate sequence when optimization is allowed,
//   3. fixed-length bitwise_mov sequence (patchable).
// Any change here must be mirrored in instructions_required_for_mov().
void Assembler::mov(Register dst, const Operand& src) {
  intptr_t value = src.immediate();
  bool relocatable = src.must_output_reloc_info(this);
  bool canOptimize;

  // A blocked trampoline pool demands a fixed-length sequence unless the
  // value fits a single li.
  canOptimize =
      !(relocatable || (is_trampoline_pool_blocked() && !is_int16(value)));

  if (use_constant_pool_for_mov(dst, src, canOptimize)) {
    DCHECK(is_constant_pool_available());
    if (relocatable) {
      RecordRelocInfo(src.rmode_);
    }
    ConstantPoolEntry::Access access = ConstantPoolAddEntry(src.rmode_, value);
#if V8_TARGET_ARCH_PPC64
    if (access == ConstantPoolEntry::OVERFLOWED) {
      // Pool offset exceeds 16 bits: form the high part first.
      addis(dst, kConstantPoolRegister, Operand::Zero());
      ld(dst, MemOperand(dst, 0));
    } else {
      ld(dst, MemOperand(kConstantPoolRegister, 0));
    }
#else
    if (access == ConstantPoolEntry::OVERFLOWED) {
      addis(dst, kConstantPoolRegister, Operand::Zero());
      lwz(dst, MemOperand(dst, 0));
    } else {
      lwz(dst, MemOperand(kConstantPoolRegister, 0));
    }
#endif
    return;
  }

  if (canOptimize) {
    // Build the value with the fewest instructions, 16 bits at a time.
    if (is_int16(value)) {
      li(dst, Operand(value));
    } else {
      uint16_t u16;
#if V8_TARGET_ARCH_PPC64
      if (is_int32(value)) {
#endif
        lis(dst, Operand(value >> 16));
#if V8_TARGET_ARCH_PPC64
      } else {
        if (is_int48(value)) {
          li(dst, Operand(value >> 32));
        } else {
          lis(dst, Operand(value >> 48));
          u16 = ((value >> 32) & 0xffff);
          if (u16) {
            ori(dst, dst, Operand(u16));
          }
        }
        // Shift the assembled upper half into place, then OR in bits 16-31.
        sldi(dst, dst, Operand(32));
        u16 = ((value >> 16) & 0xffff);
        if (u16) {
          oris(dst, dst, Operand(u16));
        }
      }
#endif
      u16 = (value & 0xffff);
      if (u16) {
        ori(dst, dst, Operand(u16));
      }
    }
    return;
  }

  DCHECK(!canOptimize);
  // Fixed-length fallback: always emits the full patchable sequence.
  if (relocatable) {
    RecordRelocInfo(src.rmode_);
  }
  bitwise_mov(dst, value);
}
   1734 
   1735 
   1736 void Assembler::bitwise_mov(Register dst, intptr_t value) {
   1737     BlockTrampolinePoolScope block_trampoline_pool(this);
   1738 #if V8_TARGET_ARCH_PPC64
   1739     int32_t hi_32 = static_cast<int32_t>(value >> 32);
   1740     int32_t lo_32 = static_cast<int32_t>(value);
   1741     int hi_word = static_cast<int>(hi_32 >> 16);
   1742     int lo_word = static_cast<int>(hi_32 & 0xffff);
   1743     lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
   1744     ori(dst, dst, Operand(lo_word));
   1745     sldi(dst, dst, Operand(32));
   1746     hi_word = static_cast<int>(((lo_32 >> 16) & 0xffff));
   1747     lo_word = static_cast<int>(lo_32 & 0xffff);
   1748     oris(dst, dst, Operand(hi_word));
   1749     ori(dst, dst, Operand(lo_word));
   1750 #else
   1751     int hi_word = static_cast<int>(value >> 16);
   1752     int lo_word = static_cast<int>(value & 0xffff);
   1753     lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
   1754     ori(dst, dst, Operand(lo_word));
   1755 #endif
   1756 }
   1757 
   1758 
   1759 void Assembler::bitwise_mov32(Register dst, int32_t value) {
   1760   BlockTrampolinePoolScope block_trampoline_pool(this);
   1761   int hi_word = static_cast<int>(value >> 16);
   1762   int lo_word = static_cast<int>(value & 0xffff);
   1763   lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
   1764   ori(dst, dst, Operand(lo_word));
   1765 }
   1766 
   1767 
// Emits a fixed two-instruction sequence computing dst = src + value.
// Always two instructions (addi+nop or addis+addic) so the site is
// patchable.
void Assembler::bitwise_add32(Register dst, Register src, int32_t value) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (is_int16(value)) {
    addi(dst, src, Operand(value));
    nop();  // pad to keep the sequence length fixed
  } else {
    int hi_word = static_cast<int>(value >> 16);
    int lo_word = static_cast<int>(value & 0xffff);
    // addic sign-extends lo_word; pre-increment the high half to
    // compensate when the low half will be treated as negative.
    if (lo_word & 0x8000) hi_word++;
    addis(dst, src, Operand(SIGN_EXT_IMM16(hi_word)));
    addic(dst, dst, Operand(SIGN_EXT_IMM16(lo_word)));
  }
}
   1781 
   1782 
// Loads into dst the offset of |label| within the generated code object.
// For unbound labels a two-word placeholder is emitted and patched later.
void Assembler::mov_label_offset(Register dst, Label* label) {
  int position = link(label);
  if (label->is_bound()) {
    // Load the position of the label relative to the generated code object.
    mov(dst, Operand(position + Code::kHeaderSize - kHeapObjectTag));
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain.  Encode the destination register in the 2nd instruction.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);  // instruction-aligned link distance
    link >>= 2;              // store in instruction units
    DCHECK(is_int26(link));

    // When the label is bound, these instructions will be patched
    // with a 2 instruction mov sequence that will load the
    // destination register with the position of the label from the
    // beginning of the code.
    //
    // target_at extracts the link and target_at_put patches the instructions.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundMovLabelOffsetOpcode | (link & kImm26Mask));
    emit(dst.code());
  }
}
   1808 
   1809 
// Compute dst = base + (position of |label|) + delta, emitting a patchable
// 2-instruction placeholder if the label is not yet bound.
void Assembler::add_label_offset(Register dst, Register base, Label* label,
                                 int delta) {
  int position = link(label);
  if (label->is_bound()) {
    // dst = base + position + delta
    position += delta;
    bitwise_add32(dst, base, position);
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain.  Encode the operands in the 2nd instruction.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);  // links are always instruction-aligned
    link >>= 2;  // store the link as an instruction count, not a byte count
    DCHECK(is_int26(link));
    DCHECK(is_int16(delta));  // delta must fit the 2nd instruction's imm field

    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundAddLabelOffsetOpcode | (link & kImm26Mask));
    // Pack dst, base and delta into the second (data) word.
    emit(dst.code() * B21 | base.code() * B16 | (delta & kImm16Mask));
  }
}
   1832 
   1833 
// Load |dst| with the address of |label|.  The value is kept buffer-relative
// and converted to an absolute address later by EmitRelocations.
void Assembler::mov_label_addr(Register dst, Label* label) {
  CheckBuffer();
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
  int position = link(label);
  if (label->is_bound()) {
    // Keep internal references relative until EmitRelocations.
    bitwise_mov(dst, position);
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain.  Encode the destination register in the 2nd instruction.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);  // links are always instruction-aligned
    link >>= 2;  // store the link as an instruction count, not a byte count
    DCHECK(is_int26(link));

    // When the label is bound, these instructions will be patched
    // with a multi-instruction mov sequence that will load the
    // destination register with the address of the label.
    //
    // target_at extracts the link and target_at_put patches the instructions.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundMovLabelAddrOpcode | (link & kImm26Mask));
    emit(dst.code());
    DCHECK(kMovInstructionsNoConstantPool >= 2);
    // Pad so the sequence occupies the full patched-mov length.
    for (int i = 0; i < kMovInstructionsNoConstantPool - 2; i++) nop();
  }
}
   1862 
   1863 
// Emit a pointer-sized data word holding the address of |label| (a jump
// table entry).  Kept buffer-relative until EmitRelocations.
void Assembler::emit_label_addr(Label* label) {
  CheckBuffer();
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  int position = link(label);
  if (label->is_bound()) {
    // Keep internal references relative until EmitRelocations.
    dp(position);
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);  // links are always instruction-aligned
    link >>= 2;  // store the link as an instruction count, not a byte count
    DCHECK(is_int26(link));

    // When the label is bound, the instruction(s) will be patched
    // as a jump table entry containing the label address.  target_at extracts
    // the link and target_at_put patches the instruction(s).
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundJumpTableEntryOpcode | (link & kImm26Mask));
#if V8_TARGET_ARCH_PPC64
    // Pad the 4-byte marker out to the 8-byte pointer slot.
    nop();
#endif
  }
}
   1890 
   1891 
   1892 // Special register instructions
// Condition-register XOR: CR[bt] = CR[ba] ^ CR[bb].
void Assembler::crxor(int bt, int ba, int bb) {
  emit(EXT1 | CRXOR | bt * B21 | ba * B16 | bb * B11);
}
   1896 
   1897 
// Condition-register equivalence: CR[bt] = ~(CR[ba] ^ CR[bb]).
void Assembler::creqv(int bt, int ba, int bb) {
  emit(EXT1 | CREQV | bt * B21 | ba * B16 | bb * B11);
}
   1901 
   1902 
// dst = LR.  mfspr with the split-field encoding of SPR 8 (256 == 8 << 5).
void Assembler::mflr(Register dst) {
  emit(EXT2 | MFSPR | dst.code() * B21 | 256 << 11);  // Ignore RC bit
}
   1906 
   1907 
// LR = src.  mtspr with the split-field encoding of SPR 8 (256 == 8 << 5).
void Assembler::mtlr(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 256 << 11);  // Ignore RC bit
}
   1911 
   1912 
// CTR = src.  mtspr with the split-field encoding of SPR 9 (288 == 9 << 5).
void Assembler::mtctr(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 288 << 11);  // Ignore RC bit
}
   1916 
   1917 
// XER = src.  mtspr with the split-field encoding of SPR 1 (32 == 1 << 5).
void Assembler::mtxer(Register src) {
  emit(EXT2 | MTSPR | src.code() * B21 | 32 << 11);
}
   1921 
   1922 
   1923 void Assembler::mcrfs(CRegister cr, FPSCRBit bit) {
   1924   DCHECK(static_cast<int>(bit) < 32);
   1925   int bf = cr.code();
   1926   int bfa = bit / CRWIDTH;
   1927   emit(EXT4 | MCRFS | bf * B23 | bfa * B18);
   1928 }
   1929 
   1930 
// dst = CR (the whole 32-bit condition register).
void Assembler::mfcr(Register dst) { emit(EXT2 | MFCR | dst.code() * B21); }
   1932 
   1933 
   1934 #if V8_TARGET_ARCH_PPC64
// Move the doubleword in FP register |src| to GPR |dst| (mfvsrd).
void Assembler::mffprd(Register dst, DoubleRegister src) {
  emit(EXT2 | MFVSRD | src.code() * B21 | dst.code() * B16);
}
   1938 
   1939 
// Move the low word of FP register |src| to GPR |dst|, zero-extended
// (mfvsrwz).
void Assembler::mffprwz(Register dst, DoubleRegister src) {
  emit(EXT2 | MFVSRWZ | src.code() * B21 | dst.code() * B16);
}
   1943 
   1944 
// Move the doubleword in GPR |src| to FP register |dst| (mtvsrd).
void Assembler::mtfprd(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRD | dst.code() * B21 | src.code() * B16);
}
   1948 
   1949 
// Move the word in GPR |src| to FP register |dst|, zero-extended (mtvsrwz).
void Assembler::mtfprwz(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRWZ | dst.code() * B21 | src.code() * B16);
}
   1953 
   1954 
// Move the word in GPR |src| to FP register |dst|, sign-extended (mtvsrwa).
void Assembler::mtfprwa(DoubleRegister dst, Register src) {
  emit(EXT2 | MTVSRWA | dst.code() * B21 | src.code() * B16);
}
   1958 #endif
   1959 
   1960 
   1961 // Exception-generating instructions and debugging support.
   1962 // Stops with a non-negative code less than kNumOfWatchedStops support
   1963 // enabling/disabling and a counter feature. See simulator-ppc.h .
void Assembler::stop(const char* msg, Condition cond, int32_t code,
                     CRegister cr) {
  // On hardware this reduces to a (possibly conditional) breakpoint; |msg|
  // and |code| are not encoded here (per the header comment above, they are
  // simulator features — see simulator-ppc.h).
  if (cond != al) {
    // Branch around the trap when the condition does not hold.
    Label skip;
    b(NegateCondition(cond), &skip, cr);
    bkpt(0);
    bind(&skip);
  } else {
    bkpt(0);
  }
}
   1975 
   1976 
// Breakpoint: 0x7d821008 is "tw 12, r2, r2" (trap word, ge condition on
// equal operands, so it always traps).  |imm16| is not encoded.
void Assembler::bkpt(uint32_t imm16) { emit(0x7d821008); }
   1978 
   1979 
// Data cache block flush for the block containing address (ra|0) + rb.
void Assembler::dcbf(Register ra, Register rb) {
  emit(EXT2 | DCBF | ra.code() * B16 | rb.code() * B11);
}
   1983 
   1984 
// Heavyweight memory barrier (sync / hwsync).
void Assembler::sync() { emit(EXT2 | SYNC); }
   1986 
   1987 
// Lightweight memory barrier: sync with L field = 1 (lwsync).
void Assembler::lwsync() { emit(EXT2 | SYNC | 1 * B21); }
   1989 
   1990 
// Instruction cache block invalidate for the block at (ra|0) + rb.
void Assembler::icbi(Register ra, Register rb) {
  emit(EXT2 | ICBI | ra.code() * B16 | rb.code() * B11);
}
   1994 
   1995 
// Instruction synchronize: discard prefetched instructions (used after
// modifying code, together with icbi).
void Assembler::isync() { emit(EXT1 | ISYNC); }
   1997 
   1998 
   1999 // Floating point support
   2000 
   2001 void Assembler::lfd(const DoubleRegister frt, const MemOperand& src) {
   2002   int offset = src.offset();
   2003   Register ra = src.ra();
   2004   DCHECK(!ra.is(r0));
   2005   CHECK(is_int16(offset));
   2006   int imm16 = offset & kImm16Mask;
   2007   // could be x_form instruction with some casting magic
   2008   emit(LFD | frt.code() * B21 | ra.code() * B16 | imm16);
   2009 }
   2010 
   2011 
   2012 void Assembler::lfdu(const DoubleRegister frt, const MemOperand& src) {
   2013   int offset = src.offset();
   2014   Register ra = src.ra();
   2015   DCHECK(!ra.is(r0));
   2016   CHECK(is_int16(offset));
   2017   int imm16 = offset & kImm16Mask;
   2018   // could be x_form instruction with some casting magic
   2019   emit(LFDU | frt.code() * B21 | ra.code() * B16 | imm16);
   2020 }
   2021 
   2022 
   2023 void Assembler::lfdx(const DoubleRegister frt, const MemOperand& src) {
   2024   Register ra = src.ra();
   2025   Register rb = src.rb();
   2026   DCHECK(!ra.is(r0));
   2027   emit(EXT2 | LFDX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   2028        LeaveRC);
   2029 }
   2030 
   2031 
   2032 void Assembler::lfdux(const DoubleRegister frt, const MemOperand& src) {
   2033   Register ra = src.ra();
   2034   Register rb = src.rb();
   2035   DCHECK(!ra.is(r0));
   2036   emit(EXT2 | LFDUX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   2037        LeaveRC);
   2038 }
   2039 
   2040 
   2041 void Assembler::lfs(const DoubleRegister frt, const MemOperand& src) {
   2042   int offset = src.offset();
   2043   Register ra = src.ra();
   2044   CHECK(is_int16(offset));
   2045   DCHECK(!ra.is(r0));
   2046   int imm16 = offset & kImm16Mask;
   2047   // could be x_form instruction with some casting magic
   2048   emit(LFS | frt.code() * B21 | ra.code() * B16 | imm16);
   2049 }
   2050 
   2051 
   2052 void Assembler::lfsu(const DoubleRegister frt, const MemOperand& src) {
   2053   int offset = src.offset();
   2054   Register ra = src.ra();
   2055   CHECK(is_int16(offset));
   2056   DCHECK(!ra.is(r0));
   2057   int imm16 = offset & kImm16Mask;
   2058   // could be x_form instruction with some casting magic
   2059   emit(LFSU | frt.code() * B21 | ra.code() * B16 | imm16);
   2060 }
   2061 
   2062 
   2063 void Assembler::lfsx(const DoubleRegister frt, const MemOperand& src) {
   2064   Register ra = src.ra();
   2065   Register rb = src.rb();
   2066   DCHECK(!ra.is(r0));
   2067   emit(EXT2 | LFSX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   2068        LeaveRC);
   2069 }
   2070 
   2071 
   2072 void Assembler::lfsux(const DoubleRegister frt, const MemOperand& src) {
   2073   Register ra = src.ra();
   2074   Register rb = src.rb();
   2075   DCHECK(!ra.is(r0));
   2076   emit(EXT2 | LFSUX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   2077        LeaveRC);
   2078 }
   2079 
   2080 
   2081 void Assembler::stfd(const DoubleRegister frs, const MemOperand& src) {
   2082   int offset = src.offset();
   2083   Register ra = src.ra();
   2084   CHECK(is_int16(offset));
   2085   DCHECK(!ra.is(r0));
   2086   int imm16 = offset & kImm16Mask;
   2087   // could be x_form instruction with some casting magic
   2088   emit(STFD | frs.code() * B21 | ra.code() * B16 | imm16);
   2089 }
   2090 
   2091 
   2092 void Assembler::stfdu(const DoubleRegister frs, const MemOperand& src) {
   2093   int offset = src.offset();
   2094   Register ra = src.ra();
   2095   CHECK(is_int16(offset));
   2096   DCHECK(!ra.is(r0));
   2097   int imm16 = offset & kImm16Mask;
   2098   // could be x_form instruction with some casting magic
   2099   emit(STFDU | frs.code() * B21 | ra.code() * B16 | imm16);
   2100 }
   2101 
   2102 
   2103 void Assembler::stfdx(const DoubleRegister frs, const MemOperand& src) {
   2104   Register ra = src.ra();
   2105   Register rb = src.rb();
   2106   DCHECK(!ra.is(r0));
   2107   emit(EXT2 | STFDX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   2108        LeaveRC);
   2109 }
   2110 
   2111 
   2112 void Assembler::stfdux(const DoubleRegister frs, const MemOperand& src) {
   2113   Register ra = src.ra();
   2114   Register rb = src.rb();
   2115   DCHECK(!ra.is(r0));
   2116   emit(EXT2 | STFDUX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   2117        LeaveRC);
   2118 }
   2119 
   2120 
   2121 void Assembler::stfs(const DoubleRegister frs, const MemOperand& src) {
   2122   int offset = src.offset();
   2123   Register ra = src.ra();
   2124   CHECK(is_int16(offset));
   2125   DCHECK(!ra.is(r0));
   2126   int imm16 = offset & kImm16Mask;
   2127   // could be x_form instruction with some casting magic
   2128   emit(STFS | frs.code() * B21 | ra.code() * B16 | imm16);
   2129 }
   2130 
   2131 
   2132 void Assembler::stfsu(const DoubleRegister frs, const MemOperand& src) {
   2133   int offset = src.offset();
   2134   Register ra = src.ra();
   2135   CHECK(is_int16(offset));
   2136   DCHECK(!ra.is(r0));
   2137   int imm16 = offset & kImm16Mask;
   2138   // could be x_form instruction with some casting magic
   2139   emit(STFSU | frs.code() * B21 | ra.code() * B16 | imm16);
   2140 }
   2141 
   2142 
   2143 void Assembler::stfsx(const DoubleRegister frs, const MemOperand& src) {
   2144   Register ra = src.ra();
   2145   Register rb = src.rb();
   2146   DCHECK(!ra.is(r0));
   2147   emit(EXT2 | STFSX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   2148        LeaveRC);
   2149 }
   2150 
   2151 
   2152 void Assembler::stfsux(const DoubleRegister frs, const MemOperand& src) {
   2153   Register ra = src.ra();
   2154   Register rb = src.rb();
   2155   DCHECK(!ra.is(r0));
   2156   emit(EXT2 | STFSUX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   2157        LeaveRC);
   2158 }
   2159 
   2160 
// frt = fra - frb (double precision).
void Assembler::fsub(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FSUB, frt, fra, frb, rc);
}
   2165 
   2166 
// frt = fra + frb (double precision).
void Assembler::fadd(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FADD, frt, fra, frb, rc);
}
   2171 
   2172 
// frt = fra * frc (double precision).  Note the multiplier is in the FRC
// field (bits 6-10), not FRB.
void Assembler::fmul(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frc, RCBit rc) {
  emit(EXT4 | FMUL | frt.code() * B21 | fra.code() * B16 | frc.code() * B6 |
       rc);
}
   2178 
   2179 
// frt = fra / frb (double precision).
void Assembler::fdiv(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FDIV, frt, fra, frb, rc);
}
   2184 
   2185 
// Floating-point compare unordered of fra and frb; result goes to CR
// field |cr|.
void Assembler::fcmpu(const DoubleRegister fra, const DoubleRegister frb,
                      CRegister cr) {
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT4 | FCMPU | cr.code() * B23 | fra.code() * B16 | frb.code() * B11);
}
   2191 
   2192 
// Floating move register: frt = frb.
void Assembler::fmr(const DoubleRegister frt, const DoubleRegister frb,
                    RCBit rc) {
  emit(EXT4 | FMR | frt.code() * B21 | frb.code() * B11 | rc);
}
   2197 
   2198 
// Convert the double in frb to a 32-bit signed integer, rounding toward
// zero; result in frt.
void Assembler::fctiwz(const DoubleRegister frt, const DoubleRegister frb) {
  emit(EXT4 | FCTIWZ | frt.code() * B21 | frb.code() * B11);
}
   2202 
   2203 
// Convert the double in frb to a 32-bit signed integer using the current
// FPSCR rounding mode; result in frt.
void Assembler::fctiw(const DoubleRegister frt, const DoubleRegister frb) {
  emit(EXT4 | FCTIW | frt.code() * B21 | frb.code() * B11);
}
   2207 
   2208 
// Round frb to an integral double, ties away from zero (round-to-nearest).
void Assembler::frin(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIN | frt.code() * B21 | frb.code() * B11 | rc);
}
   2213 
   2214 
// Round frb to an integral double, toward zero (truncate).
void Assembler::friz(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIZ | frt.code() * B21 | frb.code() * B11 | rc);
}
   2219 
   2220 
// Round frb to an integral double, toward +infinity (ceil).
void Assembler::frip(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIP | frt.code() * B21 | frb.code() * B11 | rc);
}
   2225 
   2226 
// Round frb to an integral double, toward -infinity (floor).
void Assembler::frim(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRIM | frt.code() * B21 | frb.code() * B11 | rc);
}
   2231 
   2232 
// Round the double in frb to single precision; result in frt.
void Assembler::frsp(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FRSP | frt.code() * B21 | frb.code() * B11 | rc);
}
   2237 
   2238 
// Convert the 64-bit signed integer in frb to a double; result in frt.
void Assembler::fcfid(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);
}
   2243 
   2244 
// Convert the 64-bit unsigned integer in frb to a double; result in frt.
void Assembler::fcfidu(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCFIDU | frt.code() * B21 | frb.code() * B11 | rc);
}
   2249 
   2250 
// Convert the 64-bit unsigned integer in frb to a single-precision float.
void Assembler::fcfidus(const DoubleRegister frt, const DoubleRegister frb,
                        RCBit rc) {
  emit(EXT3 | FCFIDUS | frt.code() * B21 | frb.code() * B11 | rc);
}
   2255 
   2256 
// Convert the 64-bit signed integer in frb to a single-precision float.
void Assembler::fcfids(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT3 | FCFIDS | frt.code() * B21 | frb.code() * B11 | rc);
}
   2261 
   2262 
// Convert the double in frb to a 64-bit signed integer using the current
// FPSCR rounding mode.
void Assembler::fctid(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FCTID | frt.code() * B21 | frb.code() * B11 | rc);
}
   2267 
   2268 
// Convert the double in frb to a 64-bit signed integer, rounding toward
// zero.
void Assembler::fctidz(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCTIDZ | frt.code() * B21 | frb.code() * B11 | rc);
}
   2273 
   2274 
// Convert the double in frb to a 64-bit unsigned integer using the current
// FPSCR rounding mode.
void Assembler::fctidu(const DoubleRegister frt, const DoubleRegister frb,
                       RCBit rc) {
  emit(EXT4 | FCTIDU | frt.code() * B21 | frb.code() * B11 | rc);
}
   2279 
   2280 
// Convert the double in frb to a 64-bit unsigned integer, rounding toward
// zero.
void Assembler::fctiduz(const DoubleRegister frt, const DoubleRegister frb,
                        RCBit rc) {
  emit(EXT4 | FCTIDUZ | frt.code() * B21 | frb.code() * B11 | rc);
}
   2285 
   2286 
// Floating select: frt = (fra >= 0.0) ? frc : frb.
void Assembler::fsel(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frc, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FSEL | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}
   2293 
   2294 
// Floating negate: frt = -frb.
void Assembler::fneg(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FNEG | frt.code() * B21 | frb.code() * B11 | rc);
}
   2299 
   2300 
// Clear FPSCR bit |bit| (mtfsb0).
void Assembler::mtfsb0(FPSCRBit bit, RCBit rc) {
  DCHECK(static_cast<int>(bit) < 32);
  int bt = bit;
  emit(EXT4 | MTFSB0 | bt * B21 | rc);
}
   2306 
   2307 
// Set FPSCR bit |bit| (mtfsb1).
void Assembler::mtfsb1(FPSCRBit bit, RCBit rc) {
  DCHECK(static_cast<int>(bit) < 32);
  int bt = bit;
  emit(EXT4 | MTFSB1 | bt * B21 | rc);
}
   2313 
   2314 
// Set FPSCR field |bf| to the 4-bit |immediate| (mtfsfi).
void Assembler::mtfsfi(int bf, int immediate, RCBit rc) {
  emit(EXT4 | MTFSFI | bf * B23 | immediate * B12 | rc);
}
   2318 
   2319 
// Move the FPSCR contents into FP register frt (mffs).
void Assembler::mffs(const DoubleRegister frt, RCBit rc) {
  emit(EXT4 | MFFS | frt.code() * B21 | rc);
}
   2323 
   2324 
// Move frb into the FPSCR fields selected by the FLM mask (mtfsf); L and W
// select the full-register / high-word variants.
void Assembler::mtfsf(const DoubleRegister frb, bool L, int FLM, bool W,
                      RCBit rc) {
  emit(EXT4 | MTFSF | frb.code() * B11 | W * B16 | FLM * B17 | L * B25 | rc);
}
   2329 
   2330 
// frt = sqrt(frb) (double precision).
void Assembler::fsqrt(const DoubleRegister frt, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FSQRT | frt.code() * B21 | frb.code() * B11 | rc);
}
   2335 
   2336 
// frt = |frb| (clear the sign bit).
void Assembler::fabs(const DoubleRegister frt, const DoubleRegister frb,
                     RCBit rc) {
  emit(EXT4 | FABS | frt.code() * B21 | frb.code() * B11 | rc);
}
   2341 
   2342 
// Fused multiply-add: frt = fra * frc + frb (single rounding).
void Assembler::fmadd(const DoubleRegister frt, const DoubleRegister fra,
                      const DoubleRegister frc, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FMADD | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}
   2349 
   2350 
// Fused multiply-subtract: frt = fra * frc - frb (single rounding).
void Assembler::fmsub(const DoubleRegister frt, const DoubleRegister fra,
                      const DoubleRegister frc, const DoubleRegister frb,
                      RCBit rc) {
  emit(EXT4 | FMSUB | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
       frc.code() * B6 | rc);
}
   2357 
   2358 // Support for VSX instructions
   2359 
// VSX scalar double add: frt = fra + frb.
void Assembler::xsadddp(const DoubleRegister frt, const DoubleRegister fra,
                        const DoubleRegister frb) {
  xx3_form(EXT6 | XSADDDP, frt, fra, frb);
}
// VSX scalar double subtract: frt = fra - frb.
void Assembler::xssubdp(const DoubleRegister frt, const DoubleRegister fra,
                        const DoubleRegister frb) {
  xx3_form(EXT6 | XSSUBDP, frt, fra, frb);
}
// VSX scalar double divide: frt = fra / frb.
void Assembler::xsdivdp(const DoubleRegister frt, const DoubleRegister fra,
                        const DoubleRegister frb) {
  xx3_form(EXT6 | XSDIVDP, frt, fra, frb);
}
// VSX scalar double multiply: frt = fra * frb.
void Assembler::xsmuldp(const DoubleRegister frt, const DoubleRegister fra,
                        const DoubleRegister frb) {
  xx3_form(EXT6 | XSMULDP, frt, fra, frb);
}
   2376 
   2377 // Pseudo instructions.
   2378 void Assembler::nop(int type) {
   2379   Register reg = r0;
   2380   switch (type) {
   2381     case NON_MARKING_NOP:
   2382       reg = r0;
   2383       break;
   2384     case GROUP_ENDING_NOP:
   2385       reg = r2;
   2386       break;
   2387     case DEBUG_BREAK_NOP:
   2388       reg = r3;
   2389       break;
   2390     default:
   2391       UNIMPLEMENTED();
   2392   }
   2393 
   2394   ori(reg, reg, Operand::Zero());
   2395 }
   2396 
   2397 
   2398 bool Assembler::IsNop(Instr instr, int type) {
   2399   int reg = 0;
   2400   switch (type) {
   2401     case NON_MARKING_NOP:
   2402       reg = 0;
   2403       break;
   2404     case GROUP_ENDING_NOP:
   2405       reg = 2;
   2406       break;
   2407     case DEBUG_BREAK_NOP:
   2408       reg = 3;
   2409       break;
   2410     default:
   2411       UNIMPLEMENTED();
   2412   }
   2413   return instr == (ORI | reg * B21 | reg * B16);
   2414 }
   2415 
   2416 
// Grow the code buffer so at least |needed| more bytes fit.  Instructions
// grow up from the buffer start while relocation info grows down from the
// end, so the two regions are moved independently.
void Assembler::GrowBuffer(int needed) {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size: start at 4KB, double while under 1MB, then
  // grow linearly by 1MB to bound memory overhead on large code objects.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 4 * KB) {
    desc.buffer_size = 4 * KB;
  } else if (buffer_size_ < 1 * MB) {
    desc.buffer_size = 2 * buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1 * MB;
  }
  // If the default growth step still doesn't satisfy the request, grow
  // exactly enough.
  int space = buffer_space() + (desc.buffer_size - buffer_size_);
  if (space < needed) {
    desc.buffer_size += needed - space;
  }
  CHECK_GT(desc.buffer_size, 0);  // no overflow

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);
  desc.origin = this;

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.  Instructions keep their offset from the start;
  // reloc info keeps its offset from the end.
  intptr_t pc_delta = desc.buffer - buffer_;
  intptr_t rc_delta =
      (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Nothing else to do here since we keep all internal references and
  // deferred relocation entries relative to the buffer (until
  // EmitRelocations).
}
   2462 
   2463 
// Emit a raw byte at the current pc (no relocation info recorded).
void Assembler::db(uint8_t data) {
  CheckBuffer();
  *reinterpret_cast<uint8_t*>(pc_) = data;
  pc_ += sizeof(uint8_t);
}
   2469 
   2470 
// Emit a raw 32-bit value at the current pc (no relocation info recorded).
void Assembler::dd(uint32_t data) {
  CheckBuffer();
  *reinterpret_cast<uint32_t*>(pc_) = data;
  pc_ += sizeof(uint32_t);
}
   2476 
   2477 
// Emit a raw 64-bit value at the current pc.
// NOTE(review): pc_ is only guaranteed instruction (4-byte) aligned, so
// this store may be unaligned — presumably fine on the PPC targets built.
void Assembler::dq(uint64_t value) {
  CheckBuffer();
  *reinterpret_cast<uint64_t*>(pc_) = value;
  pc_ += sizeof(uint64_t);
}
   2483 
   2484 
// Emit a raw pointer-sized value at the current pc (used for jump table
// entries; see emit_label_addr).
void Assembler::dp(uintptr_t data) {
  CheckBuffer();
  *reinterpret_cast<uintptr_t*>(pc_) = data;
  pc_ += sizeof(uintptr_t);
}
   2490 
   2491 
// Queue relocation info for the instruction at the current pc.  Entries are
// buffered in relocations_ and flushed by EmitRelocations.
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  if (RelocInfo::IsNone(rmode) ||
      // Don't record external references unless the heap will be serialized.
      (rmode == RelocInfo::EXTERNAL_REFERENCE && !serializer_enabled() &&
       !emit_debug_code())) {
    return;
  }
  if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
    // Attach the recorded AST id as this entry's data, then reset it.
    data = RecordedAstId().ToInt();
    ClearRecordedAstId();
  }
  DeferredRelocInfo rinfo(pc_offset(), rmode, data);
  relocations_.push_back(rinfo);
}
   2506 
   2507 
// Flush all deferred relocation entries to the reloc-info writer.  Internal
// references, kept buffer-relative until now, are converted to absolute
// addresses since every label must be bound at this point.
void Assembler::EmitRelocations() {
  EnsureSpaceFor(relocations_.size() * kMaxRelocSize);

  for (std::vector<DeferredRelocInfo>::iterator it = relocations_.begin();
       it != relocations_.end(); it++) {
    RelocInfo::Mode rmode = it->rmode();
    Address pc = buffer_ + it->position();
    Code* code = NULL;
    RelocInfo rinfo(isolate(), pc, rmode, it->data(), code);

    // Fix up internal references now that they are guaranteed to be bound.
    if (RelocInfo::IsInternalReference(rmode)) {
      // Jump table entry: the slot holds a buffer-relative offset.
      intptr_t pos = reinterpret_cast<intptr_t>(Memory::Address_at(pc));
      Memory::Address_at(pc) = buffer_ + pos;
    } else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
      // mov sequence: the encoded immediate is a buffer-relative offset.
      intptr_t pos = reinterpret_cast<intptr_t>(target_address_at(pc, code));
      set_target_address_at(isolate(), pc, code, buffer_ + pos,
                            SKIP_ICACHE_FLUSH);
    }

    reloc_info_writer.Write(&rinfo);
  }
}
   2533 
   2534 
// Prevent trampoline-pool emission within the next |instructions|
// instructions.
void Assembler::BlockTrampolinePoolFor(int instructions) {
  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
}
   2538 
   2539 
// Emit the trampoline pool (one unconditional branch slot per tracked
// far branch) if emission is currently allowed.  The pool is emitted at
// most once.
void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if (trampoline_pool_blocked_nesting_ > 0) return;
  if (pc_offset() < no_trampoline_pool_before_) {
    // Too early; re-check once the blocked region ends.
    next_trampoline_check_ = no_trampoline_pool_before_;
    return;
  }

  DCHECK(!trampoline_emitted_);
  if (tracked_branch_count_ > 0) {
    int size = tracked_branch_count_ * kInstrSize;

    // As we are only going to emit trampoline once, we need to prevent any
    // further emission.
    trampoline_emitted_ = true;
    next_trampoline_check_ = kMaxInt;

    // First we emit jump, then we emit trampoline pool.
    b(size + kInstrSize, LeaveLK);  // skip over the pool in straight-line code
    for (int i = size; i > 0; i -= kInstrSize) {
      b(i, LeaveLK);  // placeholder slot; patched to the real far target later
    }

    trampoline_ = Trampoline(pc_offset() - size, tracked_branch_count_);
  }
}
   2570 
   2571 
   2572 }  // namespace internal
   2573 }  // namespace v8
   2574 
   2575 #endif  // V8_TARGET_ARCH_PPC
   2576