Home | History | Annotate | Download | only in ppc
      1 // Copyright (c) 1994-2006 Sun Microsystems Inc.
      2 // All Rights Reserved.
      3 //
      4 // Redistribution and use in source and binary forms, with or without
      5 // modification, are permitted provided that the following conditions
      6 // are met:
      7 //
      8 // - Redistributions of source code must retain the above copyright notice,
      9 // this list of conditions and the following disclaimer.
     10 //
     11 // - Redistribution in binary form must reproduce the above copyright
     12 // notice, this list of conditions and the following disclaimer in the
     13 // documentation and/or other materials provided with the
     14 // distribution.
     15 //
     16 // - Neither the name of Sun Microsystems or the names of contributors may
     17 // be used to endorse or promote products derived from this software without
     18 // specific prior written permission.
     19 //
     20 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     21 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     22 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
     23 // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
     24 // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
     25 // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
     26 // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
     27 // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     28 // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
     29 // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     30 // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
     31 // OF THE POSSIBILITY OF SUCH DAMAGE.
     32 
     33 // The original source code covered by the above license above has been
     34 // modified significantly by Google Inc.
     35 // Copyright 2014 the V8 project authors. All rights reserved.
     36 
     37 #include "src/ppc/assembler-ppc.h"
     38 
     39 #if V8_TARGET_ARCH_PPC
     40 
     41 #include "src/base/bits.h"
     42 #include "src/base/cpu.h"
     43 #include "src/macro-assembler.h"
     44 #include "src/ppc/assembler-ppc-inl.h"
     45 
     46 namespace v8 {
     47 namespace internal {
     48 
     49 // Get the CPU features enabled by the build.
     50 static unsigned CpuFeaturesImpliedByCompiler() {
     51   unsigned answer = 0;
     52   return answer;
     53 }
     54 
     55 
// Runtime CPU feature detection.  Populates |supported_| with the
// feature bits this process may use and fixes the cache line size at
// 128 bytes.  When |cross_compile| is true only statically-known
// features are kept so the snapshot stays portable.
void CpuFeatures::ProbeImpl(bool cross_compile) {
  supported_ |= CpuFeaturesImpliedByCompiler();
  cache_line_size_ = 128;

  // Only use statically determined features for cross compile (snapshot).
  if (cross_compile) return;

// Detect whether frim instruction is supported (POWER5+)
// For now we will just check for processors we know do not
// support it
#ifndef USE_SIMULATOR
  // Probe for additional features at runtime.
  base::CPU cpu;
#if V8_TARGET_ARCH_PPC64
  // Direct FPR<->GPR moves are enabled only on POWER8 parts.
  if (cpu.part() == base::CPU::PPC_POWER8) {
    supported_ |= (1u << FPR_GPR_MOV);
  }
#endif
  // Lightweight sync (lwsync) on POWER6 and newer parts.
  if (cpu.part() == base::CPU::PPC_POWER6 ||
      cpu.part() == base::CPU::PPC_POWER7 ||
      cpu.part() == base::CPU::PPC_POWER8) {
    supported_ |= (1u << LWSYNC);
  }
  // Integer select (isel) on POWER7 and newer parts.
  if (cpu.part() == base::CPU::PPC_POWER7 ||
      cpu.part() == base::CPU::PPC_POWER8) {
    supported_ |= (1u << ISELECT);
  }
#if V8_OS_LINUX
  if (!(cpu.part() == base::CPU::PPC_G5 || cpu.part() == base::CPU::PPC_G4)) {
    // Assume support
    supported_ |= (1u << FPU);
  }
#elif V8_OS_AIX
  // Assume support FP support and default cache line size
  supported_ |= (1u << FPU);
#endif
#else  // Simulator
  // The simulator implements all of these, so enable them unconditionally.
  supported_ |= (1u << FPU);
  supported_ |= (1u << LWSYNC);
  supported_ |= (1u << ISELECT);
#if V8_TARGET_ARCH_PPC64
  supported_ |= (1u << FPR_GPR_MOV);
#endif
#endif
}
    101 
    102 
    103 void CpuFeatures::PrintTarget() {
    104   const char* ppc_arch = NULL;
    105 
    106 #if V8_TARGET_ARCH_PPC64
    107   ppc_arch = "ppc64";
    108 #else
    109   ppc_arch = "ppc";
    110 #endif
    111 
    112   printf("target %s\n", ppc_arch);
    113 }
    114 
    115 
    116 void CpuFeatures::PrintFeatures() {
    117   printf("FPU=%d\n", CpuFeatures::IsSupported(FPU));
    118 }
    119 
    120 
    121 Register ToRegister(int num) {
    122   DCHECK(num >= 0 && num < kNumRegisters);
    123   const Register kRegisters[] = {r0,  sp,  r2,  r3,  r4,  r5,  r6,  r7,
    124                                  r8,  r9,  r10, r11, ip,  r13, r14, r15,
    125                                  r16, r17, r18, r19, r20, r21, r22, r23,
    126                                  r24, r25, r26, r27, r28, r29, r30, fp};
    127   return kRegisters[num];
    128 }
    129 
    130 
    131 // -----------------------------------------------------------------------------
    132 // Implementation of RelocInfo
    133 
// Relocation modes that must be fixed up when code moves: on PPC only
// internal (intra-code-object) references are pc-dependent.
const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE |
                                  1 << RelocInfo::INTERNAL_REFERENCE_ENCODED;
    136 
    137 
    138 bool RelocInfo::IsCodedSpecially() {
    139   // The deserializer needs to know whether a pointer is specially
    140   // coded.  Being specially coded on PPC means that it is a lis/ori
    141   // instruction sequence or is a constant pool entry, and these are
    142   // always the case inside code objects.
    143   return true;
    144 }
    145 
    146 
    147 bool RelocInfo::IsInConstantPool() {
    148   if (FLAG_enable_embedded_constant_pool) {
    149     Address constant_pool = host_->constant_pool();
    150     return (constant_pool && Assembler::IsConstantPoolLoadStart(pc_));
    151   }
    152   return false;
    153 }
    154 
    155 
    156 // -----------------------------------------------------------------------------
    157 // Implementation of Operand and MemOperand
    158 // See assembler-ppc-inl.h for inlined constructors
    159 
// Builds an immediate operand from a handle.  Heap objects are encoded
// via the handle location with EMBEDDED_OBJECT relocation (so the GC
// can update the embedded pointer); non-heap values are embedded
// directly with no relocation.
Operand::Operand(Handle<Object> handle) {
  AllowDeferredHandleDereference using_raw_address;
  rm_ = no_reg;
  // Verify all Objects referred by code are NOT in new space.
  Object* obj = *handle;
  if (obj->IsHeapObject()) {
    DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
    imm_ = reinterpret_cast<intptr_t>(handle.location());
    rmode_ = RelocInfo::EMBEDDED_OBJECT;
  } else {
    // no relocation needed
    imm_ = reinterpret_cast<intptr_t>(obj);
    rmode_ = kRelocInfo_NONEPTR;
  }
}
    175 
    176 
    177 MemOperand::MemOperand(Register rn, int32_t offset) {
    178   ra_ = rn;
    179   rb_ = no_reg;
    180   offset_ = offset;
    181 }
    182 
    183 
    184 MemOperand::MemOperand(Register ra, Register rb) {
    185   ra_ = ra;
    186   rb_ = rb;
    187   offset_ = 0;
    188 }
    189 
    190 
    191 // -----------------------------------------------------------------------------
    192 // Specific instructions, constants, and masks.
    193 
    194 
// Constructs an assembler over the given buffer.  Code grows forward
// from the start of the buffer while relocation info is written
// backwards from the end (see reloc_info_writer.Reposition below).
Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
    : AssemblerBase(isolate, buffer, buffer_size),
      recorded_ast_id_(TypeFeedbackId::None()),
      constant_pool_builder_(kLoadPtrMaxReachBits, kLoadDoubleMaxReachBits),
      positions_recorder_(this) {
  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);

  no_trampoline_pool_before_ = 0;
  trampoline_pool_blocked_nesting_ = 0;
  constant_pool_entry_sharing_blocked_nesting_ = 0;
  next_trampoline_check_ = kMaxInt;
  internal_trampoline_exception_ = false;
  last_bound_pos_ = 0;
  optimizable_cmpi_pos_ = -1;
  // Long-branch mode may be forced by a flag; otherwise it is entered
  // lazily once a trampoline pool is emitted.
  trampoline_emitted_ = FLAG_force_long_branches;
  tracked_branch_count_ = 0;
  ClearRecordedAstId();
  relocations_.reserve(128);
}
    214 
    215 
// Finalizes code generation: emits any pending constant pool and the
// buffered relocation entries, then fills |desc| with the extents of
// the instruction area, reloc area, and constant pool.
void Assembler::GetCode(CodeDesc* desc) {
  // Emit constant pool if necessary.
  int constant_pool_offset = EmitConstantPool();

  EmitRelocations();

  // Set up code descriptor.
  desc->buffer = buffer_;
  desc->buffer_size = buffer_size_;
  desc->instr_size = pc_offset();
  // Reloc info was written backwards from the end of the buffer.
  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
  desc->constant_pool_size =
      (constant_pool_offset ? desc->instr_size - constant_pool_offset : 0);
  desc->origin = this;
}
    231 
    232 
// Emits nops until pc_offset() is a multiple of |m|.  |m| must be a
// power of two of at least instruction size, and the current offset
// must already be instruction aligned.
void Assembler::Align(int m) {
  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
  DCHECK((pc_offset() & (kInstrSize - 1)) == 0);
  while ((pc_offset() & (m - 1)) != 0) {
    nop();
  }
}
    240 
    241 
    242 void Assembler::CodeTargetAlign() { Align(8); }
    243 
    244 
    245 Condition Assembler::GetCondition(Instr instr) {
    246   switch (instr & kCondMask) {
    247     case BT:
    248       return eq;
    249     case BF:
    250       return ne;
    251     default:
    252       UNIMPLEMENTED();
    253   }
    254   return al;
    255 }
    256 
    257 
    258 bool Assembler::IsLis(Instr instr) {
    259   return ((instr & kOpcodeMask) == ADDIS) && GetRA(instr).is(r0);
    260 }
    261 
    262 
    263 bool Assembler::IsLi(Instr instr) {
    264   return ((instr & kOpcodeMask) == ADDI) && GetRA(instr).is(r0);
    265 }
    266 
    267 
    268 bool Assembler::IsAddic(Instr instr) { return (instr & kOpcodeMask) == ADDIC; }
    269 
    270 
    271 bool Assembler::IsOri(Instr instr) { return (instr & kOpcodeMask) == ORI; }
    272 
    273 
    274 bool Assembler::IsBranch(Instr instr) { return ((instr & kOpcodeMask) == BCX); }
    275 
    276 
// Extracts the RA register field of |instr|.
Register Assembler::GetRA(Instr instr) {
  Register reg;
  reg.reg_code = Instruction::RAValue(instr);
  return reg;
}
    282 
    283 
// Extracts the RB register field of |instr|.
Register Assembler::GetRB(Instr instr) {
  Register reg;
  reg.reg_code = Instruction::RBValue(instr);
  return reg;
}
    289 
    290 
    291 #if V8_TARGET_ARCH_PPC64
// This code assumes a FIXED_SEQUENCE for 64bit loads (lis/ori)
bool Assembler::Is64BitLoadIntoR12(Instr instr1, Instr instr2, Instr instr3,
                                   Instr instr4, Instr instr5) {
  // Check the instructions are indeed a five part load (into r12)
  // 3d800000       lis     r12, 0
  // 618c0000       ori     r12, r12, 0
  // 798c07c6       rldicr  r12, r12, 32, 31
  // 658c00c3       oris    r12, r12, 195
  // 618ccd40       ori     r12, r12, 52544
  // Only the opcode/register halves are matched (>> 16); the immediate
  // halves vary per target address.  The rldicr is fully fixed.
  return (((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c) &&
          (instr3 == 0x798c07c6) && ((instr4 >> 16) == 0x658c) &&
          ((instr5 >> 16) == 0x618c));
}
    305 #else
// This code assumes a FIXED_SEQUENCE for 32bit loads (lis/ori)
bool Assembler::Is32BitLoadIntoR12(Instr instr1, Instr instr2) {
  // Check the instruction is indeed a two part load (into r12)
  // 3d802553       lis     r12, 9555
  // 618c5000       ori   r12, r12, 20480
  // Only the opcode/register halves are matched; the immediates vary.
  return (((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c));
}
    313 #endif
    314 
    315 
    316 bool Assembler::IsCmpRegister(Instr instr) {
    317   return (((instr & kOpcodeMask) == EXT2) &&
    318           ((instr & kExt2OpcodeMask) == CMP));
    319 }
    320 
    321 
    322 bool Assembler::IsRlwinm(Instr instr) {
    323   return ((instr & kOpcodeMask) == RLWINMX);
    324 }
    325 
    326 
    327 bool Assembler::IsAndi(Instr instr) { return ((instr & kOpcodeMask) == ANDIx); }
    328 
    329 
    330 #if V8_TARGET_ARCH_PPC64
    331 bool Assembler::IsRldicl(Instr instr) {
    332   return (((instr & kOpcodeMask) == EXT5) &&
    333           ((instr & kExt5OpcodeMask) == RLDICL));
    334 }
    335 #endif
    336 
    337 
    338 bool Assembler::IsCmpImmediate(Instr instr) {
    339   return ((instr & kOpcodeMask) == CMPI);
    340 }
    341 
    342 
    343 bool Assembler::IsCrSet(Instr instr) {
    344   return (((instr & kOpcodeMask) == EXT1) &&
    345           ((instr & kExt1OpcodeMask) == CREQV));
    346 }
    347 
    348 
    349 Register Assembler::GetCmpImmediateRegister(Instr instr) {
    350   DCHECK(IsCmpImmediate(instr));
    351   return GetRA(instr);
    352 }
    353 
    354 
    355 int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
    356   DCHECK(IsCmpImmediate(instr));
    357   return instr & kOff16Mask;
    358 }
    359 
    360 
    361 // Labels refer to positions in the (to be) generated code.
    362 // There are bound, linked, and unused labels.
    363 //
    364 // Bound labels refer to known positions in the already
    365 // generated code. pos() is the position the label refers to.
    366 //
    367 // Linked labels refer to unknown positions in the code
    368 // to be generated; pos() is the position of the last
    369 // instruction using the label.
    370 
    371 
// The link chain is terminated by a negative code position (must be aligned)
const int kEndOfChain = -4;


// Dummy opcodes for unbound label mov instructions or jump table entries.
// They occupy the primary opcode field of a placeholder instruction;
// target_at()/target_at_put() recognize them and patch in the real code
// once the label is bound.
enum {
  kUnboundMovLabelOffsetOpcode = 0 << 26,
  kUnboundAddLabelOffsetOpcode = 1 << 26,
  kUnboundMovLabelAddrOpcode = 2 << 26,
  kUnboundJumpTableEntryOpcode = 3 << 26
};
    383 
    384 
// Returns the position referenced by the branch/placeholder at |pos|,
// or kEndOfChain when the stored link is 0 (the instruction refers to
// itself, marking the end of a label's link chain).
int Assembler::target_at(int pos) {
  Instr instr = instr_at(pos);
  // check which type of branch this is 16 or 26 bit offset
  int opcode = instr & kOpcodeMask;
  int link;
  switch (opcode) {
    case BX:
      link = SIGN_EXT_IMM26(instr & kImm26Mask);
      link &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
      break;
    case BCX:
      link = SIGN_EXT_IMM16((instr & kImm16Mask));
      link &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
      break;
    case kUnboundMovLabelOffsetOpcode:
    case kUnboundAddLabelOffsetOpcode:
    case kUnboundMovLabelAddrOpcode:
    case kUnboundJumpTableEntryOpcode:
      // Placeholders store the link scaled down by 4; restore bytes.
      link = SIGN_EXT_IMM26(instr & kImm26Mask);
      link <<= 2;
      break;
    default:
      DCHECK(false);
      return -1;
  }

  if (link == 0) return kEndOfChain;
  return pos + link;
}
    414 
    415 
// Patches the instruction(s) at |pos| to reference |target_pos|.  Real
// branches (BX/BCX) get their offset field rewritten in place; the
// kUnbound* placeholders are replaced with the final mov/add/jump-table
// sequence via a CodePatcher.  When non-null, *is_branch reports
// whether |pos| held an actual branch instruction.
void Assembler::target_at_put(int pos, int target_pos, bool* is_branch) {
  Instr instr = instr_at(pos);
  int opcode = instr & kOpcodeMask;

  if (is_branch != nullptr) {
    *is_branch = (opcode == BX || opcode == BCX);
  }

  switch (opcode) {
    case BX: {
      int imm26 = target_pos - pos;
      CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
      if (imm26 == kInstrSize && !(instr & kLKMask)) {
        // Branch to next instr without link.
        instr = ORI;  // nop: ori, 0,0,0
      } else {
        // Preserve AA/LK, replace only the offset field.
        instr &= ((~kImm26Mask) | kAAMask | kLKMask);
        instr |= (imm26 & kImm26Mask);
      }
      instr_at_put(pos, instr);
      break;
    }
    case BCX: {
      int imm16 = target_pos - pos;
      CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
      if (imm16 == kInstrSize && !(instr & kLKMask)) {
        // Branch to next instr without link.
        instr = ORI;  // nop: ori, 0,0,0
      } else {
        // Preserve AA/LK, replace only the offset field.
        instr &= ((~kImm16Mask) | kAAMask | kLKMask);
        instr |= (imm16 & kImm16Mask);
      }
      instr_at_put(pos, instr);
      break;
    }
    case kUnboundMovLabelOffsetOpcode: {
      // Load the position of the label relative to the generated code object
      // pointer in a register.  The destination register number was stashed
      // in the following instruction slot.
      Register dst = Register::from_code(instr_at(pos + kInstrSize));
      int32_t offset = target_pos + (Code::kHeaderSize - kHeapObjectTag);
      CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos), 2,
                          CodePatcher::DONT_FLUSH);
      patcher.masm()->bitwise_mov32(dst, offset);
      break;
    }
    case kUnboundAddLabelOffsetOpcode: {
      // dst = base + position + immediate; operands packed in next slot.
      Instr operands = instr_at(pos + kInstrSize);
      Register dst = Register::from_code((operands >> 21) & 0x1f);
      Register base = Register::from_code((operands >> 16) & 0x1f);
      int32_t offset = target_pos + SIGN_EXT_IMM16(operands & kImm16Mask);
      CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos), 2,
                          CodePatcher::DONT_FLUSH);
      patcher.masm()->bitwise_add32(dst, base, offset);
      break;
    }
    case kUnboundMovLabelAddrOpcode: {
      // Load the address of the label in a register.
      Register dst = Register::from_code(instr_at(pos + kInstrSize));
      CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
                          kMovInstructionsNoConstantPool,
                          CodePatcher::DONT_FLUSH);
      // Keep internal references relative until EmitRelocations.
      patcher.masm()->bitwise_mov(dst, target_pos);
      break;
    }
    case kUnboundJumpTableEntryOpcode: {
      CodePatcher patcher(isolate(), reinterpret_cast<byte*>(buffer_ + pos),
                          kPointerSize / kInstrSize, CodePatcher::DONT_FLUSH);
      // Keep internal references relative until EmitRelocations.
      patcher.masm()->dp(target_pos);
      break;
    }
    default:
      DCHECK(false);
      break;
  }
}
    494 
    495 
    496 int Assembler::max_reach_from(int pos) {
    497   Instr instr = instr_at(pos);
    498   int opcode = instr & kOpcodeMask;
    499 
    500   // check which type of branch this is 16 or 26 bit offset
    501   switch (opcode) {
    502     case BX:
    503       return 26;
    504     case BCX:
    505       return 16;
    506     case kUnboundMovLabelOffsetOpcode:
    507     case kUnboundAddLabelOffsetOpcode:
    508     case kUnboundMovLabelAddrOpcode:
    509     case kUnboundJumpTableEntryOpcode:
    510       return 0;  // no limit on reach
    511   }
    512 
    513   DCHECK(false);
    514   return 0;
    515 }
    516 
    517 
// Binds L to |pos|: walks the label's entire link chain and patches
// every referring instruction to target |pos|.  References whose reach
// cannot span the distance are routed through a shared trampoline slot.
void Assembler::bind_to(Label* L, int pos) {
  DCHECK(0 <= pos && pos <= pc_offset());  // must have a valid binding position
  int32_t trampoline_pos = kInvalidSlotPos;
  bool is_branch = false;
  while (L->is_linked()) {
    int fixup_pos = L->pos();
    int32_t offset = pos - fixup_pos;
    int maxReach = max_reach_from(fixup_pos);
    next(L);  // call next before overwriting link with target at fixup_pos
    if (maxReach && is_intn(offset, maxReach) == false) {
      // Out of reach: allocate (once) a trampoline slot branching to pos
      // and retarget this reference at the trampoline instead.
      if (trampoline_pos == kInvalidSlotPos) {
        trampoline_pos = get_trampoline_entry();
        CHECK(trampoline_pos != kInvalidSlotPos);
        target_at_put(trampoline_pos, pos);
      }
      target_at_put(fixup_pos, trampoline_pos);
    } else {
      target_at_put(fixup_pos, pos, &is_branch);
    }
  }
  L->bind_to(pos);

  // A directly-patched branch no longer needs trampoline bookkeeping.
  if (!trampoline_emitted_ && is_branch) {
    UntrackBranch();
  }

  // Keep track of the last bound label so we don't eliminate any instructions
  // before a bound label.
  if (pos > last_bound_pos_) last_bound_pos_ = pos;
}
    548 
    549 
// Binds L to the current code position.
void Assembler::bind(Label* L) {
  DCHECK(!L->is_bound());  // label can only be bound once
  bind_to(L, pc_offset());
}
    554 
    555 
// Advances L to the next entry of its link chain, or marks it unused
// when the chain is exhausted.
void Assembler::next(Label* L) {
  DCHECK(L->is_linked());
  int link = target_at(L->pos());
  if (link == kEndOfChain) {
    L->Unuse();
  } else {
    DCHECK(link >= 0);
    L->link_to(link);
  }
}
    566 
    567 
    568 bool Assembler::is_near(Label* L, Condition cond) {
    569   DCHECK(L->is_bound());
    570   if (L->is_bound() == false) return false;
    571 
    572   int maxReach = ((cond == al) ? 26 : 16);
    573   int offset = L->pos() - pc_offset();
    574 
    575   return is_intn(offset, maxReach);
    576 }
    577 
    578 
// Emits an A-form floating-point instruction: FRT, FRA, FRB plus the
// optional record (Rc) bit.
void Assembler::a_form(Instr instr, DoubleRegister frt, DoubleRegister fra,
                       DoubleRegister frb, RCBit r) {
  emit(instr | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 | r);
}
    583 
    584 
// Emits a D-form instruction: |rt| goes in bits 21..25 and |ra| in
// bits 16..20 (logical D-form callers pass RS as |rt|).  The 16-bit
// immediate is range-checked as signed or unsigned per |signed_disp|
// with a CHECK that is active even in release builds; the PrintF
// diagnostics fire just before an impending CHECK failure.
void Assembler::d_form(Instr instr, Register rt, Register ra,
                       const intptr_t val, bool signed_disp) {
  if (signed_disp) {
    if (!is_int16(val)) {
      PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR "\n", val, val);
    }
    CHECK(is_int16(val));
  } else {
    if (!is_uint16(val)) {
      PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR
             ", is_unsigned_imm16(val)=%d, kImm16Mask=0x%x\n",
             val, val, is_uint16(val), kImm16Mask);
    }
    CHECK(is_uint16(val));
  }
  emit(instr | rt.code() * B21 | ra.code() * B16 | (kImm16Mask & val));
}
    602 
    603 
// Emits an X-form instruction.  Note the encoding order: RS occupies
// bits 21..25 and RA bits 16..20, i.e. source before destination.
void Assembler::x_form(Instr instr, Register ra, Register rs, Register rb,
                       RCBit r) {
  emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | r);
}
    608 
    609 
// Emits an XO-form (arithmetic) instruction with overflow (OE) and
// record (Rc) bits.
void Assembler::xo_form(Instr instr, Register rt, Register ra, Register rb,
                        OEBit o, RCBit r) {
  emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | o | r);
}
    614 
    615 
// Emits an MD-form 64-bit rotate instruction.  The 6-bit shift and
// mask-bit operands are split: low 5 bits in the regular fields, the
// 6th bit stored separately (sh5 at bit 1, m5 at bit 5).
void Assembler::md_form(Instr instr, Register ra, Register rs, int shift,
                        int maskbit, RCBit r) {
  int sh0_4 = shift & 0x1f;
  int sh5 = (shift >> 5) & 0x1;
  int m0_4 = maskbit & 0x1f;
  int m5 = (maskbit >> 5) & 0x1;

  emit(instr | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 | m0_4 * B6 |
       m5 * B5 | sh5 * B1 | r);
}
    626 
    627 
// Emits an MDS-form 64-bit rotate instruction (shift amount in RB);
// the 6-bit mask operand is split as in md_form.
void Assembler::mds_form(Instr instr, Register ra, Register rs, Register rb,
                         int maskbit, RCBit r) {
  int m0_4 = maskbit & 0x1f;
  int m5 = (maskbit >> 5) & 0x1;

  emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | m0_4 * B6 |
       m5 * B5 | r);
}
    636 
    637 
    638 // Returns the next free trampoline entry.
    639 int32_t Assembler::get_trampoline_entry() {
    640   int32_t trampoline_entry = kInvalidSlotPos;
    641 
    642   if (!internal_trampoline_exception_) {
    643     trampoline_entry = trampoline_.take_slot();
    644 
    645     if (kInvalidSlotPos == trampoline_entry) {
    646       internal_trampoline_exception_ = true;
    647     }
    648   }
    649   return trampoline_entry;
    650 }
    651 
    652 
// Returns the position to encode for a reference to L and, for labels
// that are not yet bound, links the current pc into L's chain.  The
// very first reference links to itself (see target_at()).
int Assembler::link(Label* L) {
  int position;
  if (L->is_bound()) {
    position = L->pos();
  } else {
    if (L->is_linked()) {
      position = L->pos();  // L's link
    } else {
      // was: target_pos = kEndOfChain;
      // However, using self to mark the first reference
      // should avoid most instances of branch offset overflow.  See
      // target_at() for where this is converted back to kEndOfChain.
      position = pc_offset();
    }
    L->link_to(pc_offset());
  }

  return position;
}
    672 
    673 
    674 // Branch instructions.
    675 
    676 
// Conditional branch to the link register (bclr).  Source positions are
// flushed first since this typically ends a function (blr).
void Assembler::bclr(BOfield bo, int condition_bit, LKBit lk) {
  positions_recorder()->WriteRecordedPositions();
  emit(EXT1 | bo | condition_bit * B16 | BCLRX | lk);
}
    681 
    682 
// Conditional branch to the count register (bcctr).
void Assembler::bcctr(BOfield bo, int condition_bit, LKBit lk) {
  positions_recorder()->WriteRecordedPositions();
  emit(EXT1 | bo | condition_bit * B16 | BCCTRX | lk);
}
    687 
    688 
// Pseudo op - branch to link register (i.e. function return).
void Assembler::blr() { bclr(BA, 0, LeaveLK); }
    691 
    692 
// Pseudo op - branch to count register -- used for indirect "jump".
void Assembler::bctr() { bcctr(BA, 0, LeaveLK); }
    695 
    696 
    697 void Assembler::bctrl() { bcctr(BA, 0, SetLK); }
    698 
    699 
// Conditional branch to a pc-relative offset (BCX).  branch_offset must
// fit in a signed 16-bit field with the low AA/LK bits clear.
void Assembler::bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk) {
  if (lk == SetLK) {
    positions_recorder()->WriteRecordedPositions();
  }
  int imm16 = branch_offset;
  CHECK(is_int16(imm16) && (imm16 & (kAAMask | kLKMask)) == 0);
  emit(BCX | bo | condition_bit * B16 | (imm16 & kImm16Mask) | lk);
}
    708 
    709 
// Unconditional branch to a pc-relative offset (BX).  branch_offset
// must fit in a signed 26-bit field with the low AA/LK bits clear.
void Assembler::b(int branch_offset, LKBit lk) {
  if (lk == SetLK) {
    positions_recorder()->WriteRecordedPositions();
  }
  int imm26 = branch_offset;
  CHECK(is_int26(imm26) && (imm26 & (kAAMask | kLKMask)) == 0);
  emit(BX | (imm26 & kImm26Mask) | lk);
}
    718 
    719 
// xori dst, src, imm.  Logical D-form: the source (RS) is the first
// d_form operand, the destination (RA) the second.
void Assembler::xori(Register dst, Register src, const Operand& imm) {
  d_form(XORI, src, dst, imm.imm_, false);
}
    723 
    724 
// xoris ra, rs, imm — XOR with the immediate shifted left 16 bits.
void Assembler::xoris(Register ra, Register rs, const Operand& imm) {
  d_form(XORIS, rs, ra, imm.imm_, false);
}
    728 
    729 
// xor dst, src1, src2 (X-form register XOR).
void Assembler::xor_(Register dst, Register src1, Register src2, RCBit rc) {
  x_form(EXT2 | XORX, dst, src1, src2, rc);
}
    733 
    734 
// cntlzw ra, rs — count leading zeros (word).  The RB field is unused
// by the instruction; r0 just fills the slot.
void Assembler::cntlzw_(Register ra, Register rs, RCBit rc) {
  x_form(EXT2 | CNTLZWX, ra, rs, r0, rc);
}
    738 
    739 
// popcntw ra, rs — per-word population count; no RB field, no Rc bit.
void Assembler::popcntw(Register ra, Register rs) {
  emit(EXT2 | POPCNTW | rs.code() * B21 | ra.code() * B16);
}
    743 
    744 
// and ra, rs, rb (X-form register AND).
void Assembler::and_(Register ra, Register rs, Register rb, RCBit rc) {
  x_form(EXT2 | ANDX, ra, rs, rb, rc);
}
    748 
    749 
// rlwinm ra, rs, sh, mb, me — rotate left word by immediate, then AND
// with the mask defined by mb..me.  All three are masked to 5 bits.
void Assembler::rlwinm(Register ra, Register rs, int sh, int mb, int me,
                       RCBit rc) {
  sh &= 0x1f;
  mb &= 0x1f;
  me &= 0x1f;
  emit(RLWINMX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
       me << 1 | rc);
}
    758 
    759 
// rlwnm ra, rs, rb, mb, me — like rlwinm but the rotate amount comes
// from register rb.
void Assembler::rlwnm(Register ra, Register rs, Register rb, int mb, int me,
                      RCBit rc) {
  mb &= 0x1f;
  me &= 0x1f;
  emit(RLWNMX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | mb * B6 |
       me << 1 | rc);
}
    767 
    768 
// rlwimi ra, rs, sh, mb, me — rotate left word immediate then insert
// the masked bits into ra (bits outside the mask keep ra's old value).
void Assembler::rlwimi(Register ra, Register rs, int sh, int mb, int me,
                       RCBit rc) {
  sh &= 0x1f;
  mb &= 0x1f;
  me &= 0x1f;
  emit(RLWIMIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
       me << 1 | rc);
}
    777 
    778 
// slwi dst, src, val — shift left word immediate, implemented as the
// standard rlwinm extended mnemonic (rotate by val, clear low val bits).
void Assembler::slwi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((32 > val.imm_) && (val.imm_ >= 0));
  rlwinm(dst, src, val.imm_, 0, 31 - val.imm_, rc);
}
    783 
    784 
// srwi dst, src, val — shift right word immediate (logical), via the
// rlwinm extended mnemonic (rotate by 32-val, clear high val bits).
void Assembler::srwi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((32 > val.imm_) && (val.imm_ >= 0));
  rlwinm(dst, src, 32 - val.imm_, val.imm_, 31, rc);
}
    789 
    790 
// clrrwi dst, src, val — clear the rightmost val bits (rlwinm mnemonic).
void Assembler::clrrwi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((32 > val.imm_) && (val.imm_ >= 0));
  rlwinm(dst, src, 0, 0, 31 - val.imm_, rc);
}
    796 
    797 
// clrlwi dst, src, val — clear the leftmost val bits (rlwinm mnemonic).
void Assembler::clrlwi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((32 > val.imm_) && (val.imm_ >= 0));
  rlwinm(dst, src, 0, val.imm_, 31, rc);
}
    803 
    804 
// srawi ra, rs, sh — shift right algebraic (arithmetic) word immediate.
void Assembler::srawi(Register ra, Register rs, int sh, RCBit r) {
  emit(EXT2 | SRAWIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | r);
}
    808 
    809 
// srw dst, src1, src2 — shift right word (logical), amount in src2.
void Assembler::srw(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | SRWX, dst, src1, src2, r);
}
    813 
    814 
// slw dst, src1, src2 — shift left word, amount in src2.
void Assembler::slw(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | SLWX, dst, src1, src2, r);
}
    818 
    819 
// sraw ra, rs, rb — shift right algebraic word, amount in rb.
void Assembler::sraw(Register ra, Register rs, Register rb, RCBit r) {
  x_form(EXT2 | SRAW, ra, rs, rb, r);
}
    823 
    824 
// rotlw ra, rs, rb — rotate left word by rb (rlwnm with a full mask).
void Assembler::rotlw(Register ra, Register rs, Register rb, RCBit r) {
  rlwnm(ra, rs, rb, 0, 31, r);
}
    828 
    829 
// rotlwi ra, rs, sh — rotate left word immediate (rlwinm, full mask).
void Assembler::rotlwi(Register ra, Register rs, int sh, RCBit r) {
  rlwinm(ra, rs, sh, 0, 31, r);
}
    833 
    834 
// rotrwi ra, rs, sh — rotate right by sh == rotate left by 32 - sh.
void Assembler::rotrwi(Register ra, Register rs, int sh, RCBit r) {
  rlwinm(ra, rs, 32 - sh, 0, 31, r);
}
    838 
    839 
// subi dst, src, imm — implemented as addi with the negated immediate.
void Assembler::subi(Register dst, Register src, const Operand& imm) {
  addi(dst, src, Operand(-(imm.imm_)));
}
    843 
// addc dst, src1, src2 — add carrying (sets CA).
void Assembler::addc(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | ADDCX, dst, src1, src2, o, r);
}
    848 
    849 
// addze dst, src1 — add to zero extended: dst = src1 + CA.  No RB
// operand, hence the hand-rolled encoding below.
void Assembler::addze(Register dst, Register src1, OEBit o, RCBit r) {
  // a special xo_form
  emit(EXT2 | ADDZEX | dst.code() * B21 | src1.code() * B16 | o | r);
}
    854 
    855 
// sub dst, src1, src2 — subf computes RB - RA, so the sources are
// swapped to yield dst = src1 - src2.
void Assembler::sub(Register dst, Register src1, Register src2, OEBit o,
                    RCBit r) {
  xo_form(EXT2 | SUBFX, dst, src2, src1, o, r);
}
    860 
    861 
// subfc dst, src1, src2 — subtract from carrying; sources swapped as in
// sub() so that dst = src1 - src2 (and CA is set).
void Assembler::subfc(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | SUBFCX, dst, src2, src1, o, r);
}
    866 
    867 
// subfic dst, src, imm — subtract from immediate carrying:
// dst = imm - src.
void Assembler::subfic(Register dst, Register src, const Operand& imm) {
  d_form(SUBFIC, dst, src, imm.imm_, true);
}
    871 
    872 
// add dst, src1, src2 (XO-form register add).
void Assembler::add(Register dst, Register src1, Register src2, OEBit o,
                    RCBit r) {
  xo_form(EXT2 | ADDX, dst, src1, src2, o, r);
}
    877 
    878 
// Multiply low word: dst = low 32 bits of src1 * src2.
void Assembler::mullw(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | MULLW, dst, src1, src2, o, r);
}


// Multiply hi word (signed).  mulhw has no OE field, hence LeaveOE.
void Assembler::mulhw(Register dst, Register src1, Register src2, RCBit r) {
  xo_form(EXT2 | MULHWX, dst, src1, src2, LeaveOE, r);
}


// Multiply hi word unsigned.  No OE field, hence LeaveOE.
void Assembler::mulhwu(Register dst, Register src1, Register src2, RCBit r) {
  xo_form(EXT2 | MULHWUX, dst, src1, src2, LeaveOE, r);
}


// Divide word (signed 32-bit): dst = src1 / src2.
void Assembler::divw(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | DIVW, dst, src1, src2, o, r);
}


// Divide word unsigned (32-bit): dst = src1 / src2.
void Assembler::divwu(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | DIVWU, dst, src1, src2, o, r);
}
    910 
    911 
// Add Immediate: dst = src + imm.  r0 is disallowed as the source because
// RA == 0 reads as the literal value zero in this encoding.
void Assembler::addi(Register dst, Register src, const Operand& imm) {
  DCHECK(!src.is(r0));  // use li instead to show intent
  d_form(ADDI, dst, src, imm.imm_, true);
}


// Add Immediate Shifted: dst = src + (imm << 16).
void Assembler::addis(Register dst, Register src, const Operand& imm) {
  DCHECK(!src.is(r0));  // use lis instead to show intent
  d_form(ADDIS, dst, src, imm.imm_, true);
}


// Add Immediate Carrying: dst = src + imm, setting CA.
void Assembler::addic(Register dst, Register src, const Operand& imm) {
  d_form(ADDIC, dst, src, imm.imm_, true);
}


// AND Immediate.  Logical-immediate ops pass (rs, ra) to d_form — the
// destination is the RA field — and use an unsigned immediate (false).
void Assembler::andi(Register ra, Register rs, const Operand& imm) {
  d_form(ANDIx, rs, ra, imm.imm_, false);
}


// AND Immediate Shifted: ra = rs & (imm << 16).
void Assembler::andis(Register ra, Register rs, const Operand& imm) {
  d_form(ANDISx, rs, ra, imm.imm_, false);
}
    937 
    938 
// NOR: dst = ~(src1 | src2).
void Assembler::nor(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | NORX, dst, src1, src2, r);
}


// NOT (pseudo-op): dst = ~src, implemented as nor(dst, src, src).
void Assembler::notx(Register dst, Register src, RCBit r) {
  x_form(EXT2 | NORX, dst, src, src, r);
}


// OR Immediate: ra = rs | imm (unsigned; (rs, ra) order as for andi).
void Assembler::ori(Register ra, Register rs, const Operand& imm) {
  d_form(ORI, rs, ra, imm.imm_, false);
}


// OR Immediate Shifted: dst = src | (imm << 16).
void Assembler::oris(Register dst, Register src, const Operand& imm) {
  d_form(ORIS, src, dst, imm.imm_, false);
}


// OR: dst = src1 | src2.
void Assembler::orx(Register dst, Register src1, Register src2, RCBit rc) {
  x_form(EXT2 | ORX, dst, src1, src2, rc);
}


// OR with Complement: dst = src1 | ~src2.
void Assembler::orc(Register dst, Register src1, Register src2, RCBit rc) {
  x_form(EXT2 | ORC, dst, src1, src2, rc);
}
    967 
    968 
// Compare Immediate (signed), pointer-width: the L bit selects a 64-bit
// compare on PPC64 and a 32-bit compare otherwise.
void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) {
  intptr_t imm16 = src2.imm_;
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(is_int16(imm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  imm16 &= kImm16Mask;
  emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
}


// Compare Logical Immediate (unsigned), pointer-width (see cmpi for L).
void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) {
  uintptr_t uimm16 = src2.imm_;
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(is_uint16(uimm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  uimm16 &= kImm16Mask;
  emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
}


// Compare (signed, register-register), pointer-width (see cmpi for L).
void Assembler::cmp(Register src1, Register src2, CRegister cr) {
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT2 | CMP | cr.code() * B23 | L * B21 | src1.code() * B16 |
       src2.code() * B11);
}
   1007 
   1008 
// Compare Logical (unsigned, register-register), pointer-width.
void Assembler::cmpl(Register src1, Register src2, CRegister cr) {
#if V8_TARGET_ARCH_PPC64
  int L = 1;
#else
  int L = 0;
#endif
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT2 | CMPL | cr.code() * B23 | L * B21 | src1.code() * B16 |
       src2.code() * B11);
}


// Compare Word Immediate (signed, always 32-bit: L = 0).
void Assembler::cmpwi(Register src1, const Operand& src2, CRegister cr) {
  intptr_t imm16 = src2.imm_;
  int L = 0;
  int pos = pc_offset();
  DCHECK(is_int16(imm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  imm16 &= kImm16Mask;

  // For cmpwi against 0, save position and cr for later examination
  // of potential optimization.  Skipped when a label was just bound here,
  // since the compare could then be a branch target.
  if (imm16 == 0 && pos > 0 && last_bound_pos_ != pos) {
    optimizable_cmpi_pos_ = pos;
    cmpi_cr_ = cr;
  }
  emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
}


// Compare Logical Word Immediate (unsigned, always 32-bit: L = 0).
void Assembler::cmplwi(Register src1, const Operand& src2, CRegister cr) {
  uintptr_t uimm16 = src2.imm_;
  int L = 0;
  DCHECK(is_uint16(uimm16));
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  uimm16 &= kImm16Mask;
  emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
}


// Compare Word (signed, always 32-bit: L = 0).
void Assembler::cmpw(Register src1, Register src2, CRegister cr) {
  int L = 0;
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT2 | CMP | cr.code() * B23 | L * B21 | src1.code() * B16 |
       src2.code() * B11);
}


// Compare Logical Word (unsigned, always 32-bit: L = 0).
void Assembler::cmplw(Register src1, Register src2, CRegister cr) {
  int L = 0;
  DCHECK(cr.code() >= 0 && cr.code() <= 7);
  emit(EXT2 | CMPL | cr.code() * B23 | L * B21 | src1.code() * B16 |
       src2.code() * B11);
}
   1063 
   1064 
// Integer Select: rt = (CR bit cb set) ? ra : rb.
void Assembler::isel(Register rt, Register ra, Register rb, int cb) {
  emit(EXT2 | ISEL | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       cb * B6);
}


// Pseudo op - load immediate: addi with RA = r0, which reads as zero.
void Assembler::li(Register dst, const Operand& imm) {
  d_form(ADDI, dst, r0, imm.imm_, true);
}


// Pseudo op - load immediate shifted: dst = imm << 16.
void Assembler::lis(Register dst, const Operand& imm) {
  d_form(ADDIS, dst, r0, imm.imm_, true);
}


// Pseudo op - move register
void Assembler::mr(Register dst, Register src) {
  // actually or(dst, src, src)
  orx(dst, src, src);
}
   1087 
   1088 
// Byte and halfword loads.  RA = r0 is disallowed throughout because a zero
// RA field reads as the literal value zero, not register r0.

// Load Byte and Zero: dst = zero-extended byte at [ra + offset].
void Assembler::lbz(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(LBZ, dst, src.ra(), src.offset(), true);
}


// Load Byte and Zero Indexed: rt = zero-extended byte at [ra + rb].
void Assembler::lbzx(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LBZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


// Load Byte and Zero with Update Indexed (also writes ra + rb back to ra).
void Assembler::lbzux(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LBZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


// Load Halfword and Zero: dst = zero-extended halfword at [ra + offset].
void Assembler::lhz(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(LHZ, dst, src.ra(), src.offset(), true);
}


// Load Halfword and Zero Indexed.
void Assembler::lhzx(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LHZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


// Load Halfword and Zero with Update Indexed.
void Assembler::lhzux(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LHZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


// Load Halfword Algebraic Indexed (sign-extending).
void Assembler::lhax(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LHAX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
}
   1143 
   1144 
// Load Word and Zero: dst = zero-extended word at [ra + offset].
void Assembler::lwz(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(LWZ, dst, src.ra(), src.offset(), true);
}


// Load Word and Zero with Update (also writes the effective address to ra).
void Assembler::lwzu(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(LWZU, dst, src.ra(), src.offset(), true);
}


// Load Word and Zero Indexed: rt = word at [ra + rb].
void Assembler::lwzx(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LWZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


// Load Word and Zero with Update Indexed.
void Assembler::lwzux(Register rt, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LWZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


// Load Halfword Algebraic (sign-extending).
void Assembler::lha(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(LHA, dst, src.ra(), src.offset(), true);
}


// Load Word Algebraic (sign-extending).  On PPC64 this is the DS-form LD
// opcode with the low-bit extension 2 selecting lwa, which is why the
// offset must be 4-byte aligned.  On 32-bit it degenerates to lwz.
void Assembler::lwa(Register dst, const MemOperand& src) {
#if V8_TARGET_ARCH_PPC64
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | dst.code() * B21 | src.ra().code() * B16 | offset | 2);
#else
  lwz(dst, src);
#endif
}


// Load Word Algebraic Indexed; degenerates to lwzx on 32-bit.
void Assembler::lwax(Register rt, const MemOperand& src) {
#if V8_TARGET_ARCH_PPC64
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LWAX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11);
#else
  lwzx(rt, src);
#endif
}
   1204 
   1205 
// Stores.  As with the loads, RA = r0 is disallowed since a zero RA field
// reads as the literal value zero.

// Store Byte: byte of dst to [ra + offset].
void Assembler::stb(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(STB, dst, src.ra(), src.offset(), true);
}


// Store Byte Indexed: byte of rs to [ra + rb].
void Assembler::stbx(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STBX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


// Store Byte with Update Indexed (also writes ra + rb back to ra).
void Assembler::stbux(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STBUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


// Store Halfword.
void Assembler::sth(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(STH, dst, src.ra(), src.offset(), true);
}


// Store Halfword Indexed.
void Assembler::sthx(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STHX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


// Store Halfword with Update Indexed.
void Assembler::sthux(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STHUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


// Store Word.
void Assembler::stw(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(STW, dst, src.ra(), src.offset(), true);
}


// Store Word with Update (also writes the effective address to ra).
void Assembler::stwu(Register dst, const MemOperand& src) {
  DCHECK(!src.ra_.is(r0));
  d_form(STWU, dst, src.ra(), src.offset(), true);
}


// Store Word Indexed.
void Assembler::stwx(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STWX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}


// Store Word with Update Indexed.
void Assembler::stwux(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STWUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
       LeaveRC);
}
   1282 
   1283 
// Extend Sign Byte: ra = sign-extended low byte of rs.  Note the X-form
// sign-extension ops encode RS in bits 21-25 and RA in bits 16-20.
void Assembler::extsb(Register rs, Register ra, RCBit rc) {
  emit(EXT2 | EXTSB | ra.code() * B21 | rs.code() * B16 | rc);
}


// Extend Sign Halfword.
void Assembler::extsh(Register rs, Register ra, RCBit rc) {
  emit(EXT2 | EXTSH | ra.code() * B21 | rs.code() * B16 | rc);
}


// Extend Sign Word (64-bit only; a no-op on 32-bit, where the DCHECK
// requires the call to be trivially droppable: same register, no RC).
void Assembler::extsw(Register rs, Register ra, RCBit rc) {
#if V8_TARGET_ARCH_PPC64
  emit(EXT2 | EXTSW | ra.code() * B21 | rs.code() * B16 | rc);
#else
  // nop on 32-bit
  DCHECK(rs.is(ra) && rc == LeaveRC);
#endif
}


// Negate: rt = -ra.
void Assembler::neg(Register rt, Register ra, OEBit o, RCBit r) {
  emit(EXT2 | NEGX | rt.code() * B21 | ra.code() * B16 | o | r);
}


// AND with Complement: dst = src1 & ~src2.
void Assembler::andc(Register dst, Register src1, Register src2, RCBit rc) {
  x_form(EXT2 | ANDCX, dst, src1, src2, rc);
}
   1312 
   1313 
   1314 #if V8_TARGET_ARCH_PPC64
   1315 // 64bit specific instructions
// 64-bit DS-form loads/stores.  The displacement's low two bits encode the
// opcode extension (|1 selects the "with update" variant), so offsets must
// be 4-byte aligned — enforced by the CHECKs below.

// Load Doubleword: rd = doubleword at [ra + offset].
void Assembler::ld(Register rd, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset);
}


// Load Doubleword Indexed: rd = doubleword at [ra + rb].
void Assembler::ldx(Register rd, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LDX | rd.code() * B21 | ra.code() * B16 | rb.code() * B11);
}


// Load Doubleword with Update (LD with extension bit 1 set).
void Assembler::ldu(Register rd, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset | 1);
}


// Load Doubleword with Update Indexed.
void Assembler::ldux(Register rd, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | LDUX | rd.code() * B21 | ra.code() * B16 | rb.code() * B11);
}


// Store Doubleword.
void Assembler::std(Register rs, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset);
}


// Store Doubleword Indexed.
void Assembler::stdx(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STDX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11);
}


// Store Doubleword with Update (STD with extension bit 1 set).
void Assembler::stdu(Register rs, const MemOperand& src) {
  int offset = src.offset();
  DCHECK(!src.ra_.is(r0));
  CHECK(!(offset & 3) && is_int16(offset));
  offset = kImm16Mask & offset;
  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset | 1);
}


// Store Doubleword with Update Indexed.
void Assembler::stdux(Register rs, const MemOperand& src) {
  Register ra = src.ra();
  Register rb = src.rb();
  DCHECK(!ra.is(r0));
  emit(EXT2 | STDUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11);
}
   1382 
   1383 
// Rotate Left Doubleword Immediate then Clear.
void Assembler::rldic(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIC, ra, rs, sh, mb, r);
}


// Rotate Left Doubleword Immediate then Clear Left (mask from bit mb to 63).
void Assembler::rldicl(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDICL, ra, rs, sh, mb, r);
}


// Rotate Left Doubleword then Clear Left (rotate amount in rb).
void Assembler::rldcl(Register ra, Register rs, Register rb, int mb, RCBit r) {
  mds_form(EXT5 | RLDCL, ra, rs, rb, mb, r);
}


// Rotate Left Doubleword Immediate then Clear Right (mask from 0 to me).
void Assembler::rldicr(Register ra, Register rs, int sh, int me, RCBit r) {
  md_form(EXT5 | RLDICR, ra, rs, sh, me, r);
}


// Shift Left Doubleword Immediate (pseudo-op): rotate left by val and clear
// the val low bits.
void Assembler::sldi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
  rldicr(dst, src, val.imm_, 63 - val.imm_, rc);
}


// Shift Right Doubleword Immediate (pseudo-op): rotate left by (64 - val)
// and clear the val high bits.
void Assembler::srdi(Register dst, Register src, const Operand& val, RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
  rldicl(dst, src, 64 - val.imm_, val.imm_, rc);
}


// Clear Right Immediate (pseudo-op): zero the val low-order bits.
void Assembler::clrrdi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
  rldicr(dst, src, 0, 63 - val.imm_, rc);
}


// Clear Left Immediate (pseudo-op): zero the val high-order bits.
void Assembler::clrldi(Register dst, Register src, const Operand& val,
                       RCBit rc) {
  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
  rldicl(dst, src, 0, val.imm_, rc);
}


// Rotate Left Doubleword Immediate then Mask Insert.
void Assembler::rldimi(Register ra, Register rs, int sh, int mb, RCBit r) {
  md_form(EXT5 | RLDIMI, ra, rs, sh, mb, r);
}
   1433 
   1434 
// Shift Right Algebraic Doubleword Immediate.  The 6-bit shift amount is
// split across the instruction: bits 0-4 in the SH field and bit 5 in a
// separate low-order bit, as required by the XS-form encoding.
void Assembler::sradi(Register ra, Register rs, int sh, RCBit r) {
  int sh0_4 = sh & 0x1f;
  int sh5 = (sh >> 5) & 0x1;

  emit(EXT2 | SRADIX | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 |
       sh5 * B1 | r);
}


// Shift Right Doubleword (logical, amount in src2).
void Assembler::srd(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | SRDX, dst, src1, src2, r);
}


// Shift Left Doubleword (amount in src2).
void Assembler::sld(Register dst, Register src1, Register src2, RCBit r) {
  x_form(EXT2 | SLDX, dst, src1, src2, r);
}


// Shift Right Algebraic Doubleword (amount in rb).
void Assembler::srad(Register ra, Register rs, Register rb, RCBit r) {
  x_form(EXT2 | SRAD, ra, rs, rb, r);
}


// Rotate Left Doubleword (pseudo-op): rldcl with an empty clear mask.
void Assembler::rotld(Register ra, Register rs, Register rb, RCBit r) {
  rldcl(ra, rs, rb, 0, r);
}


// Rotate Left Doubleword Immediate (pseudo-op).
void Assembler::rotldi(Register ra, Register rs, int sh, RCBit r) {
  rldicl(ra, rs, sh, 0, r);
}


// Rotate Right Doubleword Immediate: a left rotate by (64 - sh).
void Assembler::rotrdi(Register ra, Register rs, int sh, RCBit r) {
  rldicl(ra, rs, 64 - sh, 0, r);
}


// Count Leading Zeros Doubleword.  r0 fills the unused RB field of x_form.
void Assembler::cntlzd_(Register ra, Register rs, RCBit rc) {
  x_form(EXT2 | CNTLZDX, ra, rs, r0, rc);
}


// Population Count Doubleword: ra = number of set bits in rs.
void Assembler::popcntd(Register ra, Register rs) {
  emit(EXT2 | POPCNTD | rs.code() * B21 | ra.code() * B16);
}


// Multiply Low Doubleword.
void Assembler::mulld(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | MULLD, dst, src1, src2, o, r);
}


// Divide Doubleword (signed).
void Assembler::divd(Register dst, Register src1, Register src2, OEBit o,
                     RCBit r) {
  xo_form(EXT2 | DIVD, dst, src1, src2, o, r);
}


// Divide Doubleword Unsigned.
void Assembler::divdu(Register dst, Register src1, Register src2, OEBit o,
                      RCBit r) {
  xo_form(EXT2 | DIVDU, dst, src1, src2, o, r);
}
   1500 #endif
   1501 
   1502 
// Function descriptor for AIX.
// Code address skips the function descriptor "header".
// TOC and static chain are ignored and set to 0.
// Must be emitted first: the DCHECK requires an empty buffer so the
// descriptor sits at offset 0, with the label-addr slot pointing past the
// two zeroed doubleword slots to the first real instruction.
void Assembler::function_descriptor() {
#if ABI_USES_FUNCTION_DESCRIPTORS
  Label instructions;
  DCHECK(pc_offset() == 0);
  emit_label_addr(&instructions);
  dp(0);  // TOC slot.
  dp(0);  // Static chain slot.
  bind(&instructions);
#endif
}
   1516 
   1517 
// Returns the number of instructions a mov(dst, src) will emit, so callers
// can reserve buffer space or plan patchable sequences.  Mirrors the
// decision logic in Assembler::mov / use_constant_pool_for_mov.
int Assembler::instructions_required_for_mov(Register dst,
                                             const Operand& src) const {
  bool canOptimize =
      !(src.must_output_reloc_info(this) || is_trampoline_pool_blocked());
  if (use_constant_pool_for_mov(dst, src, canOptimize)) {
    if (ConstantPoolAccessIsInOverflow()) {
      // Overflow access needs an extra addis to form the high adjustment.
      return kMovInstructionsConstantPool + 1;
    }
    return kMovInstructionsConstantPool;
  }
  DCHECK(!canOptimize);
  return kMovInstructionsNoConstantPool;
}
   1531 
   1532 
// Decides whether mov(dst, src) should load the value from the embedded
// constant pool rather than synthesize it with an immediate sequence.
// canOptimize indicates the value is not relocatable and may be shortened.
bool Assembler::use_constant_pool_for_mov(Register dst, const Operand& src,
                                          bool canOptimize) const {
  if (!FLAG_enable_embedded_constant_pool || !is_constant_pool_available()) {
    // If there is no constant pool available, we must use a mov
    // immediate sequence.
    return false;
  }

  intptr_t value = src.immediate();
#if V8_TARGET_ARCH_PPC64
  // Overflow (addis-adjusted) access is disallowed when a short immediate
  // sequence exists (optimizable int32) or when dst is r0, which cannot be
  // used as the addis base.
  bool allowOverflow = !((canOptimize && is_int32(value)) || dst.is(r0));
#else
  bool allowOverflow = !(canOptimize || dst.is(r0));
#endif
  if (canOptimize && is_int16(value)) {
    // Prefer a single-instruction load-immediate.
    return false;
  }
  if (!allowOverflow && ConstantPoolAccessIsInOverflow()) {
    // Prefer non-relocatable two-instruction bitwise-mov32 over
    // overflow sequence.
    return false;
  }

  return true;
}
   1559 
   1560 
   1561 void Assembler::EnsureSpaceFor(int space_needed) {
   1562   if (buffer_space() <= (kGap + space_needed)) {
   1563     GrowBuffer(space_needed);
   1564   }
   1565 }
   1566 
   1567 
   1568 bool Operand::must_output_reloc_info(const Assembler* assembler) const {
   1569   if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
   1570     if (assembler != NULL && assembler->predictable_code_size()) return true;
   1571     return assembler->serializer_enabled();
   1572   } else if (RelocInfo::IsNone(rmode_)) {
   1573     return false;
   1574   }
   1575   return true;
   1576 }
   1577 
   1578 
// Primarily used for loading constants
// This should really move to be in macro-assembler as it
// is really a pseudo instruction
// Some usages of this intend for a FIXED_SEQUENCE to be used
// Todo - break this dependency so we can optimize mov() in general
// and only use the generic version when we require a fixed sequence
void Assembler::mov(Register dst, const Operand& src) {
  intptr_t value = src.immediate();
  bool relocatable = src.must_output_reloc_info(this);
  bool canOptimize;

  // Shortening is allowed only for non-relocatable values, and while the
  // trampoline pool is blocked only if the result is a single instruction
  // (a variable-length sequence could invalidate pool bookkeeping).
  canOptimize =
      !(relocatable || (is_trampoline_pool_blocked() && !is_int16(value)));

  // Path 1: load the value from the embedded constant pool.
  if (use_constant_pool_for_mov(dst, src, canOptimize)) {
    DCHECK(is_constant_pool_available());
    if (relocatable) {
      RecordRelocInfo(src.rmode_);
    }
    ConstantPoolEntry::Access access = ConstantPoolAddEntry(src.rmode_, value);
#if V8_TARGET_ARCH_PPC64
    if (access == ConstantPoolEntry::OVERFLOWED) {
      // Out-of-range entry: form the high adjustment first, then load.
      // Offsets in both instructions are patched later.
      addis(dst, kConstantPoolRegister, Operand::Zero());
      ld(dst, MemOperand(dst, 0));
    } else {
      ld(dst, MemOperand(kConstantPoolRegister, 0));
    }
#else
    if (access == ConstantPoolEntry::OVERFLOWED) {
      addis(dst, kConstantPoolRegister, Operand::Zero());
      lwz(dst, MemOperand(dst, 0));
    } else {
      lwz(dst, MemOperand(kConstantPoolRegister, 0));
    }
#endif
    return;
  }

  // Path 2: shortest immediate sequence for the value's magnitude.
  if (canOptimize) {
    if (is_int16(value)) {
      li(dst, Operand(value));
    } else {
      uint16_t u16;
#if V8_TARGET_ARCH_PPC64
      if (is_int32(value)) {
#endif
        lis(dst, Operand(value >> 16));
#if V8_TARGET_ARCH_PPC64
      } else {
        if (is_int48(value)) {
          li(dst, Operand(value >> 32));
        } else {
          lis(dst, Operand(value >> 48));
          u16 = ((value >> 32) & 0xffff);
          if (u16) {
            ori(dst, dst, Operand(u16));
          }
        }
        sldi(dst, dst, Operand(32));
        u16 = ((value >> 16) & 0xffff);
        if (u16) {
          oris(dst, dst, Operand(u16));
        }
      }
#endif
      // Low halfword is OR'd in last on all paths; skipped when zero.
      u16 = (value & 0xffff);
      if (u16) {
        ori(dst, dst, Operand(u16));
      }
    }
    return;
  }

  // Path 3: fixed-length, patchable full-width sequence (needed for
  // relocatable values).
  DCHECK(!canOptimize);
  if (relocatable) {
    RecordRelocInfo(src.rmode_);
  }
  bitwise_mov(dst, value);
}
   1658 
   1659 
// Loads a full pointer-width immediate with a fixed-length instruction
// sequence (5 instructions on PPC64, 2 on 32-bit) so it can be patched in
// place later.  Trampoline pool emission is blocked so the sequence stays
// contiguous.
void Assembler::bitwise_mov(Register dst, intptr_t value) {
    BlockTrampolinePoolScope block_trampoline_pool(this);
#if V8_TARGET_ARCH_PPC64
    int32_t hi_32 = static_cast<int32_t>(value >> 32);
    int32_t lo_32 = static_cast<int32_t>(value);
    int hi_word = static_cast<int>(hi_32 >> 16);
    int lo_word = static_cast<int>(hi_32 & 0xffff);
    // Build the high 32 bits, shift them into place, then OR in the low
    // 32 bits halfword by halfword.
    lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
    ori(dst, dst, Operand(lo_word));
    sldi(dst, dst, Operand(32));
    hi_word = static_cast<int>(((lo_32 >> 16) & 0xffff));
    lo_word = static_cast<int>(lo_32 & 0xffff);
    oris(dst, dst, Operand(hi_word));
    ori(dst, dst, Operand(lo_word));
#else
    int hi_word = static_cast<int>(value >> 16);
    int lo_word = static_cast<int>(value & 0xffff);
    lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
    ori(dst, dst, Operand(lo_word));
#endif
}
   1681 
   1682 
   1683 void Assembler::bitwise_mov32(Register dst, int32_t value) {
   1684   BlockTrampolinePoolScope block_trampoline_pool(this);
   1685   int hi_word = static_cast<int>(value >> 16);
   1686   int lo_word = static_cast<int>(value & 0xffff);
   1687   lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
   1688   ori(dst, dst, Operand(lo_word));
   1689 }
   1690 
   1691 
// Adds a 32-bit immediate using a fixed two-instruction sequence so it can
// be patched in place later (hence the nop padding on the short path).
void Assembler::bitwise_add32(Register dst, Register src, int32_t value) {
  BlockTrampolinePoolScope block_trampoline_pool(this);
  if (is_int16(value)) {
    addi(dst, src, Operand(value));
    nop();  // Keep the sequence length fixed at two instructions.
  } else {
    int hi_word = static_cast<int>(value >> 16);
    int lo_word = static_cast<int>(value & 0xffff);
    // addic sign-extends its operand: if bit 15 of lo_word is set, the low
    // add effectively subtracts 0x10000, so pre-increment the high word to
    // compensate.
    if (lo_word & 0x8000) hi_word++;
    addis(dst, src, Operand(SIGN_EXT_IMM16(hi_word)));
    addic(dst, dst, Operand(SIGN_EXT_IMM16(lo_word)));
  }
}
   1705 
   1706 
// Loads into dst the offset of |label| within the generated code object
// (i.e. relative to the code start, including the object header).
void Assembler::mov_label_offset(Register dst, Label* label) {
  int position = link(label);
  if (label->is_bound()) {
    // Load the position of the label relative to the generated code object.
    mov(dst, Operand(position + Code::kHeaderSize - kHeapObjectTag));
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain.  Encode the destination register in the 2nd instruction.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;  // Links are instruction-granular (4-byte units).
    DCHECK(is_int26(link));

    // When the label is bound, these instructions will be patched
    // with a 2 instruction mov sequence that will load the
    // destination register with the position of the label from the
    // beginning of the code.
    //
    // target_at extracts the link and target_at_put patches the instructions.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundMovLabelOffsetOpcode | (link & kImm26Mask));
    emit(dst.code());
  }
}
   1732 
   1733 
// Computes dst = base + (offset of |label|) + delta, deferring via a
// patchable placeholder if the label is not yet bound.
void Assembler::add_label_offset(Register dst, Register base, Label* label,
                                 int delta) {
  int position = link(label);
  if (label->is_bound()) {
    // dst = base + position + delta
    position += delta;
    bitwise_add32(dst, base, position);
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain.  Encode the operands in the 2nd instruction.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;  // Links are instruction-granular (4-byte units).
    DCHECK(is_int26(link));
    DCHECK(is_int16(delta));

    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundAddLabelOffsetOpcode | (link & kImm26Mask));
    emit(dst.code() * B21 | base.code() * B16 | (delta & kImm16Mask));
  }
}
   1756 
   1757 
// Loads into dst the (eventually absolute) address of |label|, recorded as
// an encoded internal reference and kept relative until EmitRelocations.
void Assembler::mov_label_addr(Register dst, Label* label) {
  CheckBuffer();
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED);
  int position = link(label);
  if (label->is_bound()) {
    // Keep internal references relative until EmitRelocations.
    bitwise_mov(dst, position);
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain.  Encode the destination register in the 2nd instruction.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;  // Links are instruction-granular (4-byte units).
    DCHECK(is_int26(link));

    // When the label is bound, these instructions will be patched
    // with a multi-instruction mov sequence that will load the
    // destination register with the address of the label.
    //
    // target_at extracts the link and target_at_put patches the instructions.
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundMovLabelAddrOpcode | (link & kImm26Mask));
    emit(dst.code());
    // Pad to the full fixed mov sequence length so patching fits in place.
    DCHECK(kMovInstructionsNoConstantPool >= 2);
    for (int i = 0; i < kMovInstructionsNoConstantPool - 2; i++) nop();
  }
}
   1786 
   1787 
// Emits a pointer-sized data slot holding the address of |label| (a jump
// table entry).  The slot stays buffer-relative until EmitRelocations().
void Assembler::emit_label_addr(Label* label) {
  CheckBuffer();
  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
  int position = link(label);
  if (label->is_bound()) {
    // Keep internal references relative until EmitRelocations.
    dp(position);
  } else {
    // Encode internal reference to unbound label. We use a dummy opcode
    // such that it won't collide with any opcode that might appear in the
    // label's chain.
    // |link| is the backward distance (in instructions) to the previous
    // entry in the label's chain; it is stored in the low 26 bits.
    int link = position - pc_offset();
    DCHECK_EQ(0, link & 3);
    link >>= 2;
    DCHECK(is_int26(link));

    // When the label is bound, the instruction(s) will be patched
    // as a jump table entry containing the label address.  target_at extracts
    // the link and target_at_put patches the instruction(s).
    BlockTrampolinePoolScope block_trampoline_pool(this);
    emit(kUnboundJumpTableEntryOpcode | (link & kImm26Mask));
#if V8_TARGET_ARCH_PPC64
    // On 64-bit the entry spans two instruction slots.
    nop();
#endif
  }
}
   1814 
   1815 
   1816 // Special register instructions
   1817 void Assembler::crxor(int bt, int ba, int bb) {
   1818   emit(EXT1 | CRXOR | bt * B21 | ba * B16 | bb * B11);
   1819 }
   1820 
   1821 
   1822 void Assembler::creqv(int bt, int ba, int bb) {
   1823   emit(EXT1 | CREQV | bt * B21 | ba * B16 | bb * B11);
   1824 }
   1825 
   1826 
   1827 void Assembler::mflr(Register dst) {
   1828   emit(EXT2 | MFSPR | dst.code() * B21 | 256 << 11);  // Ignore RC bit
   1829 }
   1830 
   1831 
   1832 void Assembler::mtlr(Register src) {
   1833   emit(EXT2 | MTSPR | src.code() * B21 | 256 << 11);  // Ignore RC bit
   1834 }
   1835 
   1836 
   1837 void Assembler::mtctr(Register src) {
   1838   emit(EXT2 | MTSPR | src.code() * B21 | 288 << 11);  // Ignore RC bit
   1839 }
   1840 
   1841 
   1842 void Assembler::mtxer(Register src) {
   1843   emit(EXT2 | MTSPR | src.code() * B21 | 32 << 11);
   1844 }
   1845 
   1846 
   1847 void Assembler::mcrfs(CRegister cr, FPSCRBit bit) {
   1848   DCHECK(static_cast<int>(bit) < 32);
   1849   int bf = cr.code();
   1850   int bfa = bit / CRWIDTH;
   1851   emit(EXT4 | MCRFS | bf * B23 | bfa * B18);
   1852 }
   1853 
   1854 
   1855 void Assembler::mfcr(Register dst) { emit(EXT2 | MFCR | dst.code() * B21); }
   1856 
   1857 
   1858 #if V8_TARGET_ARCH_PPC64
   1859 void Assembler::mffprd(Register dst, DoubleRegister src) {
   1860   emit(EXT2 | MFVSRD | src.code() * B21 | dst.code() * B16);
   1861 }
   1862 
   1863 
   1864 void Assembler::mffprwz(Register dst, DoubleRegister src) {
   1865   emit(EXT2 | MFVSRWZ | src.code() * B21 | dst.code() * B16);
   1866 }
   1867 
   1868 
   1869 void Assembler::mtfprd(DoubleRegister dst, Register src) {
   1870   emit(EXT2 | MTVSRD | dst.code() * B21 | src.code() * B16);
   1871 }
   1872 
   1873 
   1874 void Assembler::mtfprwz(DoubleRegister dst, Register src) {
   1875   emit(EXT2 | MTVSRWZ | dst.code() * B21 | src.code() * B16);
   1876 }
   1877 
   1878 
   1879 void Assembler::mtfprwa(DoubleRegister dst, Register src) {
   1880   emit(EXT2 | MTVSRWA | dst.code() * B21 | src.code() * B16);
   1881 }
   1882 #endif
   1883 
   1884 
   1885 // Exception-generating instructions and debugging support.
   1886 // Stops with a non-negative code less than kNumOfWatchedStops support
   1887 // enabling/disabling and a counter feature. See simulator-ppc.h .
   1888 void Assembler::stop(const char* msg, Condition cond, int32_t code,
   1889                      CRegister cr) {
   1890   if (cond != al) {
   1891     Label skip;
   1892     b(NegateCondition(cond), &skip, cr);
   1893     bkpt(0);
   1894     bind(&skip);
   1895   } else {
   1896     bkpt(0);
   1897   }
   1898 }
   1899 
   1900 
   1901 void Assembler::bkpt(uint32_t imm16) { emit(0x7d821008); }
   1902 
   1903 
   1904 void Assembler::dcbf(Register ra, Register rb) {
   1905   emit(EXT2 | DCBF | ra.code() * B16 | rb.code() * B11);
   1906 }
   1907 
   1908 
   1909 void Assembler::sync() { emit(EXT2 | SYNC); }
   1910 
   1911 
   1912 void Assembler::lwsync() { emit(EXT2 | SYNC | 1 * B21); }
   1913 
   1914 
   1915 void Assembler::icbi(Register ra, Register rb) {
   1916   emit(EXT2 | ICBI | ra.code() * B16 | rb.code() * B11);
   1917 }
   1918 
   1919 
   1920 void Assembler::isync() { emit(EXT1 | ISYNC); }
   1921 
   1922 
   1923 // Floating point support
   1924 
   1925 void Assembler::lfd(const DoubleRegister frt, const MemOperand& src) {
   1926   int offset = src.offset();
   1927   Register ra = src.ra();
   1928   DCHECK(!ra.is(r0));
   1929   CHECK(is_int16(offset));
   1930   int imm16 = offset & kImm16Mask;
   1931   // could be x_form instruction with some casting magic
   1932   emit(LFD | frt.code() * B21 | ra.code() * B16 | imm16);
   1933 }
   1934 
   1935 
   1936 void Assembler::lfdu(const DoubleRegister frt, const MemOperand& src) {
   1937   int offset = src.offset();
   1938   Register ra = src.ra();
   1939   DCHECK(!ra.is(r0));
   1940   CHECK(is_int16(offset));
   1941   int imm16 = offset & kImm16Mask;
   1942   // could be x_form instruction with some casting magic
   1943   emit(LFDU | frt.code() * B21 | ra.code() * B16 | imm16);
   1944 }
   1945 
   1946 
   1947 void Assembler::lfdx(const DoubleRegister frt, const MemOperand& src) {
   1948   Register ra = src.ra();
   1949   Register rb = src.rb();
   1950   DCHECK(!ra.is(r0));
   1951   emit(EXT2 | LFDX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   1952        LeaveRC);
   1953 }
   1954 
   1955 
   1956 void Assembler::lfdux(const DoubleRegister frt, const MemOperand& src) {
   1957   Register ra = src.ra();
   1958   Register rb = src.rb();
   1959   DCHECK(!ra.is(r0));
   1960   emit(EXT2 | LFDUX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   1961        LeaveRC);
   1962 }
   1963 
   1964 
   1965 void Assembler::lfs(const DoubleRegister frt, const MemOperand& src) {
   1966   int offset = src.offset();
   1967   Register ra = src.ra();
   1968   CHECK(is_int16(offset));
   1969   DCHECK(!ra.is(r0));
   1970   int imm16 = offset & kImm16Mask;
   1971   // could be x_form instruction with some casting magic
   1972   emit(LFS | frt.code() * B21 | ra.code() * B16 | imm16);
   1973 }
   1974 
   1975 
   1976 void Assembler::lfsu(const DoubleRegister frt, const MemOperand& src) {
   1977   int offset = src.offset();
   1978   Register ra = src.ra();
   1979   CHECK(is_int16(offset));
   1980   DCHECK(!ra.is(r0));
   1981   int imm16 = offset & kImm16Mask;
   1982   // could be x_form instruction with some casting magic
   1983   emit(LFSU | frt.code() * B21 | ra.code() * B16 | imm16);
   1984 }
   1985 
   1986 
   1987 void Assembler::lfsx(const DoubleRegister frt, const MemOperand& src) {
   1988   Register ra = src.ra();
   1989   Register rb = src.rb();
   1990   DCHECK(!ra.is(r0));
   1991   emit(EXT2 | LFSX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   1992        LeaveRC);
   1993 }
   1994 
   1995 
   1996 void Assembler::lfsux(const DoubleRegister frt, const MemOperand& src) {
   1997   Register ra = src.ra();
   1998   Register rb = src.rb();
   1999   DCHECK(!ra.is(r0));
   2000   emit(EXT2 | LFSUX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   2001        LeaveRC);
   2002 }
   2003 
   2004 
   2005 void Assembler::stfd(const DoubleRegister frs, const MemOperand& src) {
   2006   int offset = src.offset();
   2007   Register ra = src.ra();
   2008   CHECK(is_int16(offset));
   2009   DCHECK(!ra.is(r0));
   2010   int imm16 = offset & kImm16Mask;
   2011   // could be x_form instruction with some casting magic
   2012   emit(STFD | frs.code() * B21 | ra.code() * B16 | imm16);
   2013 }
   2014 
   2015 
   2016 void Assembler::stfdu(const DoubleRegister frs, const MemOperand& src) {
   2017   int offset = src.offset();
   2018   Register ra = src.ra();
   2019   CHECK(is_int16(offset));
   2020   DCHECK(!ra.is(r0));
   2021   int imm16 = offset & kImm16Mask;
   2022   // could be x_form instruction with some casting magic
   2023   emit(STFDU | frs.code() * B21 | ra.code() * B16 | imm16);
   2024 }
   2025 
   2026 
   2027 void Assembler::stfdx(const DoubleRegister frs, const MemOperand& src) {
   2028   Register ra = src.ra();
   2029   Register rb = src.rb();
   2030   DCHECK(!ra.is(r0));
   2031   emit(EXT2 | STFDX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   2032        LeaveRC);
   2033 }
   2034 
   2035 
   2036 void Assembler::stfdux(const DoubleRegister frs, const MemOperand& src) {
   2037   Register ra = src.ra();
   2038   Register rb = src.rb();
   2039   DCHECK(!ra.is(r0));
   2040   emit(EXT2 | STFDUX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   2041        LeaveRC);
   2042 }
   2043 
   2044 
   2045 void Assembler::stfs(const DoubleRegister frs, const MemOperand& src) {
   2046   int offset = src.offset();
   2047   Register ra = src.ra();
   2048   CHECK(is_int16(offset));
   2049   DCHECK(!ra.is(r0));
   2050   int imm16 = offset & kImm16Mask;
   2051   // could be x_form instruction with some casting magic
   2052   emit(STFS | frs.code() * B21 | ra.code() * B16 | imm16);
   2053 }
   2054 
   2055 
   2056 void Assembler::stfsu(const DoubleRegister frs, const MemOperand& src) {
   2057   int offset = src.offset();
   2058   Register ra = src.ra();
   2059   CHECK(is_int16(offset));
   2060   DCHECK(!ra.is(r0));
   2061   int imm16 = offset & kImm16Mask;
   2062   // could be x_form instruction with some casting magic
   2063   emit(STFSU | frs.code() * B21 | ra.code() * B16 | imm16);
   2064 }
   2065 
   2066 
   2067 void Assembler::stfsx(const DoubleRegister frs, const MemOperand& src) {
   2068   Register ra = src.ra();
   2069   Register rb = src.rb();
   2070   DCHECK(!ra.is(r0));
   2071   emit(EXT2 | STFSX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   2072        LeaveRC);
   2073 }
   2074 
   2075 
   2076 void Assembler::stfsux(const DoubleRegister frs, const MemOperand& src) {
   2077   Register ra = src.ra();
   2078   Register rb = src.rb();
   2079   DCHECK(!ra.is(r0));
   2080   emit(EXT2 | STFSUX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
   2081        LeaveRC);
   2082 }
   2083 
   2084 
// A-form: fsub frt, fra, frb with optional condition record (rc).
void Assembler::fsub(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FSUB, frt, fra, frb, rc);
}
   2089 
   2090 
// A-form: fadd frt, fra, frb with optional condition record (rc).
void Assembler::fadd(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FADD, frt, fra, frb, rc);
}
   2095 
   2096 
   2097 void Assembler::fmul(const DoubleRegister frt, const DoubleRegister fra,
   2098                      const DoubleRegister frc, RCBit rc) {
   2099   emit(EXT4 | FMUL | frt.code() * B21 | fra.code() * B16 | frc.code() * B6 |
   2100        rc);
   2101 }
   2102 
   2103 
// A-form: fdiv frt, fra, frb with optional condition record (rc).
void Assembler::fdiv(const DoubleRegister frt, const DoubleRegister fra,
                     const DoubleRegister frb, RCBit rc) {
  a_form(EXT4 | FDIV, frt, fra, frb, rc);
}
   2108 
   2109 
   2110 void Assembler::fcmpu(const DoubleRegister fra, const DoubleRegister frb,
   2111                       CRegister cr) {
   2112   DCHECK(cr.code() >= 0 && cr.code() <= 7);
   2113   emit(EXT4 | FCMPU | cr.code() * B23 | fra.code() * B16 | frb.code() * B11);
   2114 }
   2115 
   2116 
   2117 void Assembler::fmr(const DoubleRegister frt, const DoubleRegister frb,
   2118                     RCBit rc) {
   2119   emit(EXT4 | FMR | frt.code() * B21 | frb.code() * B11 | rc);
   2120 }
   2121 
   2122 
   2123 void Assembler::fctiwz(const DoubleRegister frt, const DoubleRegister frb) {
   2124   emit(EXT4 | FCTIWZ | frt.code() * B21 | frb.code() * B11);
   2125 }
   2126 
   2127 
   2128 void Assembler::fctiw(const DoubleRegister frt, const DoubleRegister frb) {
   2129   emit(EXT4 | FCTIW | frt.code() * B21 | frb.code() * B11);
   2130 }
   2131 
   2132 
   2133 void Assembler::frin(const DoubleRegister frt, const DoubleRegister frb,
   2134                      RCBit rc) {
   2135   emit(EXT4 | FRIN | frt.code() * B21 | frb.code() * B11 | rc);
   2136 }
   2137 
   2138 
   2139 void Assembler::friz(const DoubleRegister frt, const DoubleRegister frb,
   2140                      RCBit rc) {
   2141   emit(EXT4 | FRIZ | frt.code() * B21 | frb.code() * B11 | rc);
   2142 }
   2143 
   2144 
   2145 void Assembler::frip(const DoubleRegister frt, const DoubleRegister frb,
   2146                      RCBit rc) {
   2147   emit(EXT4 | FRIP | frt.code() * B21 | frb.code() * B11 | rc);
   2148 }
   2149 
   2150 
   2151 void Assembler::frim(const DoubleRegister frt, const DoubleRegister frb,
   2152                      RCBit rc) {
   2153   emit(EXT4 | FRIM | frt.code() * B21 | frb.code() * B11 | rc);
   2154 }
   2155 
   2156 
   2157 void Assembler::frsp(const DoubleRegister frt, const DoubleRegister frb,
   2158                      RCBit rc) {
   2159   emit(EXT4 | FRSP | frt.code() * B21 | frb.code() * B11 | rc);
   2160 }
   2161 
   2162 
   2163 void Assembler::fcfid(const DoubleRegister frt, const DoubleRegister frb,
   2164                       RCBit rc) {
   2165   emit(EXT4 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);
   2166 }
   2167 
   2168 
   2169 void Assembler::fcfidu(const DoubleRegister frt, const DoubleRegister frb,
   2170                        RCBit rc) {
   2171   emit(EXT4 | FCFIDU | frt.code() * B21 | frb.code() * B11 | rc);
   2172 }
   2173 
   2174 
   2175 void Assembler::fcfidus(const DoubleRegister frt, const DoubleRegister frb,
   2176                         RCBit rc) {
   2177   emit(EXT3 | FCFIDU | frt.code() * B21 | frb.code() * B11 | rc);
   2178 }
   2179 
   2180 
   2181 void Assembler::fcfids(const DoubleRegister frt, const DoubleRegister frb,
   2182                        RCBit rc) {
   2183   emit(EXT3 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);
   2184 }
   2185 
   2186 
   2187 void Assembler::fctid(const DoubleRegister frt, const DoubleRegister frb,
   2188                       RCBit rc) {
   2189   emit(EXT4 | FCTID | frt.code() * B21 | frb.code() * B11 | rc);
   2190 }
   2191 
   2192 
   2193 void Assembler::fctidz(const DoubleRegister frt, const DoubleRegister frb,
   2194                        RCBit rc) {
   2195   emit(EXT4 | FCTIDZ | frt.code() * B21 | frb.code() * B11 | rc);
   2196 }
   2197 
   2198 
   2199 void Assembler::fctidu(const DoubleRegister frt, const DoubleRegister frb,
   2200                        RCBit rc) {
   2201   emit(EXT4 | FCTIDU | frt.code() * B21 | frb.code() * B11 | rc);
   2202 }
   2203 
   2204 
   2205 void Assembler::fctiduz(const DoubleRegister frt, const DoubleRegister frb,
   2206                         RCBit rc) {
   2207   emit(EXT4 | FCTIDUZ | frt.code() * B21 | frb.code() * B11 | rc);
   2208 }
   2209 
   2210 
   2211 void Assembler::fsel(const DoubleRegister frt, const DoubleRegister fra,
   2212                      const DoubleRegister frc, const DoubleRegister frb,
   2213                      RCBit rc) {
   2214   emit(EXT4 | FSEL | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
   2215        frc.code() * B6 | rc);
   2216 }
   2217 
   2218 
   2219 void Assembler::fneg(const DoubleRegister frt, const DoubleRegister frb,
   2220                      RCBit rc) {
   2221   emit(EXT4 | FNEG | frt.code() * B21 | frb.code() * B11 | rc);
   2222 }
   2223 
   2224 
   2225 void Assembler::mtfsb0(FPSCRBit bit, RCBit rc) {
   2226   DCHECK(static_cast<int>(bit) < 32);
   2227   int bt = bit;
   2228   emit(EXT4 | MTFSB0 | bt * B21 | rc);
   2229 }
   2230 
   2231 
   2232 void Assembler::mtfsb1(FPSCRBit bit, RCBit rc) {
   2233   DCHECK(static_cast<int>(bit) < 32);
   2234   int bt = bit;
   2235   emit(EXT4 | MTFSB1 | bt * B21 | rc);
   2236 }
   2237 
   2238 
   2239 void Assembler::mtfsfi(int bf, int immediate, RCBit rc) {
   2240   emit(EXT4 | MTFSFI | bf * B23 | immediate * B12 | rc);
   2241 }
   2242 
   2243 
   2244 void Assembler::mffs(const DoubleRegister frt, RCBit rc) {
   2245   emit(EXT4 | MFFS | frt.code() * B21 | rc);
   2246 }
   2247 
   2248 
   2249 void Assembler::mtfsf(const DoubleRegister frb, bool L, int FLM, bool W,
   2250                       RCBit rc) {
   2251   emit(EXT4 | MTFSF | frb.code() * B11 | W * B16 | FLM * B17 | L * B25 | rc);
   2252 }
   2253 
   2254 
   2255 void Assembler::fsqrt(const DoubleRegister frt, const DoubleRegister frb,
   2256                       RCBit rc) {
   2257   emit(EXT4 | FSQRT | frt.code() * B21 | frb.code() * B11 | rc);
   2258 }
   2259 
   2260 
   2261 void Assembler::fabs(const DoubleRegister frt, const DoubleRegister frb,
   2262                      RCBit rc) {
   2263   emit(EXT4 | FABS | frt.code() * B21 | frb.code() * B11 | rc);
   2264 }
   2265 
   2266 
   2267 void Assembler::fmadd(const DoubleRegister frt, const DoubleRegister fra,
   2268                       const DoubleRegister frc, const DoubleRegister frb,
   2269                       RCBit rc) {
   2270   emit(EXT4 | FMADD | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
   2271        frc.code() * B6 | rc);
   2272 }
   2273 
   2274 
   2275 void Assembler::fmsub(const DoubleRegister frt, const DoubleRegister fra,
   2276                       const DoubleRegister frc, const DoubleRegister frb,
   2277                       RCBit rc) {
   2278   emit(EXT4 | FMSUB | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
   2279        frc.code() * B6 | rc);
   2280 }
   2281 
   2282 
   2283 // Pseudo instructions.
   2284 void Assembler::nop(int type) {
   2285   Register reg = r0;
   2286   switch (type) {
   2287     case NON_MARKING_NOP:
   2288       reg = r0;
   2289       break;
   2290     case GROUP_ENDING_NOP:
   2291       reg = r2;
   2292       break;
   2293     case DEBUG_BREAK_NOP:
   2294       reg = r3;
   2295       break;
   2296     default:
   2297       UNIMPLEMENTED();
   2298   }
   2299 
   2300   ori(reg, reg, Operand::Zero());
   2301 }
   2302 
   2303 
   2304 bool Assembler::IsNop(Instr instr, int type) {
   2305   int reg = 0;
   2306   switch (type) {
   2307     case NON_MARKING_NOP:
   2308       reg = 0;
   2309       break;
   2310     case GROUP_ENDING_NOP:
   2311       reg = 2;
   2312       break;
   2313     case DEBUG_BREAK_NOP:
   2314       reg = 3;
   2315       break;
   2316     default:
   2317       UNIMPLEMENTED();
   2318   }
   2319   return instr == (ORI | reg * B21 | reg * B16);
   2320 }
   2321 
   2322 
// Grows the code buffer so at least |needed| more bytes are available,
// relocating the instruction stream (at the start of the buffer) and the
// reloc info (which grows downward from the end) into the new allocation.
void Assembler::GrowBuffer(int needed) {
  if (!own_buffer_) FATAL("external code buffer is too small");

  // Compute new buffer size.
  CodeDesc desc;  // the new buffer
  if (buffer_size_ < 4 * KB) {
    desc.buffer_size = 4 * KB;
  } else if (buffer_size_ < 1 * MB) {
    // Double small buffers; grow large buffers linearly to bound slack.
    desc.buffer_size = 2 * buffer_size_;
  } else {
    desc.buffer_size = buffer_size_ + 1 * MB;
  }
  // If the growth policy still leaves less than |needed| free bytes,
  // grow by exactly the shortfall.
  int space = buffer_space() + (desc.buffer_size - buffer_size_);
  if (space < needed) {
    desc.buffer_size += needed - space;
  }
  CHECK_GT(desc.buffer_size, 0);  // no overflow

  // Set up new buffer.
  desc.buffer = NewArray<byte>(desc.buffer_size);
  desc.origin = this;

  desc.instr_size = pc_offset();
  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();

  // Copy the data.  pc_delta shifts the instruction stream (anchored at
  // the buffer start); rc_delta shifts the reloc info (anchored at the end).
  intptr_t pc_delta = desc.buffer - buffer_;
  intptr_t rc_delta =
      (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
  memmove(desc.buffer, buffer_, desc.instr_size);
  memmove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
          desc.reloc_size);

  // Switch buffers.
  DeleteArray(buffer_);
  buffer_ = desc.buffer;
  buffer_size_ = desc.buffer_size;
  pc_ += pc_delta;
  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
                               reloc_info_writer.last_pc() + pc_delta);

  // Nothing else to do here since we keep all internal references and
  // deferred relocation entries relative to the buffer (until
  // EmitRelocations).
}
   2368 
   2369 
   2370 void Assembler::db(uint8_t data) {
   2371   CheckBuffer();
   2372   *reinterpret_cast<uint8_t*>(pc_) = data;
   2373   pc_ += sizeof(uint8_t);
   2374 }
   2375 
   2376 
   2377 void Assembler::dd(uint32_t data) {
   2378   CheckBuffer();
   2379   *reinterpret_cast<uint32_t*>(pc_) = data;
   2380   pc_ += sizeof(uint32_t);
   2381 }
   2382 
   2383 
   2384 void Assembler::dq(uint64_t value) {
   2385   CheckBuffer();
   2386   *reinterpret_cast<uint64_t*>(pc_) = value;
   2387   pc_ += sizeof(uint64_t);
   2388 }
   2389 
   2390 
   2391 void Assembler::dp(uintptr_t data) {
   2392   CheckBuffer();
   2393   *reinterpret_cast<uintptr_t*>(pc_) = data;
   2394   pc_ += sizeof(uintptr_t);
   2395 }
   2396 
   2397 
// Queues a deferred relocation record for the current pc offset.  The
// actual reloc info stream is written later by EmitRelocations().
void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
  if (RelocInfo::IsNone(rmode) ||
      // Don't record external references unless the heap will be serialized.
      (rmode == RelocInfo::EXTERNAL_REFERENCE && !serializer_enabled() &&
       !emit_debug_code())) {
    return;
  }
  // Code targets with id carry the currently recorded AST id as payload.
  if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
    data = RecordedAstId().ToInt();
    ClearRecordedAstId();
  }
  DeferredRelocInfo rinfo(pc_offset(), rmode, data);
  relocations_.push_back(rinfo);
}
   2412 
   2413 
// Writes out all deferred relocation records and converts buffer-relative
// internal references to absolute addresses.  Called once code emission is
// complete (all labels are bound by now).
void Assembler::EmitRelocations() {
  EnsureSpaceFor(relocations_.size() * kMaxRelocSize);

  for (std::vector<DeferredRelocInfo>::iterator it = relocations_.begin();
       it != relocations_.end(); it++) {
    RelocInfo::Mode rmode = it->rmode();
    Address pc = buffer_ + it->position();
    Code* code = NULL;
    RelocInfo rinfo(isolate(), pc, rmode, it->data(), code);

    // Fix up internal references now that they are guaranteed to be bound.
    if (RelocInfo::IsInternalReference(rmode)) {
      // Jump table entry: the slot holds a buffer-relative offset.
      intptr_t pos = reinterpret_cast<intptr_t>(Memory::Address_at(pc));
      Memory::Address_at(pc) = buffer_ + pos;
    } else if (RelocInfo::IsInternalReferenceEncoded(rmode)) {
      // mov sequence: the encoded target is a buffer-relative offset.
      intptr_t pos = reinterpret_cast<intptr_t>(target_address_at(pc, code));
      set_target_address_at(isolate(), pc, code, buffer_ + pos,
                            SKIP_ICACHE_FLUSH);
    }

    reloc_info_writer.Write(&rinfo);
  }

  reloc_info_writer.Finish();
}
   2441 
   2442 
   2443 void Assembler::BlockTrampolinePoolFor(int instructions) {
   2444   BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
   2445 }
   2446 
   2447 
void Assembler::CheckTrampolinePool() {
  // Some small sequences of instructions must not be broken up by the
  // insertion of a trampoline pool; such sequences are protected by setting
  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
  // which are both checked here. Also, recursive calls to CheckTrampolinePool
  // are blocked by trampoline_pool_blocked_nesting_.
  if (trampoline_pool_blocked_nesting_ > 0) return;
  if (pc_offset() < no_trampoline_pool_before_) {
    // Re-check once the blocked region has been passed.
    next_trampoline_check_ = no_trampoline_pool_before_;
    return;
  }

  DCHECK(!trampoline_emitted_);
  if (tracked_branch_count_ > 0) {
    // One trampoline slot (one instruction) per tracked branch.
    int size = tracked_branch_count_ * kInstrSize;

    // As we are only going to emit trampoline once, we need to prevent any
    // further emission.
    trampoline_emitted_ = true;
    next_trampoline_check_ = kMaxInt;

    // First we emit jump, then we emit trampoline pool.
    b(size + kInstrSize, LeaveLK);
    // Each pool slot is an unconditional branch past the end of the pool;
    // it will be patched to the real target when the branch is bound.
    for (int i = size; i > 0; i -= kInstrSize) {
      b(i, LeaveLK);
    }

    trampoline_ = Trampoline(pc_offset() - size, tracked_branch_count_);
  }
}
   2478 
   2479 
   2480 }  // namespace internal
   2481 }  // namespace v8
   2482 
   2483 #endif  // V8_TARGET_ARCH_PPC
   2484