Home | History | Annotate | Download | only in ppc
      1 // Copyright 2014 the V8 project authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "src/compiler/code-generator.h"
      6 
      7 #include "src/compilation-info.h"
      8 #include "src/compiler/code-generator-impl.h"
      9 #include "src/compiler/gap-resolver.h"
     10 #include "src/compiler/node-matchers.h"
     11 #include "src/compiler/osr.h"
     12 #include "src/ppc/macro-assembler-ppc.h"
     13 
     14 namespace v8 {
     15 namespace internal {
     16 namespace compiler {
     17 
// Shorthand for emitting instructions through the macro assembler.
#define __ masm()->


// General-purpose scratch register reserved by the PPC code generator.
#define kScratchReg r11
     22 
     23 
     24 // Adds PPC-specific methods to convert InstructionOperands.
     25 class PPCOperandConverter final : public InstructionOperandConverter {
     26  public:
     27   PPCOperandConverter(CodeGenerator* gen, Instruction* instr)
     28       : InstructionOperandConverter(gen, instr) {}
     29 
     30   size_t OutputCount() { return instr_->OutputCount(); }
     31 
     32   RCBit OutputRCBit() const {
     33     switch (instr_->flags_mode()) {
     34       case kFlags_branch:
     35       case kFlags_deoptimize:
     36       case kFlags_set:
     37       case kFlags_trap:
     38         return SetRC;
     39       case kFlags_none:
     40         return LeaveRC;
     41     }
     42     UNREACHABLE();
     43     return LeaveRC;
     44   }
     45 
     46   bool CompareLogical() const {
     47     switch (instr_->flags_condition()) {
     48       case kUnsignedLessThan:
     49       case kUnsignedGreaterThanOrEqual:
     50       case kUnsignedLessThanOrEqual:
     51       case kUnsignedGreaterThan:
     52         return true;
     53       default:
     54         return false;
     55     }
     56     UNREACHABLE();
     57     return false;
     58   }
     59 
     60   Operand InputImmediate(size_t index) {
     61     Constant constant = ToConstant(instr_->InputAt(index));
     62     switch (constant.type()) {
     63       case Constant::kInt32:
     64         return Operand(constant.ToInt32());
     65       case Constant::kFloat32:
     66         return Operand(
     67             isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
     68       case Constant::kFloat64:
     69         return Operand(
     70             isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
     71       case Constant::kInt64:
     72 #if V8_TARGET_ARCH_PPC64
     73         return Operand(constant.ToInt64());
     74 #endif
     75       case Constant::kExternalReference:
     76       case Constant::kHeapObject:
     77       case Constant::kRpoNumber:
     78         break;
     79     }
     80     UNREACHABLE();
     81     return Operand::Zero();
     82   }
     83 
     84   MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
     85     const size_t index = *first_index;
     86     *mode = AddressingModeField::decode(instr_->opcode());
     87     switch (*mode) {
     88       case kMode_None:
     89         break;
     90       case kMode_MRI:
     91         *first_index += 2;
     92         return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
     93       case kMode_MRR:
     94         *first_index += 2;
     95         return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
     96     }
     97     UNREACHABLE();
     98     return MemOperand(r0);
     99   }
    100 
    101   MemOperand MemoryOperand(AddressingMode* mode, size_t first_index = 0) {
    102     return MemoryOperand(mode, &first_index);
    103   }
    104 
    105   MemOperand ToMemOperand(InstructionOperand* op) const {
    106     DCHECK_NOT_NULL(op);
    107     DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
    108     return SlotToMemOperand(AllocatedOperand::cast(op)->index());
    109   }
    110 
    111   MemOperand SlotToMemOperand(int slot) const {
    112     FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
    113     return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
    114   }
    115 };
    116 
    117 
    118 static inline bool HasRegisterInput(Instruction* instr, size_t index) {
    119   return instr->InputAt(index)->IsRegister();
    120 }
    121 
    122 
    123 namespace {
    124 
// Out-of-line code that materializes a float32 quiet NaN into |result|
// (presumably the failure path of a checked float32 load — confirm at the
// use sites).
class OutOfLineLoadNAN32 final : public OutOfLineCode {
 public:
  OutOfLineLoadNAN32(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    // Loads the single-precision quiet NaN bit pattern as a double literal.
    __ LoadDoubleLiteral(result_, std::numeric_limits<float>::quiet_NaN(),
                         kScratchReg);
  }

 private:
  DoubleRegister const result_;  // Destination FP register.
};
    138 
    139 
// Out-of-line code that materializes a float64 quiet NaN into |result|
// (presumably the failure path of a checked float64 load — confirm at the
// use sites).
class OutOfLineLoadNAN64 final : public OutOfLineCode {
 public:
  OutOfLineLoadNAN64(CodeGenerator* gen, DoubleRegister result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final {
    __ LoadDoubleLiteral(result_, std::numeric_limits<double>::quiet_NaN(),
                         kScratchReg);
  }

 private:
  DoubleRegister const result_;  // Destination FP register.
};
    153 
    154 
// Out-of-line code that loads the integer constant 0 into |result|
// (presumably the failure path of a checked integer load — confirm at the
// use sites).
class OutOfLineLoadZero final : public OutOfLineCode {
 public:
  OutOfLineLoadZero(CodeGenerator* gen, Register result)
      : OutOfLineCode(gen), result_(result) {}

  void Generate() final { __ li(result_, Operand::Zero()); }

 private:
  Register const result_;  // Destination general-purpose register.
};
    165 
    166 
// Out-of-line code for the generational/incremental-marking write barrier:
// filters out stores that cannot need a barrier (smis, uninteresting pages)
// and otherwise calls the RecordWriteStub on the slot at object + offset.
class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  // Variant where the slot offset from |object| lives in a register.
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register offset,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode)
      : OutOfLineCode(gen),
        object_(object),
        offset_(offset),
        offset_immediate_(0),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
        must_save_lr_(!gen->frame_access_state()->has_frame()) {}

  // Variant where the slot offset from |object| is a compile-time immediate.
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode)
      : OutOfLineCode(gen),
        object_(object),
        offset_(no_reg),
        offset_immediate_(offset),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
        must_save_lr_(!gen->frame_access_state()->has_frame()) {}

  void Generate() final {
    // Smi values never need a barrier; the check is emitted only for modes
    // in which the value might be a smi.
    if (mode_ > RecordWriteMode::kValueIsPointer) {
      __ JumpIfSmi(value_, exit());
    }
    // Skip the barrier entirely when the value's page is not marked as
    // interesting for pointers-to-here tracking.
    __ CheckPageFlag(value_, scratch0_,
                     MemoryChunk::kPointersToHereAreInterestingMask, eq,
                     exit());
    RememberedSetAction const remembered_set_action =
        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
                                             : OMIT_REMEMBERED_SET;
    SaveFPRegsMode const save_fp_mode =
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
    if (must_save_lr_) {
      // We need to save and restore lr if the frame was elided.
      __ mflr(scratch1_);
      __ Push(scratch1_);
    }
    RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
                         remembered_set_action, save_fp_mode);
    // Compute the slot address (object + offset) into scratch1_, which the
    // stub takes as its address register.
    if (offset_.is(no_reg)) {
      __ addi(scratch1_, object_, Operand(offset_immediate_));
    } else {
      DCHECK_EQ(0, offset_immediate_);
      __ add(scratch1_, object_, offset_);
    }
    // With an embedded constant pool and an elided frame, mark the constant
    // pool unavailable for the duration of the stub call.
    if (must_save_lr_ && FLAG_enable_embedded_constant_pool) {
      ConstantPoolUnavailableScope constant_pool_unavailable(masm());
      __ CallStub(&stub);
    } else {
      __ CallStub(&stub);
    }
    if (must_save_lr_) {
      // We need to save and restore lr if the frame was elided.
      __ Pop(scratch1_);
      __ mtlr(scratch1_);
    }
  }

 private:
  Register const object_;           // Object whose field is being written.
  Register const offset_;           // Slot offset register, or no_reg.
  int32_t const offset_immediate_;  // Valid if offset_.is(no_reg).
  Register const value_;            // Value being stored.
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
  bool must_save_lr_;  // True when the frame was elided, so lr is live.
};
    243 
    244 
// Maps a TurboFan FlagsCondition — together with the ArchOpcode that
// produced the flags, needed to validate overflow checks — onto the PPC
// condition code to branch on.
Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
  switch (condition) {
    case kEqual:
      return eq;
    case kNotEqual:
      return ne;
    // Signed and unsigned variants share a condition code; the choice of
    // arithmetic vs. logical compare (see CompareLogical) is what
    // distinguishes them.
    case kSignedLessThan:
    case kUnsignedLessThan:
      return lt;
    case kSignedGreaterThanOrEqual:
    case kUnsignedGreaterThanOrEqual:
      return ge;
    case kSignedLessThanOrEqual:
    case kUnsignedLessThanOrEqual:
      return le;
    case kSignedGreaterThan:
    case kUnsignedGreaterThan:
      return gt;
    case kOverflow:
      // Overflow checked for add/sub only.
      switch (op) {
#if V8_TARGET_ARCH_PPC64
        case kPPC_Add32:
        case kPPC_Add64:
        case kPPC_Sub:
#endif
        case kPPC_AddWithOverflow32:
        case kPPC_SubWithOverflow32:
          // lt/ge test the sign of the scratch value set up by the
          // ASSEMBLE_*_WITH_OVERFLOW macros below (negative == overflow).
          return lt;
        default:
          break;
      }
      break;
    case kNotOverflow:
      switch (op) {
#if V8_TARGET_ARCH_PPC64
        case kPPC_Add32:
        case kPPC_Add64:
        case kPPC_Sub:
#endif
        case kPPC_AddWithOverflow32:
        case kPPC_SubWithOverflow32:
          return ge;
        default:
          break;
      }
      break;
    default:
      break;
  }
  // Any other combination (e.g. overflow on a non-add/sub op) is a bug in
  // the instruction selector.
  UNREACHABLE();
  return kNoCondition;
}
    298 
    299 }  // namespace
    300 
// Emits a unary FP instruction; when |round| is true the double-precision
// result is additionally rounded to single precision with frsp.
#define ASSEMBLE_FLOAT_UNOP_RC(asm_instr, round)                     \
  do {                                                               \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                 i.OutputRCBit());                                   \
    if (round) {                                                     \
      __ frsp(i.OutputDoubleRegister(), i.OutputDoubleRegister());   \
    }                                                                \
  } while (0)

// Emits a binary FP instruction; when |round| is true the double-precision
// result is additionally rounded to single precision with frsp.
#define ASSEMBLE_FLOAT_BINOP_RC(asm_instr, round)                    \
  do {                                                               \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                 i.InputDoubleRegister(1), i.OutputRCBit());         \
    if (round) {                                                     \
      __ frsp(i.OutputDoubleRegister(), i.OutputDoubleRegister());   \
    }                                                                \
  } while (0)

// Emits the register-register or register-immediate form of a binary op,
// depending on how input 1 was allocated.
#define ASSEMBLE_BINOP(asm_instr_reg, asm_instr_imm)           \
  do {                                                         \
    if (HasRegisterInput(instr, 1)) {                          \
      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
                       i.InputRegister(1));                    \
    } else {                                                   \
      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
                       i.InputImmediate(1));                   \
    }                                                          \
  } while (0)


// Same as ASSEMBLE_BINOP, but also threads through the record bit (Rc).
#define ASSEMBLE_BINOP_RC(asm_instr_reg, asm_instr_imm)        \
  do {                                                         \
    if (HasRegisterInput(instr, 1)) {                          \
      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
                       i.InputRegister(1), i.OutputRCBit());   \
    } else {                                                   \
      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
                       i.InputInt32(1), i.OutputRCBit());      \
    }                                                          \
  } while (0)


// Like ASSEMBLE_BINOP_RC, but the immediate form takes a raw int32 rather
// than an Operand.
#define ASSEMBLE_BINOP_INT_RC(asm_instr_reg, asm_instr_imm)    \
  do {                                                         \
    if (HasRegisterInput(instr, 1)) {                          \
      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
                       i.InputRegister(1), i.OutputRCBit());   \
    } else {                                                   \
      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
                       i.InputInt32(1), i.OutputRCBit());      \
    }                                                          \
  } while (0)


// Emits an add that leaves an overflow indication in kScratchReg.
#define ASSEMBLE_ADD_WITH_OVERFLOW()                                    \
  do {                                                                  \
    if (HasRegisterInput(instr, 1)) {                                   \
      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                i.InputRegister(1), kScratchReg, r0);   \
    } else {                                                            \
      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                i.InputInt32(1), kScratchReg, r0);      \
    }                                                                   \
  } while (0)


// Emits a subtract that leaves an overflow indication in kScratchReg. The
// immediate form is implemented as an add of the negated immediate.
// NOTE(review): -i.InputInt32(1) overflows for kMinInt; presumably the
// instruction selector never emits that immediate here — confirm.
#define ASSEMBLE_SUB_WITH_OVERFLOW()                                    \
  do {                                                                  \
    if (HasRegisterInput(instr, 1)) {                                   \
      __ SubAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                i.InputRegister(1), kScratchReg, r0);   \
    } else {                                                            \
      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                -i.InputInt32(1), kScratchReg, r0);     \
    }                                                                   \
  } while (0)


// 32-bit overflow variants. On PPC64 the scratch value is sign-extended
// with extsw(..., SetRC), so CR0 reflects its sign and kOverflow maps to
// lt / kNotOverflow to ge (see FlagsConditionToCondition above). On 32-bit
// targets they alias the plain macros.
#if V8_TARGET_ARCH_PPC64
#define ASSEMBLE_ADD_WITH_OVERFLOW32()         \
  do {                                         \
    ASSEMBLE_ADD_WITH_OVERFLOW();              \
    __ extsw(kScratchReg, kScratchReg, SetRC); \
  } while (0)

#define ASSEMBLE_SUB_WITH_OVERFLOW32()         \
  do {                                         \
    ASSEMBLE_SUB_WITH_OVERFLOW();              \
    __ extsw(kScratchReg, kScratchReg, SetRC); \
  } while (0)
#else
#define ASSEMBLE_ADD_WITH_OVERFLOW32 ASSEMBLE_ADD_WITH_OVERFLOW
#define ASSEMBLE_SUB_WITH_OVERFLOW32 ASSEMBLE_SUB_WITH_OVERFLOW
#endif


// Emits a compare into cr0, choosing between arithmetic (signed) and
// logical (unsigned) forms via CompareLogical(), and between the register
// and immediate encodings by pasting an 'i' suffix onto the mnemonic.
#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr)                        \
  do {                                                                 \
    const CRegister cr = cr0;                                          \
    if (HasRegisterInput(instr, 1)) {                                  \
      if (i.CompareLogical()) {                                        \
        __ cmpl_instr(i.InputRegister(0), i.InputRegister(1), cr);     \
      } else {                                                         \
        __ cmp_instr(i.InputRegister(0), i.InputRegister(1), cr);      \
      }                                                                \
    } else {                                                           \
      if (i.CompareLogical()) {                                        \
        __ cmpl_instr##i(i.InputRegister(0), i.InputImmediate(1), cr); \
      } else {                                                         \
        __ cmp_instr##i(i.InputRegister(0), i.InputImmediate(1), cr);  \
      }                                                                \
    }                                                                  \
    DCHECK_EQ(SetRC, i.OutputRCBit());                                 \
  } while (0)


// Emits an FP compare into cr0.
#define ASSEMBLE_FLOAT_COMPARE(cmp_instr)                                 \
  do {                                                                    \
    const CRegister cr = cr0;                                             \
    __ cmp_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1), cr); \
    DCHECK_EQ(SetRC, i.OutputRCBit());                                    \
  } while (0)


// Computes dividend - (dividend / divisor) * divisor, i.e. the remainder,
// from the given divide and multiply mnemonics.
#define ASSEMBLE_MODULO(div_instr, mul_instr)                        \
  do {                                                               \
    const Register scratch = kScratchReg;                            \
    __ div_instr(scratch, i.InputRegister(0), i.InputRegister(1));   \
    __ mul_instr(scratch, scratch, i.InputRegister(1));              \
    __ sub(i.OutputRegister(), i.InputRegister(0), scratch, LeaveOE, \
           i.OutputRCBit());                                         \
  } while (0)


// Calls out to the C mod_two_doubles helper to compute fmod(x, y).
#define ASSEMBLE_FLOAT_MODULO()                                               \
  do {                                                                        \
    FrameScope scope(masm(), StackFrame::MANUAL);                             \
    __ PrepareCallCFunction(0, 2, kScratchReg);                               \
    __ MovToFloatParameters(i.InputDoubleRegister(0),                         \
                            i.InputDoubleRegister(1));                        \
    __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), \
                     0, 2);                                                   \
    __ MovFromFloatResult(i.OutputDoubleRegister());                          \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                                      \
  } while (0)

// Calls out to the C ieee754 helper for a unary math function (sin, log...).
#define ASSEMBLE_IEEE754_UNOP(name)                                            \
  do {                                                                         \
    /* TODO(bmeurer): We should really get rid of this special instruction, */ \
    /* and generate a CallAddress instruction instead. */                      \
    FrameScope scope(masm(), StackFrame::MANUAL);                              \
    __ PrepareCallCFunction(0, 1, kScratchReg);                                \
    __ MovToFloatParameter(i.InputDoubleRegister(0));                          \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()),  \
                     0, 1);                                                    \
    /* Move the result in the double result register. */                       \
    __ MovFromFloatResult(i.OutputDoubleRegister());                           \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                                       \
  } while (0)

// Calls out to the C ieee754 helper for a binary math function (pow, ...).
#define ASSEMBLE_IEEE754_BINOP(name)                                           \
  do {                                                                         \
    /* TODO(bmeurer): We should really get rid of this special instruction, */ \
    /* and generate a CallAddress instruction instead. */                      \
    FrameScope scope(masm(), StackFrame::MANUAL);                              \
    __ PrepareCallCFunction(0, 2, kScratchReg);                                \
    __ MovToFloatParameters(i.InputDoubleRegister(0),                          \
                           i.InputDoubleRegister(1));                          \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()),  \
                     0, 2);                                                    \
    /* Move the result in the double result register. */                       \
    __ MovFromFloatResult(i.OutputDoubleRegister());                           \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                                       \
  } while (0)
    475 
    476 #define ASSEMBLE_FLOAT_MAX()                                                  \
    477   do {                                                                        \
    478     DoubleRegister left_reg = i.InputDoubleRegister(0);                       \
    479     DoubleRegister right_reg = i.InputDoubleRegister(1);                      \
    480     DoubleRegister result_reg = i.OutputDoubleRegister();                     \
    481     Label check_nan_left, check_zero, return_left, return_right, done;        \
    482     __ fcmpu(left_reg, right_reg);                                            \
    483     __ bunordered(&check_nan_left);                                           \
    484     __ beq(&check_zero);                                                      \
    485     __ bge(&return_left);                                                     \
    486     __ b(&return_right);                                                      \
    487                                                                               \
    488     __ bind(&check_zero);                                                     \
    489     __ fcmpu(left_reg, kDoubleRegZero);                                       \
    490     /* left == right != 0. */                                                 \
    491     __ bne(&return_left);                                                     \
    492     /* At this point, both left and right are either 0 or -0. */              \
    493     __ fadd(result_reg, left_reg, right_reg);                                 \
    494     __ b(&done);                                                              \
    495                                                                               \
    496     __ bind(&check_nan_left);                                                 \
    497     __ fcmpu(left_reg, left_reg);                                             \
    498     /* left == NaN. */                                                        \
    499     __ bunordered(&return_left);                                              \
    500     __ bind(&return_right);                                                   \
    501     if (!right_reg.is(result_reg)) {                                          \
    502       __ fmr(result_reg, right_reg);                                          \
    503     }                                                                         \
    504     __ b(&done);                                                              \
    505                                                                               \
    506     __ bind(&return_left);                                                    \
    507     if (!left_reg.is(result_reg)) {                                           \
    508       __ fmr(result_reg, left_reg);                                           \
    509     }                                                                         \
    510     __ bind(&done);                                                           \
    511   } while (0)                                                                 \
    512 
    513 
    514 #define ASSEMBLE_FLOAT_MIN()                                                   \
    515   do {                                                                         \
    516     DoubleRegister left_reg = i.InputDoubleRegister(0);                        \
    517     DoubleRegister right_reg = i.InputDoubleRegister(1);                       \
    518     DoubleRegister result_reg = i.OutputDoubleRegister();                      \
    519     Label check_nan_left, check_zero, return_left, return_right, done;         \
    520     __ fcmpu(left_reg, right_reg);                                             \
    521     __ bunordered(&check_nan_left);                                            \
    522     __ beq(&check_zero);                                                       \
    523     __ ble(&return_left);                                                      \
    524     __ b(&return_right);                                                       \
    525                                                                                \
    526     __ bind(&check_zero);                                                      \
    527     __ fcmpu(left_reg, kDoubleRegZero);                                        \
    528     /* left == right != 0. */                                                  \
    529     __ bne(&return_left);                                                      \
    530     /* At this point, both left and right are either 0 or -0. */               \
    531     /* Min: The algorithm is: -((-L) + (-R)), which in case of L and R being */\
    532     /* different registers is most efficiently expressed as -((-L) - R). */    \
    533     __ fneg(left_reg, left_reg);                                               \
    534     if (left_reg.is(right_reg)) {                                              \
    535       __ fadd(result_reg, left_reg, right_reg);                                \
    536     } else {                                                                   \
    537       __ fsub(result_reg, left_reg, right_reg);                                \
    538     }                                                                          \
    539     __ fneg(result_reg, result_reg);                                           \
    540     __ b(&done);                                                               \
    541                                                                                \
    542     __ bind(&check_nan_left);                                                  \
    543     __ fcmpu(left_reg, left_reg);                                              \
    544     /* left == NaN. */                                                         \
    545     __ bunordered(&return_left);                                               \
    546                                                                                \
    547     __ bind(&return_right);                                                    \
    548     if (!right_reg.is(result_reg)) {                                           \
    549       __ fmr(result_reg, right_reg);                                           \
    550     }                                                                          \
    551     __ b(&done);                                                               \
    552                                                                                \
    553     __ bind(&return_left);                                                     \
    554     if (!left_reg.is(result_reg)) {                                            \
    555       __ fmr(result_reg, left_reg);                                            \
    556     }                                                                          \
    557     __ bind(&done);                                                            \
    558   } while (0)
    559 
    560 
    561 #define ASSEMBLE_LOAD_FLOAT(asm_instr, asm_instrx)    \
    562   do {                                                \
    563     DoubleRegister result = i.OutputDoubleRegister(); \
    564     AddressingMode mode = kMode_None;                 \
    565     MemOperand operand = i.MemoryOperand(&mode);      \
    566     if (mode == kMode_MRI) {                          \
    567       __ asm_instr(result, operand);                  \
    568     } else {                                          \
    569       __ asm_instrx(result, operand);                 \
    570     }                                                 \
    571     DCHECK_EQ(LeaveRC, i.OutputRCBit());              \
    572   } while (0)
    573 
    574 
    575 #define ASSEMBLE_LOAD_INTEGER(asm_instr, asm_instrx) \
    576   do {                                               \
    577     Register result = i.OutputRegister();            \
    578     AddressingMode mode = kMode_None;                \
    579     MemOperand operand = i.MemoryOperand(&mode);     \
    580     if (mode == kMode_MRI) {                         \
    581       __ asm_instr(result, operand);                 \
    582     } else {                                         \
    583       __ asm_instrx(result, operand);                \
    584     }                                                \
    585     DCHECK_EQ(LeaveRC, i.OutputRCBit());             \
    586   } while (0)
    587 
    588 
    589 #define ASSEMBLE_STORE_FLOAT32()                         \
    590   do {                                                   \
    591     size_t index = 0;                                    \
    592     AddressingMode mode = kMode_None;                    \
    593     MemOperand operand = i.MemoryOperand(&mode, &index); \
    594     DoubleRegister value = i.InputDoubleRegister(index); \
    595     __ frsp(kScratchDoubleReg, value);                   \
    596     if (mode == kMode_MRI) {                             \
    597       __ stfs(kScratchDoubleReg, operand);               \
    598     } else {                                             \
    599       __ stfsx(kScratchDoubleReg, operand);              \
    600     }                                                    \
    601     DCHECK_EQ(LeaveRC, i.OutputRCBit());                 \
    602   } while (0)
    603 
    604 
// Stores a float64 directly from the input double register: stfd for
// base+immediate (kMode_MRI), stfdx for base+index addressing. No rounding
// is needed, unlike the float32 variant.
#define ASSEMBLE_STORE_DOUBLE()                          \
  do {                                                   \
    size_t index = 0;                                    \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    DoubleRegister value = i.InputDoubleRegister(index); \
    if (mode == kMode_MRI) {                             \
      __ stfd(value, operand);                           \
    } else {                                             \
      __ stfdx(value, operand);                          \
    }                                                    \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                 \
  } while (0)
    618 
    619 
// Stores an integer register using |asm_instr| for base+immediate
// (kMode_MRI) addressing or |asm_instrx| for base+index addressing. The
// value operand follows the memory operand's inputs (index via |index|).
#define ASSEMBLE_STORE_INTEGER(asm_instr, asm_instrx)    \
  do {                                                   \
    size_t index = 0;                                    \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    Register value = i.InputRegister(index);             \
    if (mode == kMode_MRI) {                             \
      __ asm_instr(value, operand);                      \
    } else {                                             \
      __ asm_instrx(value, operand);                     \
    }                                                    \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                 \
  } while (0)
    633 
#if V8_TARGET_ARCH_PPC64
// TODO(mbrandy): fix paths that produce garbage in offset's upper 32-bits.
// Zero-extends |x| in place by clearing its upper 32 bits, so a 32-bit
// offset can be used safely in 64-bit indexed addressing.
#define CleanUInt32(x) __ ClearLeftImm(x, x, Operand(32))
#else
// 32-bit target: registers are already 32 bits wide, nothing to clean.
#define CleanUInt32(x)
#endif
    640 
// Bounds-checked FP load: compares the (unsigned) offset register from the
// kMode_MRR memory operand against the length in input 2 and, when the
// offset is >= length, jumps to an out-of-line stub that materializes a NaN
// of |width| bits into the result instead of loading.
// NOTE(review): the DCHECK pins mode to kMode_MRR, so the kMode_MRI branch
// below appears dead in practice; kept as-is pending confirmation.
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, asm_instrx, width)  \
  do {                                                             \
    DoubleRegister result = i.OutputDoubleRegister();              \
    size_t index = 0;                                              \
    AddressingMode mode = kMode_None;                              \
    MemOperand operand = i.MemoryOperand(&mode, index);            \
    DCHECK_EQ(kMode_MRR, mode);                                    \
    Register offset = operand.rb();                                \
    if (HasRegisterInput(instr, 2)) {                              \
      __ cmplw(offset, i.InputRegister(2));                        \
    } else {                                                       \
      __ cmplwi(offset, i.InputImmediate(2));                      \
    }                                                              \
    auto ool = new (zone()) OutOfLineLoadNAN##width(this, result); \
    __ bge(ool->entry());                                          \
    if (mode == kMode_MRI) {                                       \
      __ asm_instr(result, operand);                               \
    } else {                                                       \
      CleanUInt32(offset);                                         \
      __ asm_instrx(result, operand);                              \
    }                                                              \
    __ bind(ool->exit());                                          \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                           \
  } while (0)
    665 
// Bounds-checked integer load: same shape as the FP variant, but an
// out-of-bounds offset routes to an out-of-line stub that zeroes the result
// register instead of producing a NaN.
// NOTE(review): as above, the kMode_MRI branch appears dead given the
// DCHECK_EQ(kMode_MRR, mode); kept as-is.
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr, asm_instrx) \
  do {                                                       \
    Register result = i.OutputRegister();                    \
    size_t index = 0;                                        \
    AddressingMode mode = kMode_None;                        \
    MemOperand operand = i.MemoryOperand(&mode, index);      \
    DCHECK_EQ(kMode_MRR, mode);                              \
    Register offset = operand.rb();                          \
    if (HasRegisterInput(instr, 2)) {                        \
      __ cmplw(offset, i.InputRegister(2));                  \
    } else {                                                 \
      __ cmplwi(offset, i.InputImmediate(2));                \
    }                                                        \
    auto ool = new (zone()) OutOfLineLoadZero(this, result); \
    __ bge(ool->entry());                                    \
    if (mode == kMode_MRI) {                                 \
      __ asm_instr(result, operand);                         \
    } else {                                                 \
      CleanUInt32(offset);                                   \
      __ asm_instrx(result, operand);                        \
    }                                                        \
    __ bind(ool->exit());                                    \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                     \
  } while (0)
    690 
// Bounds-checked float32 store: if the offset register is >= the length in
// input 2, the store is skipped entirely (branch to |done|). Otherwise the
// value (input 3) is rounded to single precision and stored.
#define ASSEMBLE_CHECKED_STORE_FLOAT32()                \
  do {                                                  \
    Label done;                                         \
    size_t index = 0;                                   \
    AddressingMode mode = kMode_None;                   \
    MemOperand operand = i.MemoryOperand(&mode, index); \
    DCHECK_EQ(kMode_MRR, mode);                         \
    Register offset = operand.rb();                     \
    if (HasRegisterInput(instr, 2)) {                   \
      __ cmplw(offset, i.InputRegister(2));             \
    } else {                                            \
      __ cmplwi(offset, i.InputImmediate(2));           \
    }                                                   \
    __ bge(&done);                                      \
    DoubleRegister value = i.InputDoubleRegister(3);    \
    __ frsp(kScratchDoubleReg, value);                  \
    if (mode == kMode_MRI) {                            \
      __ stfs(kScratchDoubleReg, operand);              \
    } else {                                            \
      CleanUInt32(offset);                              \
      __ stfsx(kScratchDoubleReg, operand);             \
    }                                                   \
    __ bind(&done);                                     \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                \
  } while (0)
    716 
// Bounds-checked float64 store: skips the store when the offset register is
// >= the length in input 2; otherwise stores the value (input 3) directly,
// with no rounding step.
#define ASSEMBLE_CHECKED_STORE_DOUBLE()                 \
  do {                                                  \
    Label done;                                         \
    size_t index = 0;                                   \
    AddressingMode mode = kMode_None;                   \
    MemOperand operand = i.MemoryOperand(&mode, index); \
    DCHECK_EQ(kMode_MRR, mode);                         \
    Register offset = operand.rb();                     \
    if (HasRegisterInput(instr, 2)) {                   \
      __ cmplw(offset, i.InputRegister(2));             \
    } else {                                            \
      __ cmplwi(offset, i.InputImmediate(2));           \
    }                                                   \
    __ bge(&done);                                      \
    DoubleRegister value = i.InputDoubleRegister(3);    \
    if (mode == kMode_MRI) {                            \
      __ stfd(value, operand);                          \
    } else {                                            \
      CleanUInt32(offset);                              \
      __ stfdx(value, operand);                         \
    }                                                   \
    __ bind(&done);                                     \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                \
  } while (0)
    741 
// Bounds-checked integer store: skips the store when the offset register is
// >= the length in input 2; otherwise stores the register value (input 3)
// using |asm_instr| (immediate form) or |asm_instrx| (indexed form).
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr, asm_instrx) \
  do {                                                        \
    Label done;                                               \
    size_t index = 0;                                         \
    AddressingMode mode = kMode_None;                         \
    MemOperand operand = i.MemoryOperand(&mode, index);       \
    DCHECK_EQ(kMode_MRR, mode);                               \
    Register offset = operand.rb();                           \
    if (HasRegisterInput(instr, 2)) {                         \
      __ cmplw(offset, i.InputRegister(2));                   \
    } else {                                                  \
      __ cmplwi(offset, i.InputImmediate(2));                 \
    }                                                         \
    __ bge(&done);                                            \
    Register value = i.InputRegister(3);                      \
    if (mode == kMode_MRI) {                                  \
      __ asm_instr(value, operand);                           \
    } else {                                                  \
      __ asm_instrx(value, operand);                          \
    }                                                         \
    __ bind(&done);                                           \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                      \
  } while (0)
    766 
    767 #define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr, asm_instrx) \
    768   do {                                                      \
    769     Label done;                                             \
    770     Register result = i.OutputRegister();                   \
    771     AddressingMode mode = kMode_None;                       \
    772     MemOperand operand = i.MemoryOperand(&mode);            \
    773     if (mode == kMode_MRI) {                                \
    774       __ asm_instr(result, operand);                        \
    775     } else {                                                \
    776       __ asm_instrx(result, operand);                       \
    777     }                                                       \
    778     __ lwsync();                                            \
    779   } while (0)
// Atomic integer store: lwsync before the store orders all prior accesses
// ahead of it, and the trailing full sync orders the store ahead of
// everything that follows.
#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr, asm_instrx) \
  do {                                                       \
    size_t index = 0;                                        \
    AddressingMode mode = kMode_None;                        \
    MemOperand operand = i.MemoryOperand(&mode, &index);     \
    Register value = i.InputRegister(index);                 \
    __ lwsync();                                             \
    if (mode == kMode_MRI) {                                 \
      __ asm_instr(value, operand);                          \
    } else {                                                 \
      __ asm_instrx(value, operand);                         \
    }                                                        \
    __ sync();                                               \
    DCHECK_EQ(LeaveRC, i.OutputRCBit());                     \
  } while (0)
    795 
    796 void CodeGenerator::AssembleDeconstructFrame() {
    797   __ LeaveFrame(StackFrame::MANUAL);
    798 }
    799 
    800 void CodeGenerator::AssemblePrepareTailCall() {
    801   if (frame_access_state()->has_frame()) {
    802     __ RestoreFrameStateForTailCall();
    803   }
    804   frame_access_state()->SetFrameAccessToSP();
    805 }
    806 
// If the current frame is an arguments adaptor frame, pops it (together
// with the extra arguments it carries) so a subsequent tail call operates
// on the real caller frame. |args_reg| holds the callee's argument count;
// the scratch registers must not alias it (checked below).
void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
                                                     Register scratch1,
                                                     Register scratch2,
                                                     Register scratch3) {
  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
  Label done;

  // Check if current frame is an arguments adaptor frame.
  __ LoadP(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ cmpi(scratch1,
          Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
  __ bne(&done);

  // Load arguments count from current arguments adaptor frame (note, it
  // does not include receiver).
  Register caller_args_count_reg = scratch1;
  __ LoadP(caller_args_count_reg,
           MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(caller_args_count_reg);

  ParameterCount callee_args_count(args_reg);
  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
                        scratch3);
  __ bind(&done);
}
    832 
    833 namespace {
    834 
    835 void FlushPendingPushRegisters(MacroAssembler* masm,
    836                                FrameAccessState* frame_access_state,
    837                                ZoneVector<Register>* pending_pushes) {
    838   switch (pending_pushes->size()) {
    839     case 0:
    840       break;
    841     case 1:
    842       masm->Push((*pending_pushes)[0]);
    843       break;
    844     case 2:
    845       masm->Push((*pending_pushes)[0], (*pending_pushes)[1]);
    846       break;
    847     case 3:
    848       masm->Push((*pending_pushes)[0], (*pending_pushes)[1],
    849                  (*pending_pushes)[2]);
    850       break;
    851     default:
    852       UNREACHABLE();
    853       break;
    854   }
    855   frame_access_state->IncreaseSPDelta(pending_pushes->size());
    856   pending_pushes->resize(0);
    857 }
    858 
    859 void AddPendingPushRegister(MacroAssembler* masm,
    860                             FrameAccessState* frame_access_state,
    861                             ZoneVector<Register>* pending_pushes,
    862                             Register reg) {
    863   pending_pushes->push_back(reg);
    864   if (pending_pushes->size() == 3 || reg.is(ip)) {
    865     FlushPendingPushRegisters(masm, frame_access_state, pending_pushes);
    866   }
    867 }
    868 
    869 void AdjustStackPointerForTailCall(
    870     MacroAssembler* masm, FrameAccessState* state, int new_slot_above_sp,
    871     ZoneVector<Register>* pending_pushes = nullptr,
    872     bool allow_shrinkage = true) {
    873   int current_sp_offset = state->GetSPToFPSlotCount() +
    874                           StandardFrameConstants::kFixedSlotCountAboveFp;
    875   int stack_slot_delta = new_slot_above_sp - current_sp_offset;
    876   if (stack_slot_delta > 0) {
    877     if (pending_pushes != nullptr) {
    878       FlushPendingPushRegisters(masm, state, pending_pushes);
    879     }
    880     masm->Add(sp, sp, -stack_slot_delta * kPointerSize, r0);
    881     state->IncreaseSPDelta(stack_slot_delta);
    882   } else if (allow_shrinkage && stack_slot_delta < 0) {
    883     if (pending_pushes != nullptr) {
    884       FlushPendingPushRegisters(masm, state, pending_pushes);
    885     }
    886     masm->Add(sp, sp, -stack_slot_delta * kPointerSize, r0);
    887     state->IncreaseSPDelta(stack_slot_delta);
    888   }
    889 }
    890 
    891 }  // namespace
    892 
// Before the gap moves of a tail call: converts gap moves whose
// destinations form a contiguous run ending at |first_unused_stack_slot|
// into real pushes (batched through |pending_pushes|), then adjusts sp to
// the expected slot without allowing it to shrink (gap moves may still
// read slots above it).
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
                                              int first_unused_stack_slot) {
  CodeGenerator::PushTypeFlags flags(kImmediatePush | kScalarPush);
  ZoneVector<MoveOperands*> pushes(zone());
  GetPushCompatibleMoves(instr, flags, &pushes);

  if (!pushes.empty() &&
      (LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
       first_unused_stack_slot)) {
    PPCOperandConverter g(this, instr);
    ZoneVector<Register> pending_pushes(zone());
    for (auto move : pushes) {
      LocationOperand destination_location(
          LocationOperand::cast(move->destination()));
      InstructionOperand source(move->source());
      // Bring sp to just above this destination slot, accounting for
      // registers already queued but not yet pushed.
      AdjustStackPointerForTailCall(
          masm(), frame_access_state(),
          destination_location.index() - pending_pushes.size(),
          &pending_pushes);
      if (source.IsStackSlot()) {
        // Stack-slot sources are staged through ip.
        LocationOperand source_location(LocationOperand::cast(source));
        __ LoadP(ip, g.SlotToMemOperand(source_location.index()));
        AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
                               ip);
      } else if (source.IsRegister()) {
        LocationOperand source_location(LocationOperand::cast(source));
        AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
                               source_location.GetRegister());
      } else if (source.IsImmediate()) {
        // NOTE(review): no load of the immediate into ip is emitted here —
        // presumably the immediate value is materialized into ip elsewhere;
        // confirm against GetPushCompatibleMoves/kImmediatePush handling.
        AddPendingPushRegister(masm(), frame_access_state(), &pending_pushes,
                               ip);
      } else {
        // Pushes of non-scalar data types is not supported.
        UNIMPLEMENTED();
      }
      // The move has been performed as a push; remove it from the gap.
      move->Eliminate();
    }
    FlushPendingPushRegisters(masm(), frame_access_state(), &pending_pushes);
  }
  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                first_unused_stack_slot, nullptr, false);
}
    935 
// After the gap moves of a tail call: brings sp exactly to
// |first_unused_stack_slot| (shrinkage is allowed at this point).
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
                                             int first_unused_stack_slot) {
  AdjustStackPointerForTailCall(masm(), frame_access_state(),
                                first_unused_stack_slot);
}
    941 
    942 
    943 // Assembles an instruction after register allocation, producing machine code.
    944 CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
    945     Instruction* instr) {
    946   PPCOperandConverter i(this, instr);
    947   ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());
    948 
    949   switch (opcode) {
    950     case kArchCallCodeObject: {
    951       v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
    952           masm());
    953       EnsureSpaceForLazyDeopt();
    954       if (HasRegisterInput(instr, 0)) {
    955         __ addi(ip, i.InputRegister(0),
    956                 Operand(Code::kHeaderSize - kHeapObjectTag));
    957         __ Call(ip);
    958       } else {
    959         __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
    960                 RelocInfo::CODE_TARGET);
    961       }
    962       RecordCallPosition(instr);
    963       DCHECK_EQ(LeaveRC, i.OutputRCBit());
    964       frame_access_state()->ClearSPDelta();
    965       break;
    966     }
    967     case kArchTailCallCodeObjectFromJSFunction:
    968     case kArchTailCallCodeObject: {
    969       if (opcode == kArchTailCallCodeObjectFromJSFunction) {
    970         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
    971                                          i.TempRegister(0), i.TempRegister(1),
    972                                          i.TempRegister(2));
    973       }
    974       if (HasRegisterInput(instr, 0)) {
    975         __ addi(ip, i.InputRegister(0),
    976                 Operand(Code::kHeaderSize - kHeapObjectTag));
    977         __ Jump(ip);
    978       } else {
    979         // We cannot use the constant pool to load the target since
    980         // we've already restored the caller's frame.
    981         ConstantPoolUnavailableScope constant_pool_unavailable(masm());
    982         __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
    983                 RelocInfo::CODE_TARGET);
    984       }
    985       DCHECK_EQ(LeaveRC, i.OutputRCBit());
    986       frame_access_state()->ClearSPDelta();
    987       frame_access_state()->SetFrameAccessToDefault();
    988       break;
    989     }
    990     case kArchTailCallAddress: {
    991       CHECK(!instr->InputAt(0)->IsImmediate());
    992       __ Jump(i.InputRegister(0));
    993       frame_access_state()->ClearSPDelta();
    994       frame_access_state()->SetFrameAccessToDefault();
    995       break;
    996     }
    997     case kArchCallJSFunction: {
    998       v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
    999           masm());
   1000       EnsureSpaceForLazyDeopt();
   1001       Register func = i.InputRegister(0);
   1002       if (FLAG_debug_code) {
   1003         // Check the function's context matches the context argument.
   1004         __ LoadP(kScratchReg,
   1005                  FieldMemOperand(func, JSFunction::kContextOffset));
   1006         __ cmp(cp, kScratchReg);
   1007         __ Assert(eq, kWrongFunctionContext);
   1008       }
   1009       __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
   1010       __ Call(ip);
   1011       RecordCallPosition(instr);
   1012       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1013       frame_access_state()->ClearSPDelta();
   1014       break;
   1015     }
   1016     case kArchTailCallJSFunctionFromJSFunction: {
   1017       Register func = i.InputRegister(0);
   1018       if (FLAG_debug_code) {
   1019         // Check the function's context matches the context argument.
   1020         __ LoadP(kScratchReg,
   1021                  FieldMemOperand(func, JSFunction::kContextOffset));
   1022         __ cmp(cp, kScratchReg);
   1023         __ Assert(eq, kWrongFunctionContext);
   1024       }
   1025       AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
   1026                                        i.TempRegister(0), i.TempRegister(1),
   1027                                        i.TempRegister(2));
   1028       __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
   1029       __ Jump(ip);
   1030       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1031       frame_access_state()->ClearSPDelta();
   1032       frame_access_state()->SetFrameAccessToDefault();
   1033       break;
   1034     }
   1035     case kArchPrepareCallCFunction: {
   1036       int const num_parameters = MiscField::decode(instr->opcode());
   1037       __ PrepareCallCFunction(num_parameters, kScratchReg);
   1038       // Frame alignment requires using FP-relative frame addressing.
   1039       frame_access_state()->SetFrameAccessToFP();
   1040       break;
   1041     }
   1042     case kArchPrepareTailCall:
   1043       AssemblePrepareTailCall();
   1044       break;
   1045     case kArchComment: {
   1046       Address comment_string = i.InputExternalReference(0).address();
   1047       __ RecordComment(reinterpret_cast<const char*>(comment_string));
   1048       break;
   1049     }
   1050     case kArchCallCFunction: {
   1051       int const num_parameters = MiscField::decode(instr->opcode());
   1052       if (instr->InputAt(0)->IsImmediate()) {
   1053         ExternalReference ref = i.InputExternalReference(0);
   1054         __ CallCFunction(ref, num_parameters);
   1055       } else {
   1056         Register func = i.InputRegister(0);
   1057         __ CallCFunction(func, num_parameters);
   1058       }
   1059       frame_access_state()->SetFrameAccessToDefault();
   1060       frame_access_state()->ClearSPDelta();
   1061       break;
   1062     }
   1063     case kArchJmp:
   1064       AssembleArchJump(i.InputRpo(0));
   1065       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1066       break;
   1067     case kArchLookupSwitch:
   1068       AssembleArchLookupSwitch(instr);
   1069       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1070       break;
   1071     case kArchTableSwitch:
   1072       AssembleArchTableSwitch(instr);
   1073       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1074       break;
   1075     case kArchDebugBreak:
   1076       __ stop("kArchDebugBreak");
   1077       break;
   1078     case kArchNop:
   1079     case kArchThrowTerminator:
   1080       // don't emit code for nops.
   1081       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1082       break;
   1083     case kArchDeoptimize: {
   1084       int deopt_state_id =
   1085           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
   1086       CodeGenResult result =
   1087           AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
   1088       if (result != kSuccess) return result;
   1089       break;
   1090     }
   1091     case kArchRet:
   1092       AssembleReturn(instr->InputAt(0));
   1093       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1094       break;
   1095     case kArchStackPointer:
   1096       __ mr(i.OutputRegister(), sp);
   1097       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1098       break;
   1099     case kArchFramePointer:
   1100       __ mr(i.OutputRegister(), fp);
   1101       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1102       break;
   1103     case kArchParentFramePointer:
   1104       if (frame_access_state()->has_frame()) {
   1105         __ LoadP(i.OutputRegister(), MemOperand(fp, 0));
   1106       } else {
   1107         __ mr(i.OutputRegister(), fp);
   1108       }
   1109       break;
   1110     case kArchTruncateDoubleToI:
   1111       // TODO(mbrandy): move slow call to stub out of line.
   1112       __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
   1113       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1114       break;
   1115     case kArchStoreWithWriteBarrier: {
   1116       RecordWriteMode mode =
   1117           static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
   1118       Register object = i.InputRegister(0);
   1119       Register value = i.InputRegister(2);
   1120       Register scratch0 = i.TempRegister(0);
   1121       Register scratch1 = i.TempRegister(1);
   1122       OutOfLineRecordWrite* ool;
   1123 
   1124       AddressingMode addressing_mode =
   1125           AddressingModeField::decode(instr->opcode());
   1126       if (addressing_mode == kMode_MRI) {
   1127         int32_t offset = i.InputInt32(1);
   1128         ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
   1129                                                 scratch0, scratch1, mode);
   1130         __ StoreP(value, MemOperand(object, offset));
   1131       } else {
   1132         DCHECK_EQ(kMode_MRR, addressing_mode);
   1133         Register offset(i.InputRegister(1));
   1134         ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
   1135                                                 scratch0, scratch1, mode);
   1136         __ StorePX(value, MemOperand(object, offset));
   1137       }
   1138       __ CheckPageFlag(object, scratch0,
   1139                        MemoryChunk::kPointersFromHereAreInterestingMask, ne,
   1140                        ool->entry());
   1141       __ bind(ool->exit());
   1142       break;
   1143     }
   1144     case kArchStackSlot: {
   1145       FrameOffset offset =
   1146           frame_access_state()->GetFrameOffset(i.InputInt32(0));
   1147       __ addi(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
   1148               Operand(offset.offset()));
   1149       break;
   1150     }
   1151     case kPPC_And:
   1152       if (HasRegisterInput(instr, 1)) {
   1153         __ and_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
   1154                 i.OutputRCBit());
   1155       } else {
   1156         __ andi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
   1157       }
   1158       break;
   1159     case kPPC_AndComplement:
   1160       __ andc(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
   1161               i.OutputRCBit());
   1162       break;
   1163     case kPPC_Or:
   1164       if (HasRegisterInput(instr, 1)) {
   1165         __ orx(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
   1166                i.OutputRCBit());
   1167       } else {
   1168         __ ori(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
   1169         DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1170       }
   1171       break;
   1172     case kPPC_OrComplement:
   1173       __ orc(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
   1174              i.OutputRCBit());
   1175       break;
   1176     case kPPC_Xor:
   1177       if (HasRegisterInput(instr, 1)) {
   1178         __ xor_(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
   1179                 i.OutputRCBit());
   1180       } else {
   1181         __ xori(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
   1182         DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1183       }
   1184       break;
   1185     case kPPC_ShiftLeft32:
   1186       ASSEMBLE_BINOP_RC(slw, slwi);
   1187       break;
   1188 #if V8_TARGET_ARCH_PPC64
   1189     case kPPC_ShiftLeft64:
   1190       ASSEMBLE_BINOP_RC(sld, sldi);
   1191       break;
   1192 #endif
   1193     case kPPC_ShiftRight32:
   1194       ASSEMBLE_BINOP_RC(srw, srwi);
   1195       break;
   1196 #if V8_TARGET_ARCH_PPC64
   1197     case kPPC_ShiftRight64:
   1198       ASSEMBLE_BINOP_RC(srd, srdi);
   1199       break;
   1200 #endif
   1201     case kPPC_ShiftRightAlg32:
   1202       ASSEMBLE_BINOP_INT_RC(sraw, srawi);
   1203       break;
   1204 #if V8_TARGET_ARCH_PPC64
   1205     case kPPC_ShiftRightAlg64:
   1206       ASSEMBLE_BINOP_INT_RC(srad, sradi);
   1207       break;
   1208 #endif
   1209 #if !V8_TARGET_ARCH_PPC64
   1210     case kPPC_AddPair:
   1211       // i.InputRegister(0) ... left low word.
   1212       // i.InputRegister(1) ... left high word.
   1213       // i.InputRegister(2) ... right low word.
   1214       // i.InputRegister(3) ... right high word.
   1215       __ addc(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2));
   1216       __ adde(i.OutputRegister(1), i.InputRegister(1), i.InputRegister(3));
   1217       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1218       break;
   1219     case kPPC_SubPair:
   1220       // i.InputRegister(0) ... left low word.
   1221       // i.InputRegister(1) ... left high word.
   1222       // i.InputRegister(2) ... right low word.
   1223       // i.InputRegister(3) ... right high word.
   1224       __ subc(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2));
   1225       __ sube(i.OutputRegister(1), i.InputRegister(1), i.InputRegister(3));
   1226       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1227       break;
   1228     case kPPC_MulPair:
   1229       // i.InputRegister(0) ... left low word.
   1230       // i.InputRegister(1) ... left high word.
   1231       // i.InputRegister(2) ... right low word.
   1232       // i.InputRegister(3) ... right high word.
   1233       __ mullw(i.TempRegister(0), i.InputRegister(0), i.InputRegister(3));
   1234       __ mullw(i.TempRegister(1), i.InputRegister(2), i.InputRegister(1));
   1235       __ add(i.TempRegister(0), i.TempRegister(0), i.TempRegister(1));
   1236       __ mullw(i.OutputRegister(0), i.InputRegister(0), i.InputRegister(2));
   1237       __ mulhwu(i.OutputRegister(1), i.InputRegister(0), i.InputRegister(2));
   1238       __ add(i.OutputRegister(1), i.OutputRegister(1), i.TempRegister(0));
   1239       break;
   1240     case kPPC_ShiftLeftPair: {
   1241       Register second_output =
   1242           instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
   1243       if (instr->InputAt(2)->IsImmediate()) {
   1244         __ ShiftLeftPair(i.OutputRegister(0), second_output, i.InputRegister(0),
   1245                          i.InputRegister(1), i.InputInt32(2));
   1246       } else {
   1247         __ ShiftLeftPair(i.OutputRegister(0), second_output, i.InputRegister(0),
   1248                          i.InputRegister(1), kScratchReg, i.InputRegister(2));
   1249       }
   1250       break;
   1251     }
   1252     case kPPC_ShiftRightPair: {
   1253       Register second_output =
   1254           instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
   1255       if (instr->InputAt(2)->IsImmediate()) {
   1256         __ ShiftRightPair(i.OutputRegister(0), second_output,
   1257                           i.InputRegister(0), i.InputRegister(1),
   1258                           i.InputInt32(2));
   1259       } else {
   1260         __ ShiftRightPair(i.OutputRegister(0), second_output,
   1261                           i.InputRegister(0), i.InputRegister(1), kScratchReg,
   1262                           i.InputRegister(2));
   1263       }
   1264       break;
   1265     }
   1266     case kPPC_ShiftRightAlgPair: {
   1267       Register second_output =
   1268           instr->OutputCount() >= 2 ? i.OutputRegister(1) : i.TempRegister(0);
   1269       if (instr->InputAt(2)->IsImmediate()) {
   1270         __ ShiftRightAlgPair(i.OutputRegister(0), second_output,
   1271                              i.InputRegister(0), i.InputRegister(1),
   1272                              i.InputInt32(2));
   1273       } else {
   1274         __ ShiftRightAlgPair(i.OutputRegister(0), second_output,
   1275                              i.InputRegister(0), i.InputRegister(1),
   1276                              kScratchReg, i.InputRegister(2));
   1277       }
   1278       break;
   1279     }
   1280 #endif
   1281     case kPPC_RotRight32:
   1282       if (HasRegisterInput(instr, 1)) {
   1283         __ subfic(kScratchReg, i.InputRegister(1), Operand(32));
   1284         __ rotlw(i.OutputRegister(), i.InputRegister(0), kScratchReg,
   1285                  i.OutputRCBit());
   1286       } else {
   1287         int sh = i.InputInt32(1);
   1288         __ rotrwi(i.OutputRegister(), i.InputRegister(0), sh, i.OutputRCBit());
   1289       }
   1290       break;
   1291 #if V8_TARGET_ARCH_PPC64
   1292     case kPPC_RotRight64:
   1293       if (HasRegisterInput(instr, 1)) {
   1294         __ subfic(kScratchReg, i.InputRegister(1), Operand(64));
   1295         __ rotld(i.OutputRegister(), i.InputRegister(0), kScratchReg,
   1296                  i.OutputRCBit());
   1297       } else {
   1298         int sh = i.InputInt32(1);
   1299         __ rotrdi(i.OutputRegister(), i.InputRegister(0), sh, i.OutputRCBit());
   1300       }
   1301       break;
   1302 #endif
   1303     case kPPC_Not:
   1304       __ notx(i.OutputRegister(), i.InputRegister(0), i.OutputRCBit());
   1305       break;
   1306     case kPPC_RotLeftAndMask32:
   1307       __ rlwinm(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
   1308                 31 - i.InputInt32(2), 31 - i.InputInt32(3), i.OutputRCBit());
   1309       break;
   1310 #if V8_TARGET_ARCH_PPC64
   1311     case kPPC_RotLeftAndClear64:
   1312       __ rldic(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
   1313                63 - i.InputInt32(2), i.OutputRCBit());
   1314       break;
   1315     case kPPC_RotLeftAndClearLeft64:
   1316       __ rldicl(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
   1317                 63 - i.InputInt32(2), i.OutputRCBit());
   1318       break;
   1319     case kPPC_RotLeftAndClearRight64:
   1320       __ rldicr(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1),
   1321                 63 - i.InputInt32(2), i.OutputRCBit());
   1322       break;
   1323 #endif
   1324     case kPPC_Add32:
   1325 #if V8_TARGET_ARCH_PPC64
   1326       if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
   1327         ASSEMBLE_ADD_WITH_OVERFLOW();
   1328       } else {
   1329 #endif
   1330         if (HasRegisterInput(instr, 1)) {
   1331           __ add(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
   1332                  LeaveOE, i.OutputRCBit());
   1333         } else {
   1334           __ addi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
   1335           DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1336         }
   1337         __ extsw(i.OutputRegister(), i.OutputRegister());
   1338 #if V8_TARGET_ARCH_PPC64
   1339       }
   1340 #endif
   1341       break;
   1342 #if V8_TARGET_ARCH_PPC64
   1343     case kPPC_Add64:
   1344       if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
   1345         ASSEMBLE_ADD_WITH_OVERFLOW();
   1346       } else {
   1347         if (HasRegisterInput(instr, 1)) {
   1348           __ add(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
   1349                  LeaveOE, i.OutputRCBit());
   1350         } else {
   1351           __ addi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
   1352           DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1353         }
   1354       }
   1355       break;
   1356 #endif
   1357     case kPPC_AddWithOverflow32:
   1358       ASSEMBLE_ADD_WITH_OVERFLOW32();
   1359       break;
   1360     case kPPC_AddDouble:
   1361       ASSEMBLE_FLOAT_BINOP_RC(fadd, MiscField::decode(instr->opcode()));
   1362       break;
   1363     case kPPC_Sub:
   1364 #if V8_TARGET_ARCH_PPC64
   1365       if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
   1366         ASSEMBLE_SUB_WITH_OVERFLOW();
   1367       } else {
   1368 #endif
   1369         if (HasRegisterInput(instr, 1)) {
   1370           __ sub(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
   1371                  LeaveOE, i.OutputRCBit());
   1372         } else {
   1373           __ subi(i.OutputRegister(), i.InputRegister(0), i.InputImmediate(1));
   1374           DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1375         }
   1376 #if V8_TARGET_ARCH_PPC64
   1377       }
   1378 #endif
   1379       break;
   1380     case kPPC_SubWithOverflow32:
   1381       ASSEMBLE_SUB_WITH_OVERFLOW32();
   1382       break;
   1383     case kPPC_SubDouble:
   1384       ASSEMBLE_FLOAT_BINOP_RC(fsub, MiscField::decode(instr->opcode()));
   1385       break;
   1386     case kPPC_Mul32:
   1387       __ mullw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
   1388                LeaveOE, i.OutputRCBit());
   1389       break;
   1390 #if V8_TARGET_ARCH_PPC64
   1391     case kPPC_Mul64:
   1392       __ mulld(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
   1393                LeaveOE, i.OutputRCBit());
   1394       break;
   1395 #endif
   1396 
   1397     case kPPC_Mul32WithHigh32:
   1398       if (i.OutputRegister(0).is(i.InputRegister(0)) ||
   1399           i.OutputRegister(0).is(i.InputRegister(1)) ||
   1400           i.OutputRegister(1).is(i.InputRegister(0)) ||
   1401           i.OutputRegister(1).is(i.InputRegister(1))) {
   1402         __ mullw(kScratchReg,
   1403                  i.InputRegister(0), i.InputRegister(1));  // low
   1404         __ mulhw(i.OutputRegister(1),
   1405                  i.InputRegister(0), i.InputRegister(1));  // high
   1406         __ mr(i.OutputRegister(0), kScratchReg);
   1407       } else {
   1408         __ mullw(i.OutputRegister(0),
   1409                  i.InputRegister(0), i.InputRegister(1));  // low
   1410         __ mulhw(i.OutputRegister(1),
   1411                  i.InputRegister(0), i.InputRegister(1));  // high
   1412       }
   1413       break;
   1414     case kPPC_MulHigh32:
   1415       __ mulhw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
   1416                i.OutputRCBit());
   1417       break;
   1418     case kPPC_MulHighU32:
   1419       __ mulhwu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
   1420                 i.OutputRCBit());
   1421       break;
   1422     case kPPC_MulDouble:
   1423       ASSEMBLE_FLOAT_BINOP_RC(fmul, MiscField::decode(instr->opcode()));
   1424       break;
   1425     case kPPC_Div32:
   1426       __ divw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
   1427       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1428       break;
   1429 #if V8_TARGET_ARCH_PPC64
   1430     case kPPC_Div64:
   1431       __ divd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
   1432       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1433       break;
   1434 #endif
   1435     case kPPC_DivU32:
   1436       __ divwu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
   1437       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1438       break;
   1439 #if V8_TARGET_ARCH_PPC64
   1440     case kPPC_DivU64:
   1441       __ divdu(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
   1442       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1443       break;
   1444 #endif
   1445     case kPPC_DivDouble:
   1446       ASSEMBLE_FLOAT_BINOP_RC(fdiv, MiscField::decode(instr->opcode()));
   1447       break;
   1448     case kPPC_Mod32:
   1449       if (CpuFeatures::IsSupported(MODULO)) {
   1450         __ modsw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
   1451       } else {
   1452         ASSEMBLE_MODULO(divw, mullw);
   1453       }
   1454       break;
   1455 #if V8_TARGET_ARCH_PPC64
   1456     case kPPC_Mod64:
   1457       if (CpuFeatures::IsSupported(MODULO)) {
   1458         __ modsd(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
   1459       } else {
   1460         ASSEMBLE_MODULO(divd, mulld);
   1461       }
   1462       break;
   1463 #endif
   1464     case kPPC_ModU32:
   1465       if (CpuFeatures::IsSupported(MODULO)) {
   1466         __ moduw(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
   1467       } else {
   1468         ASSEMBLE_MODULO(divwu, mullw);
   1469       }
   1470       break;
   1471 #if V8_TARGET_ARCH_PPC64
   1472     case kPPC_ModU64:
   1473       if (CpuFeatures::IsSupported(MODULO)) {
   1474         __ modud(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
   1475       } else {
   1476         ASSEMBLE_MODULO(divdu, mulld);
   1477       }
   1478       break;
   1479 #endif
   1480     case kPPC_ModDouble:
   1481       // TODO(bmeurer): We should really get rid of this special instruction,
   1482       // and generate a CallAddress instruction instead.
   1483       ASSEMBLE_FLOAT_MODULO();
   1484       break;
   1485     case kIeee754Float64Acos:
   1486       ASSEMBLE_IEEE754_UNOP(acos);
   1487       break;
   1488     case kIeee754Float64Acosh:
   1489       ASSEMBLE_IEEE754_UNOP(acosh);
   1490       break;
   1491     case kIeee754Float64Asin:
   1492       ASSEMBLE_IEEE754_UNOP(asin);
   1493       break;
   1494     case kIeee754Float64Asinh:
   1495       ASSEMBLE_IEEE754_UNOP(asinh);
   1496       break;
   1497     case kIeee754Float64Atan:
   1498       ASSEMBLE_IEEE754_UNOP(atan);
   1499       break;
   1500     case kIeee754Float64Atan2:
   1501       ASSEMBLE_IEEE754_BINOP(atan2);
   1502       break;
   1503     case kIeee754Float64Atanh:
   1504       ASSEMBLE_IEEE754_UNOP(atanh);
   1505       break;
   1506     case kIeee754Float64Tan:
   1507       ASSEMBLE_IEEE754_UNOP(tan);
   1508       break;
   1509     case kIeee754Float64Tanh:
   1510       ASSEMBLE_IEEE754_UNOP(tanh);
   1511       break;
   1512     case kIeee754Float64Cbrt:
   1513       ASSEMBLE_IEEE754_UNOP(cbrt);
   1514       break;
   1515     case kIeee754Float64Sin:
   1516       ASSEMBLE_IEEE754_UNOP(sin);
   1517       break;
   1518     case kIeee754Float64Sinh:
   1519       ASSEMBLE_IEEE754_UNOP(sinh);
   1520       break;
   1521     case kIeee754Float64Cos:
   1522       ASSEMBLE_IEEE754_UNOP(cos);
   1523       break;
   1524     case kIeee754Float64Cosh:
   1525       ASSEMBLE_IEEE754_UNOP(cosh);
   1526       break;
   1527     case kIeee754Float64Exp:
   1528       ASSEMBLE_IEEE754_UNOP(exp);
   1529       break;
   1530     case kIeee754Float64Expm1:
   1531       ASSEMBLE_IEEE754_UNOP(expm1);
   1532       break;
   1533     case kIeee754Float64Log:
   1534       ASSEMBLE_IEEE754_UNOP(log);
   1535       break;
   1536     case kIeee754Float64Log1p:
   1537       ASSEMBLE_IEEE754_UNOP(log1p);
   1538       break;
   1539     case kIeee754Float64Log2:
   1540       ASSEMBLE_IEEE754_UNOP(log2);
   1541       break;
   1542     case kIeee754Float64Log10:
   1543       ASSEMBLE_IEEE754_UNOP(log10);
   1544       break;
   1545     case kIeee754Float64Pow: {
   1546       MathPowStub stub(isolate(), MathPowStub::DOUBLE);
   1547       __ CallStub(&stub);
   1548       __ Move(d1, d3);
   1549       break;
   1550     }
   1551     case kPPC_Neg:
   1552       __ neg(i.OutputRegister(), i.InputRegister(0), LeaveOE, i.OutputRCBit());
   1553       break;
   1554     case kPPC_MaxDouble:
   1555       ASSEMBLE_FLOAT_MAX();
   1556       break;
   1557     case kPPC_MinDouble:
   1558       ASSEMBLE_FLOAT_MIN();
   1559       break;
   1560     case kPPC_AbsDouble:
   1561       ASSEMBLE_FLOAT_UNOP_RC(fabs, 0);
   1562       break;
   1563     case kPPC_SqrtDouble:
   1564       ASSEMBLE_FLOAT_UNOP_RC(fsqrt, MiscField::decode(instr->opcode()));
   1565       break;
   1566     case kPPC_FloorDouble:
   1567       ASSEMBLE_FLOAT_UNOP_RC(frim, MiscField::decode(instr->opcode()));
   1568       break;
   1569     case kPPC_CeilDouble:
   1570       ASSEMBLE_FLOAT_UNOP_RC(frip, MiscField::decode(instr->opcode()));
   1571       break;
   1572     case kPPC_TruncateDouble:
   1573       ASSEMBLE_FLOAT_UNOP_RC(friz, MiscField::decode(instr->opcode()));
   1574       break;
   1575     case kPPC_RoundDouble:
   1576       ASSEMBLE_FLOAT_UNOP_RC(frin, MiscField::decode(instr->opcode()));
   1577       break;
   1578     case kPPC_NegDouble:
   1579       ASSEMBLE_FLOAT_UNOP_RC(fneg, 0);
   1580       break;
   1581     case kPPC_Cntlz32:
   1582       __ cntlzw_(i.OutputRegister(), i.InputRegister(0));
   1583       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1584       break;
   1585 #if V8_TARGET_ARCH_PPC64
   1586     case kPPC_Cntlz64:
   1587       __ cntlzd_(i.OutputRegister(), i.InputRegister(0));
   1588       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1589       break;
   1590 #endif
   1591     case kPPC_Popcnt32:
   1592       __ popcntw(i.OutputRegister(), i.InputRegister(0));
   1593       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1594       break;
   1595 #if V8_TARGET_ARCH_PPC64
   1596     case kPPC_Popcnt64:
   1597       __ popcntd(i.OutputRegister(), i.InputRegister(0));
   1598       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1599       break;
   1600 #endif
   1601     case kPPC_Cmp32:
   1602       ASSEMBLE_COMPARE(cmpw, cmplw);
   1603       break;
   1604 #if V8_TARGET_ARCH_PPC64
   1605     case kPPC_Cmp64:
   1606       ASSEMBLE_COMPARE(cmp, cmpl);
   1607       break;
   1608 #endif
   1609     case kPPC_CmpDouble:
   1610       ASSEMBLE_FLOAT_COMPARE(fcmpu);
   1611       break;
   1612     case kPPC_Tst32:
   1613       if (HasRegisterInput(instr, 1)) {
   1614         __ and_(r0, i.InputRegister(0), i.InputRegister(1), i.OutputRCBit());
   1615       } else {
   1616         __ andi(r0, i.InputRegister(0), i.InputImmediate(1));
   1617       }
   1618 #if V8_TARGET_ARCH_PPC64
   1619       __ extsw(r0, r0, i.OutputRCBit());
   1620 #endif
   1621       DCHECK_EQ(SetRC, i.OutputRCBit());
   1622       break;
   1623 #if V8_TARGET_ARCH_PPC64
   1624     case kPPC_Tst64:
   1625       if (HasRegisterInput(instr, 1)) {
   1626         __ and_(r0, i.InputRegister(0), i.InputRegister(1), i.OutputRCBit());
   1627       } else {
   1628         __ andi(r0, i.InputRegister(0), i.InputImmediate(1));
   1629       }
   1630       DCHECK_EQ(SetRC, i.OutputRCBit());
   1631       break;
   1632 #endif
   1633     case kPPC_Float64SilenceNaN: {
   1634       DoubleRegister value = i.InputDoubleRegister(0);
   1635       DoubleRegister result = i.OutputDoubleRegister();
   1636       __ CanonicalizeNaN(result, value);
   1637       break;
   1638     }
   1639     case kPPC_Push:
   1640       if (instr->InputAt(0)->IsFPRegister()) {
   1641         __ stfdu(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
   1642         frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
   1643       } else {
   1644         __ Push(i.InputRegister(0));
   1645         frame_access_state()->IncreaseSPDelta(1);
   1646       }
   1647       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1648       break;
   1649     case kPPC_PushFrame: {
   1650       int num_slots = i.InputInt32(1);
   1651       if (instr->InputAt(0)->IsFPRegister()) {
   1652         LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
   1653         if (op->representation() == MachineRepresentation::kFloat64) {
   1654           __ StoreDoubleU(i.InputDoubleRegister(0),
   1655                         MemOperand(sp, -num_slots * kPointerSize), r0);
   1656         } else {
   1657           DCHECK(op->representation() == MachineRepresentation::kFloat32);
   1658           __ StoreSingleU(i.InputDoubleRegister(0),
   1659                         MemOperand(sp, -num_slots * kPointerSize), r0);
   1660         }
   1661       } else {
   1662         __ StorePU(i.InputRegister(0),
   1663                    MemOperand(sp, -num_slots * kPointerSize), r0);
   1664       }
   1665       break;
   1666     }
   1667     case kPPC_StoreToStackSlot: {
   1668       int slot = i.InputInt32(1);
   1669       if (instr->InputAt(0)->IsFPRegister()) {
   1670         LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
   1671         if (op->representation() == MachineRepresentation::kFloat64) {
   1672           __ StoreDouble(i.InputDoubleRegister(0),
   1673                         MemOperand(sp, slot * kPointerSize), r0);
   1674         } else {
   1675           DCHECK(op->representation() == MachineRepresentation::kFloat32);
   1676           __ StoreSingle(i.InputDoubleRegister(0),
   1677                         MemOperand(sp, slot * kPointerSize), r0);
   1678         }
   1679       } else {
   1680         __ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize), r0);
   1681       }
   1682       break;
   1683     }
   1684     case kPPC_ExtendSignWord8:
   1685       __ extsb(i.OutputRegister(), i.InputRegister(0));
   1686       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1687       break;
   1688     case kPPC_ExtendSignWord16:
   1689       __ extsh(i.OutputRegister(), i.InputRegister(0));
   1690       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1691       break;
   1692 #if V8_TARGET_ARCH_PPC64
   1693     case kPPC_ExtendSignWord32:
   1694       __ extsw(i.OutputRegister(), i.InputRegister(0));
   1695       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1696       break;
   1697     case kPPC_Uint32ToUint64:
   1698       // Zero extend
   1699       __ clrldi(i.OutputRegister(), i.InputRegister(0), Operand(32));
   1700       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1701       break;
   1702     case kPPC_Int64ToInt32:
   1703       __ extsw(i.OutputRegister(), i.InputRegister(0));
   1704       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1705       break;
   1706     case kPPC_Int64ToFloat32:
   1707       __ ConvertInt64ToFloat(i.InputRegister(0), i.OutputDoubleRegister());
   1708       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1709       break;
   1710     case kPPC_Int64ToDouble:
   1711       __ ConvertInt64ToDouble(i.InputRegister(0), i.OutputDoubleRegister());
   1712       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1713       break;
   1714     case kPPC_Uint64ToFloat32:
   1715       __ ConvertUnsignedInt64ToFloat(i.InputRegister(0),
   1716                                      i.OutputDoubleRegister());
   1717       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1718       break;
   1719     case kPPC_Uint64ToDouble:
   1720       __ ConvertUnsignedInt64ToDouble(i.InputRegister(0),
   1721                                       i.OutputDoubleRegister());
   1722       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1723       break;
   1724 #endif
   1725     case kPPC_Int32ToFloat32:
   1726       __ ConvertIntToFloat(i.InputRegister(0), i.OutputDoubleRegister());
   1727       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1728       break;
   1729     case kPPC_Int32ToDouble:
   1730       __ ConvertIntToDouble(i.InputRegister(0), i.OutputDoubleRegister());
   1731       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1732       break;
   1733     case kPPC_Uint32ToFloat32:
   1734       __ ConvertUnsignedIntToFloat(i.InputRegister(0),
   1735                                    i.OutputDoubleRegister());
   1736       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1737       break;
   1738     case kPPC_Uint32ToDouble:
   1739       __ ConvertUnsignedIntToDouble(i.InputRegister(0),
   1740                                     i.OutputDoubleRegister());
   1741       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1742       break;
   1743     case kPPC_DoubleToInt32:
   1744     case kPPC_DoubleToUint32:
   1745     case kPPC_DoubleToInt64: {
   1746 #if V8_TARGET_ARCH_PPC64
   1747       bool check_conversion =
   1748           (opcode == kPPC_DoubleToInt64 && i.OutputCount() > 1);
   1749       if (check_conversion) {
   1750         __ mtfsb0(VXCVI);  // clear FPSCR:VXCVI bit
   1751       }
   1752 #endif
   1753       __ ConvertDoubleToInt64(i.InputDoubleRegister(0),
   1754 #if !V8_TARGET_ARCH_PPC64
   1755                               kScratchReg,
   1756 #endif
   1757                               i.OutputRegister(0), kScratchDoubleReg);
   1758 #if V8_TARGET_ARCH_PPC64
   1759       if (check_conversion) {
   1760         // Set 2nd output to zero if conversion fails.
   1761         CRegister cr = cr7;
   1762         int crbit = v8::internal::Assembler::encode_crbit(
   1763             cr, static_cast<CRBit>(VXCVI % CRWIDTH));
   1764         __ mcrfs(cr, VXCVI);  // extract FPSCR field containing VXCVI into cr7
   1765         if (CpuFeatures::IsSupported(ISELECT)) {
   1766           __ li(i.OutputRegister(1), Operand(1));
   1767           __ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit);
   1768         } else {
   1769           __ li(i.OutputRegister(1), Operand::Zero());
   1770           __ bc(v8::internal::Assembler::kInstrSize * 2, BT, crbit);
   1771           __ li(i.OutputRegister(1), Operand(1));
   1772         }
   1773       }
   1774 #endif
   1775       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1776       break;
   1777     }
   1778 #if V8_TARGET_ARCH_PPC64
   1779     case kPPC_DoubleToUint64: {
   1780       bool check_conversion = (i.OutputCount() > 1);
   1781       if (check_conversion) {
   1782         __ mtfsb0(VXCVI);  // clear FPSCR:VXCVI bit
   1783       }
   1784       __ ConvertDoubleToUnsignedInt64(i.InputDoubleRegister(0),
   1785                                       i.OutputRegister(0), kScratchDoubleReg);
   1786       if (check_conversion) {
   1787         // Set 2nd output to zero if conversion fails.
   1788         CRegister cr = cr7;
   1789         int crbit = v8::internal::Assembler::encode_crbit(
   1790             cr, static_cast<CRBit>(VXCVI % CRWIDTH));
   1791         __ mcrfs(cr, VXCVI);  // extract FPSCR field containing VXCVI into cr7
   1792         if (CpuFeatures::IsSupported(ISELECT)) {
   1793           __ li(i.OutputRegister(1), Operand(1));
   1794           __ isel(i.OutputRegister(1), r0, i.OutputRegister(1), crbit);
   1795         } else {
   1796           __ li(i.OutputRegister(1), Operand::Zero());
   1797           __ bc(v8::internal::Assembler::kInstrSize * 2, BT, crbit);
   1798           __ li(i.OutputRegister(1), Operand(1));
   1799         }
   1800       }
   1801       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1802       break;
   1803     }
   1804 #endif
   1805     case kPPC_DoubleToFloat32:
   1806       ASSEMBLE_FLOAT_UNOP_RC(frsp, 0);
   1807       break;
   1808     case kPPC_Float32ToDouble:
   1809       // Nothing to do.
   1810       __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
   1811       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1812       break;
   1813     case kPPC_DoubleExtractLowWord32:
   1814       __ MovDoubleLowToInt(i.OutputRegister(), i.InputDoubleRegister(0));
   1815       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1816       break;
   1817     case kPPC_DoubleExtractHighWord32:
   1818       __ MovDoubleHighToInt(i.OutputRegister(), i.InputDoubleRegister(0));
   1819       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1820       break;
   1821     case kPPC_DoubleInsertLowWord32:
   1822       __ InsertDoubleLow(i.OutputDoubleRegister(), i.InputRegister(1), r0);
   1823       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1824       break;
   1825     case kPPC_DoubleInsertHighWord32:
   1826       __ InsertDoubleHigh(i.OutputDoubleRegister(), i.InputRegister(1), r0);
   1827       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1828       break;
   1829     case kPPC_DoubleConstruct:
   1830 #if V8_TARGET_ARCH_PPC64
   1831       __ MovInt64ComponentsToDouble(i.OutputDoubleRegister(),
   1832                                     i.InputRegister(0), i.InputRegister(1), r0);
   1833 #else
   1834       __ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0),
   1835                           i.InputRegister(1));
   1836 #endif
   1837       DCHECK_EQ(LeaveRC, i.OutputRCBit());
   1838       break;
   1839     case kPPC_BitcastFloat32ToInt32:
   1840       __ MovFloatToInt(i.OutputRegister(), i.InputDoubleRegister(0));
   1841       break;
   1842     case kPPC_BitcastInt32ToFloat32:
   1843       __ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
   1844       break;
   1845 #if V8_TARGET_ARCH_PPC64
   1846     case kPPC_BitcastDoubleToInt64:
   1847       __ MovDoubleToInt64(i.OutputRegister(), i.InputDoubleRegister(0));
   1848       break;
   1849     case kPPC_BitcastInt64ToDouble:
   1850       __ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
   1851       break;
   1852 #endif
   1853     case kPPC_LoadWordU8:
   1854       ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
   1855       break;
   1856     case kPPC_LoadWordS8:
   1857       ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
   1858       __ extsb(i.OutputRegister(), i.OutputRegister());
   1859       break;
   1860     case kPPC_LoadWordU16:
   1861       ASSEMBLE_LOAD_INTEGER(lhz, lhzx);
   1862       break;
   1863     case kPPC_LoadWordS16:
   1864       ASSEMBLE_LOAD_INTEGER(lha, lhax);
   1865       break;
   1866     case kPPC_LoadWordU32:
   1867       ASSEMBLE_LOAD_INTEGER(lwz, lwzx);
   1868       break;
   1869     case kPPC_LoadWordS32:
   1870       ASSEMBLE_LOAD_INTEGER(lwa, lwax);
   1871       break;
   1872 #if V8_TARGET_ARCH_PPC64
   1873     case kPPC_LoadWord64:
   1874       ASSEMBLE_LOAD_INTEGER(ld, ldx);
   1875       break;
   1876 #endif
   1877     case kPPC_LoadFloat32:
   1878       ASSEMBLE_LOAD_FLOAT(lfs, lfsx);
   1879       break;
   1880     case kPPC_LoadDouble:
   1881       ASSEMBLE_LOAD_FLOAT(lfd, lfdx);
   1882       break;
   1883     case kPPC_StoreWord8:
   1884       ASSEMBLE_STORE_INTEGER(stb, stbx);
   1885       break;
   1886     case kPPC_StoreWord16:
   1887       ASSEMBLE_STORE_INTEGER(sth, sthx);
   1888       break;
   1889     case kPPC_StoreWord32:
   1890       ASSEMBLE_STORE_INTEGER(stw, stwx);
   1891       break;
   1892 #if V8_TARGET_ARCH_PPC64
   1893     case kPPC_StoreWord64:
   1894       ASSEMBLE_STORE_INTEGER(std, stdx);
   1895       break;
   1896 #endif
   1897     case kPPC_StoreFloat32:
   1898       ASSEMBLE_STORE_FLOAT32();
   1899       break;
   1900     case kPPC_StoreDouble:
   1901       ASSEMBLE_STORE_DOUBLE();
   1902       break;
   1903     case kCheckedLoadInt8:
   1904       ASSEMBLE_CHECKED_LOAD_INTEGER(lbz, lbzx);
   1905       __ extsb(i.OutputRegister(), i.OutputRegister());
   1906       break;
   1907     case kCheckedLoadUint8:
   1908       ASSEMBLE_CHECKED_LOAD_INTEGER(lbz, lbzx);
   1909       break;
   1910     case kCheckedLoadInt16:
   1911       ASSEMBLE_CHECKED_LOAD_INTEGER(lha, lhax);
   1912       break;
   1913     case kCheckedLoadUint16:
   1914       ASSEMBLE_CHECKED_LOAD_INTEGER(lhz, lhzx);
   1915       break;
   1916     case kCheckedLoadWord32:
   1917       ASSEMBLE_CHECKED_LOAD_INTEGER(lwz, lwzx);
   1918       break;
   1919     case kCheckedLoadWord64:
   1920 #if V8_TARGET_ARCH_PPC64
   1921       ASSEMBLE_CHECKED_LOAD_INTEGER(ld, ldx);
   1922 #else
   1923       UNREACHABLE();
   1924 #endif
   1925       break;
   1926     case kCheckedLoadFloat32:
   1927       ASSEMBLE_CHECKED_LOAD_FLOAT(lfs, lfsx, 32);
   1928       break;
   1929     case kCheckedLoadFloat64:
   1930       ASSEMBLE_CHECKED_LOAD_FLOAT(lfd, lfdx, 64);
   1931       break;
   1932     case kCheckedStoreWord8:
   1933       ASSEMBLE_CHECKED_STORE_INTEGER(stb, stbx);
   1934       break;
   1935     case kCheckedStoreWord16:
   1936       ASSEMBLE_CHECKED_STORE_INTEGER(sth, sthx);
   1937       break;
   1938     case kCheckedStoreWord32:
   1939       ASSEMBLE_CHECKED_STORE_INTEGER(stw, stwx);
   1940       break;
   1941     case kCheckedStoreWord64:
   1942 #if V8_TARGET_ARCH_PPC64
   1943       ASSEMBLE_CHECKED_STORE_INTEGER(std, stdx);
   1944 #else
   1945       UNREACHABLE();
   1946 #endif
   1947       break;
   1948     case kCheckedStoreFloat32:
   1949       ASSEMBLE_CHECKED_STORE_FLOAT32();
   1950       break;
   1951     case kCheckedStoreFloat64:
   1952       ASSEMBLE_CHECKED_STORE_DOUBLE();
   1953       break;
   1954 
   1955     case kAtomicLoadInt8:
   1956       ASSEMBLE_ATOMIC_LOAD_INTEGER(lbz, lbzx);
   1957       __ extsb(i.OutputRegister(), i.OutputRegister());
   1958       break;
   1959     case kAtomicLoadUint8:
   1960       ASSEMBLE_ATOMIC_LOAD_INTEGER(lbz, lbzx);
   1961       break;
   1962     case kAtomicLoadInt16:
   1963       ASSEMBLE_ATOMIC_LOAD_INTEGER(lha, lhax);
   1964       break;
   1965     case kAtomicLoadUint16:
   1966       ASSEMBLE_ATOMIC_LOAD_INTEGER(lhz, lhzx);
   1967       break;
   1968     case kAtomicLoadWord32:
   1969       ASSEMBLE_ATOMIC_LOAD_INTEGER(lwz, lwzx);
   1970       break;
   1971 
   1972     case kAtomicStoreWord8:
   1973       ASSEMBLE_ATOMIC_STORE_INTEGER(stb, stbx);
   1974       break;
   1975     case kAtomicStoreWord16:
   1976       ASSEMBLE_ATOMIC_STORE_INTEGER(sth, sthx);
   1977       break;
   1978     case kAtomicStoreWord32:
   1979       ASSEMBLE_ATOMIC_STORE_INTEGER(stw, stwx);
   1980       break;
   1981     default:
   1982       UNREACHABLE();
   1983       break;
   1984   }
   1985   return kSuccess;
   1986 }  // NOLINT(readability/fn_size)
   1987 
   1988 
// Assembles branches after an instruction.
// Emits a conditional branch to the true label and, unless the false block
// immediately follows, an unconditional branch to the false label.
void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
  PPCOperandConverter i(this, instr);
  Label* tlabel = branch->true_label;
  Label* flabel = branch->false_label;
  ArchOpcode op = instr->arch_opcode();
  FlagsCondition condition = branch->condition;
  CRegister cr = cr0;

  Condition cond = FlagsConditionToCondition(condition, op);
  if (op == kPPC_CmpDouble) {
    // Double compares can leave the condition register unordered (FU bit)
    // when an operand is NaN; dispatch that case explicitly so the ordered
    // branch below sees only ordered results.
    // check for unordered if necessary
    if (cond == le) {
      __ bunordered(flabel, cr);
      // Unnecessary for eq/lt since only FU bit will be set.
    } else if (cond == gt) {
      __ bunordered(tlabel, cr);
      // Unnecessary for ne/ge since only FU bit will be set.
    }
  }
  __ b(cond, tlabel, cr);
  if (!branch->fallthru) __ b(flabel);  // no fallthru to flabel.
}
   2012 
   2013 
   2014 void CodeGenerator::AssembleArchJump(RpoNumber target) {
   2015   if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
   2016 }
   2017 
// Assembles a conditional trap: branches to an out-of-line stub that calls
// the trap builtin (or, under test, a C callback) when `condition` holds.
void CodeGenerator::AssembleArchTrap(Instruction* instr,
                                     FlagsCondition condition) {
  // Out-of-line code that performs the actual trap call; emitted after the
  // main instruction stream so the hot path stays branch-only.
  class OutOfLineTrap final : public OutOfLineCode {
   public:
    OutOfLineTrap(CodeGenerator* gen, bool frame_elided, Instruction* instr)
        : OutOfLineCode(gen),
          frame_elided_(frame_elided),
          instr_(instr),
          gen_(gen) {}

    void Generate() final {
      PPCOperandConverter i(gen_, instr_);

      // The trap builtin id is passed as the last input of the instruction.
      Builtins::Name trap_id =
          static_cast<Builtins::Name>(i.InputInt32(instr_->InputCount() - 1));
      bool old_has_frame = __ has_frame();
      // If the enclosing function elided its frame, build a WASM frame just
      // for the duration of the trap call and restore the flag afterwards.
      if (frame_elided_) {
        __ set_has_frame(true);
        __ EnterFrame(StackFrame::WASM_COMPILED, true);
      }
      GenerateCallToTrap(trap_id);
      if (frame_elided_) {
        __ set_has_frame(old_has_frame);
      }
    }

   private:
    void GenerateCallToTrap(Builtins::Name trap_id) {
      if (trap_id == Builtins::builtin_count) {
        // We cannot test calls to the runtime in cctest/test-run-wasm.
        // Therefore we emit a call to C here instead of a call to the runtime.
        // We use the context register as the scratch register, because we do
        // not have a context here.
        __ PrepareCallCFunction(0, 0, cp);
        __ CallCFunction(
            ExternalReference::wasm_call_trap_callback_for_testing(isolate()),
            0);
        __ LeaveFrame(StackFrame::WASM_COMPILED);
        __ Ret();
      } else {
        // Regular path: call the trap builtin and record a safepoint for it.
        gen_->AssembleSourcePosition(instr_);
        __ Call(handle(isolate()->builtins()->builtin(trap_id), isolate()),
                RelocInfo::CODE_TARGET);
        ReferenceMap* reference_map =
            new (gen_->zone()) ReferenceMap(gen_->zone());
        gen_->RecordSafepoint(reference_map, Safepoint::kSimple, 0,
                              Safepoint::kNoLazyDeopt);
        // The trap builtin should not return; catch it in debug builds.
        if (FLAG_debug_code) {
          __ stop(GetBailoutReason(kUnexpectedReturnFromWasmTrap));
        }
      }
    }

    bool frame_elided_;
    Instruction* instr_;
    CodeGenerator* gen_;
  };
  bool frame_elided = !frame_access_state()->has_frame();
  auto ool = new (zone()) OutOfLineTrap(this, frame_elided, instr);
  Label* tlabel = ool->entry();
  Label end;

  ArchOpcode op = instr->arch_opcode();
  CRegister cr = cr0;
  Condition cond = FlagsConditionToCondition(condition, op);
  if (op == kPPC_CmpDouble) {
    // check for unordered if necessary
    if (cond == le) {
      // Unordered (NaN) means "not le": skip the trap.
      __ bunordered(&end, cr);
      // Unnecessary for eq/lt since only FU bit will be set.
    } else if (cond == gt) {
      // Unordered (NaN) traps on the gt path.
      __ bunordered(tlabel, cr);
      // Unnecessary for ne/ge since only FU bit will be set.
    }
  }
  __ b(cond, tlabel, cr);
  __ bind(&end);
}
   2096 
// Assembles boolean materializations after an instruction.
// Produces 1 in the output register if `condition` holds, 0 otherwise, using
// the isel instruction when available and a branch sequence as fallback.
void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                        FlagsCondition condition) {
  PPCOperandConverter i(this, instr);
  Label done;
  ArchOpcode op = instr->arch_opcode();
  CRegister cr = cr0;
  // Tracks the value already loaded into the result register (-1 = none),
  // so redundant `li` instructions can be skipped below.
  int reg_value = -1;

  // Materialize a full 32-bit 1 or 0 value. The result register is always the
  // last output of the instruction.
  DCHECK_NE(0u, instr->OutputCount());
  Register reg = i.OutputRegister(instr->OutputCount() - 1);

  Condition cond = FlagsConditionToCondition(condition, op);
  if (op == kPPC_CmpDouble) {
    // NaN operands set the FU (unordered) bit; pre-load the result the
    // unordered case requires and jump straight to done when it is set.
    // check for unordered if necessary
    if (cond == le) {
      reg_value = 0;
      __ li(reg, Operand::Zero());
      __ bunordered(&done, cr);
    } else if (cond == gt) {
      reg_value = 1;
      __ li(reg, Operand(1));
      __ bunordered(&done, cr);
    }
    // Unnecessary for eq/lt & ne/ge since only FU bit will be set.
  }

  if (CpuFeatures::IsSupported(ISELECT)) {
    // Branchless materialization via integer select.
    switch (cond) {
      case eq:
      case lt:
      case gt:
        if (reg_value != 1) __ li(reg, Operand(1));
        __ li(kScratchReg, Operand::Zero());
        __ isel(cond, reg, reg, kScratchReg, cr);
        break;
      case ne:
      case ge:
      case le:
        // Negated conditions select the zero operand when the original
        // condition holds.
        if (reg_value != 1) __ li(reg, Operand(1));
        // r0 implies logical zero in this form
        __ isel(NegateCondition(cond), reg, r0, reg, cr);
        break;
    default:
      UNREACHABLE();
      break;
    }
  } else {
    // Fallback: load 0, branch over the load of 1 when the condition fails.
    if (reg_value != 0) __ li(reg, Operand::Zero());
    __ b(NegateCondition(cond), &done, cr);
    __ li(reg, Operand(1));
  }
  __ bind(&done);
}
   2153 
   2154 
   2155 void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
   2156   PPCOperandConverter i(this, instr);
   2157   Register input = i.InputRegister(0);
   2158   for (size_t index = 2; index < instr->InputCount(); index += 2) {
   2159     __ Cmpwi(input, Operand(i.InputInt32(index + 0)), r0);
   2160     __ beq(GetLabel(i.InputRpo(index + 1)));
   2161   }
   2162   AssembleArchJump(i.InputRpo(1));
   2163 }
   2164 
   2165 
   2166 void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
   2167   PPCOperandConverter i(this, instr);
   2168   Register input = i.InputRegister(0);
   2169   int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
   2170   Label** cases = zone()->NewArray<Label*>(case_count);
   2171   for (int32_t index = 0; index < case_count; ++index) {
   2172     cases[index] = GetLabel(i.InputRpo(index + 2));
   2173   }
   2174   Label* const table = AddJumpTable(cases, case_count);
   2175   __ Cmpli(input, Operand(case_count), r0);
   2176   __ bge(GetLabel(i.InputRpo(1)));
   2177   __ mov_label_addr(kScratchReg, table);
   2178   __ ShiftLeftImm(r0, input, Operand(kPointerSizeLog2));
   2179   __ LoadPX(kScratchReg, MemOperand(kScratchReg, r0));
   2180   __ Jump(kScratchReg);
   2181 }
   2182 
   2183 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
   2184     int deoptimization_id, SourcePosition pos) {
   2185   DeoptimizeKind deoptimization_kind = GetDeoptimizationKind(deoptimization_id);
   2186   DeoptimizeReason deoptimization_reason =
   2187       GetDeoptimizationReason(deoptimization_id);
   2188   Deoptimizer::BailoutType bailout_type =
   2189       deoptimization_kind == DeoptimizeKind::kSoft ? Deoptimizer::SOFT
   2190                                                    : Deoptimizer::EAGER;
   2191   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
   2192       isolate(), deoptimization_id, bailout_type);
   2193   // TODO(turbofan): We should be able to generate better code by sharing the
   2194   // actual final call site and just bl'ing to it here, similar to what we do
   2195   // in the lithium backend.
   2196   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
   2197   __ RecordDeoptReason(deoptimization_reason, pos, deoptimization_id);
   2198   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   2199   return kSuccess;
   2200 }
   2201 
   2202 void CodeGenerator::FinishFrame(Frame* frame) {
   2203   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
   2204   const RegList double_saves = descriptor->CalleeSavedFPRegisters();
   2205 
   2206   // Save callee-saved Double registers.
   2207   if (double_saves != 0) {
   2208     frame->AlignSavedCalleeRegisterSlots();
   2209     DCHECK(kNumCalleeSavedDoubles ==
   2210            base::bits::CountPopulation32(double_saves));
   2211     frame->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
   2212                                              (kDoubleSize / kPointerSize));
   2213   }
   2214   // Save callee-saved registers.
   2215   const RegList saves =
   2216       FLAG_enable_embedded_constant_pool
   2217           ? descriptor->CalleeSavedRegisters() & ~kConstantPoolRegister.bit()
   2218           : descriptor->CalleeSavedRegisters();
   2219   if (saves != 0) {
   2220     // register save area does not include the fp or constant pool pointer.
   2221     const int num_saves =
   2222         kNumCalleeSaved - 1 - (FLAG_enable_embedded_constant_pool ? 1 : 0);
   2223     DCHECK(num_saves == base::bits::CountPopulation32(saves));
   2224     frame->AllocateSavedCalleeRegisterSlots(num_saves);
   2225   }
   2226 }
   2227 
// Assembles the function prologue: builds the appropriate frame type,
// allocates the remaining stack slots, and saves callee-saved registers.
void CodeGenerator::AssembleConstructFrame() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  if (frame_access_state()->has_frame()) {
    if (descriptor->IsCFunctionCall()) {
      // C ABI frame: save LR and FP (plus the constant pool pointer when
      // embedded constant pools are enabled).
      __ function_descriptor();
      __ mflr(r0);
      if (FLAG_enable_embedded_constant_pool) {
        __ Push(r0, fp, kConstantPoolRegister);
        // Adjust FP to point to saved FP.
        __ subi(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
      } else {
        __ Push(r0, fp);
        __ mr(fp, sp);
      }
    } else if (descriptor->IsJSFunctionCall()) {
      __ Prologue(this->info()->GeneratePreagedPrologue(), ip);
      if (descriptor->PushArgumentCount()) {
        __ Push(kJavaScriptCallArgCountRegister);
      }
    } else {
      // Stub frame (and anything else that is neither a C nor a JS call).
      StackFrame::Type type = info()->GetOutputStackFrameType();
      // TODO(mbrandy): Detect cases where ip is the entrypoint (for
      // efficient initialization of the constant pool pointer register).
      __ StubPrologue(type);
    }
  }

  // Slots beyond the fixed frame that still need to be allocated.
  int shrink_slots =
      frame()->GetTotalFrameSlotCount() - descriptor->CalculateFixedFrameSize();
  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly from
    // the unoptimized frame. Thus, all that needs to be done is to allocate the
    // remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
  }

  const RegList double_saves = descriptor->CalleeSavedFPRegisters();
  if (shrink_slots > 0) {
    __ Add(sp, sp, -shrink_slots * kPointerSize, r0);
  }

  // Save callee-saved Double registers.
  if (double_saves != 0) {
    __ MultiPushDoubles(double_saves);
    DCHECK(kNumCalleeSavedDoubles ==
           base::bits::CountPopulation32(double_saves));
  }

  // Save callee-saved registers.
  const RegList saves =
      FLAG_enable_embedded_constant_pool
          ? descriptor->CalleeSavedRegisters() & ~kConstantPoolRegister.bit()
          : descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    __ MultiPush(saves);
    // register save area does not include the fp or constant pool pointer.
  }
}
   2292 
// Assembles the function epilogue: restores callee-saved registers,
// deconstructs the frame, pops stack arguments, and returns.
void CodeGenerator::AssembleReturn(InstructionOperand* pop) {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
  int pop_count = static_cast<int>(descriptor->StackParameterCount());

  // Restore registers.
  const RegList saves =
      FLAG_enable_embedded_constant_pool
          ? descriptor->CalleeSavedRegisters() & ~kConstantPoolRegister.bit()
          : descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    __ MultiPop(saves);
  }

  // Restore double registers.
  const RegList double_saves = descriptor->CalleeSavedFPRegisters();
  if (double_saves != 0) {
    __ MultiPopDoubles(double_saves);
  }
  PPCOperandConverter g(this, nullptr);

  if (descriptor->IsCFunctionCall()) {
    AssembleDeconstructFrame();
  } else if (frame_access_state()->has_frame()) {
    // Canonicalize JSFunction return sites for now unless they have a variable
    // number of stack slot pops
    if (pop->IsImmediate() && g.ToConstant(pop).ToInt32() == 0) {
      // Share a single return sequence: later returns branch to the first
      // bound return label.
      if (return_label_.is_bound()) {
        __ b(&return_label_);
        return;
      } else {
        __ bind(&return_label_);
        AssembleDeconstructFrame();
      }
    } else {
      AssembleDeconstructFrame();
    }
  }
  // Pop the static parameter count plus any additional (immediate or
  // register-supplied) pop count.
  if (pop->IsImmediate()) {
    DCHECK_EQ(Constant::kInt32, g.ToConstant(pop).type());
    pop_count += g.ToConstant(pop).ToInt32();
  } else {
    __ Drop(g.ToRegister(pop));
  }
  __ Drop(pop_count);
  __ Ret();
}
   2339 
   2340 
// Assembles a move between two instruction operands (register, stack slot,
// constant, FP register, FP stack slot), emitted by the gap resolver.
void CodeGenerator::AssembleMove(InstructionOperand* source,
                                 InstructionOperand* destination) {
  PPCOperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      __ Move(g.ToRegister(destination), src);
    } else {
      __ StoreP(src, g.ToMemOperand(destination), r0);
    }
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsRegister() || destination->IsStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsRegister()) {
      __ LoadP(g.ToRegister(destination), src, r0);
    } else {
      // Slot-to-slot move goes through the scratch register.
      Register temp = kScratchReg;
      __ LoadP(temp, src, r0);
      __ StoreP(temp, g.ToMemOperand(destination), r0);
    }
  } else if (source->IsConstant()) {
    Constant src = g.ToConstant(source);
    if (destination->IsRegister() || destination->IsStackSlot()) {
      // Materialize into the destination register, or into the scratch
      // register followed by a store for stack-slot destinations.
      Register dst =
          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
      switch (src.type()) {
        case Constant::kInt32:
          // Wasm references must carry their reloc mode so the value can be
          // patched later.
#if V8_TARGET_ARCH_PPC64
          if (RelocInfo::IsWasmSizeReference(src.rmode())) {
#else
          if (RelocInfo::IsWasmReference(src.rmode())) {
#endif
            __ mov(dst, Operand(src.ToInt32(), src.rmode()));
          } else {
            __ mov(dst, Operand(src.ToInt32()));
          }
          break;
        case Constant::kInt64:
#if V8_TARGET_ARCH_PPC64
          if (RelocInfo::IsWasmPtrReference(src.rmode())) {
            __ mov(dst, Operand(src.ToInt64(), src.rmode()));
          } else {
            DCHECK(!RelocInfo::IsWasmSizeReference(src.rmode()));
#endif
            __ mov(dst, Operand(src.ToInt64()));
#if V8_TARGET_ARCH_PPC64
          }
#endif
          break;
        case Constant::kFloat32:
          // Number constants in a general register are boxed as heap numbers.
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
          break;
        case Constant::kFloat64:
          __ Move(dst,
                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
          break;
        case Constant::kExternalReference:
          __ mov(dst, Operand(src.ToExternalReference()));
          break;
        case Constant::kHeapObject: {
          Handle<HeapObject> src_object = src.ToHeapObject();
          Heap::RootListIndex index;
          // Prefer loading from the roots array over embedding the handle.
          if (IsMaterializableFromRoot(src_object, &index)) {
            __ LoadRoot(dst, index);
          } else {
            __ Move(dst, src_object);
          }
          break;
        }
        case Constant::kRpoNumber:
          UNREACHABLE();  // TODO(dcarney): loading RPO constants on PPC.
          break;
      }
      if (destination->IsStackSlot()) {
        __ StoreP(dst, g.ToMemOperand(destination), r0);
      }
    } else {
      // Floating-point constant destination.
      DoubleRegister dst = destination->IsFPRegister()
                               ? g.ToDoubleRegister(destination)
                               : kScratchDoubleReg;
      double value;
// bit_cast of snan is converted to qnan on ia32/x64
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
      // Cross-compiling from ia32/x64: detect the canonical signaling-NaN bit
      // pattern and substitute the quiet-NaN pattern explicitly, since the
      // host's float pipeline would quiet it anyway.
      intptr_t valueInt = (src.type() == Constant::kFloat32)
                              ? src.ToFloat32AsInt()
                              : src.ToFloat64AsInt();
      if (valueInt == ((src.type() == Constant::kFloat32)
                           ? 0x7fa00000
                           : 0x7fa0000000000000)) {
        value = bit_cast<double, int64_t>(0x7ff4000000000000L);
      } else {
#endif
        value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
                                                   : src.ToFloat64();
#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
      }
#endif
      __ LoadDoubleLiteral(dst, value, kScratchReg);
      if (destination->IsFPStackSlot()) {
        __ StoreDouble(dst, g.ToMemOperand(destination), r0);
      }
    }
  } else if (source->IsFPRegister()) {
    DoubleRegister src = g.ToDoubleRegister(source);
    if (destination->IsFPRegister()) {
      DoubleRegister dst = g.ToDoubleRegister(destination);
      __ Move(dst, src);
    } else {
      DCHECK(destination->IsFPStackSlot());
      // Use the source's representation to pick the store width.
      LocationOperand* op = LocationOperand::cast(source);
      if (op->representation() == MachineRepresentation::kFloat64) {
        __ StoreDouble(src, g.ToMemOperand(destination), r0);
      } else {
        __ StoreSingle(src, g.ToMemOperand(destination), r0);
      }
    }
  } else if (source->IsFPStackSlot()) {
    DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
    MemOperand src = g.ToMemOperand(source);
    if (destination->IsFPRegister()) {
      LocationOperand* op = LocationOperand::cast(source);
      if (op->representation() == MachineRepresentation::kFloat64) {
        __ LoadDouble(g.ToDoubleRegister(destination), src, r0);
      } else {
        __ LoadSingle(g.ToDoubleRegister(destination), src, r0);
      }
    } else {
      // FP slot-to-slot move goes through the scratch double register.
      LocationOperand* op = LocationOperand::cast(source);
      DoubleRegister temp = kScratchDoubleReg;
      if (op->representation() == MachineRepresentation::kFloat64) {
        __ LoadDouble(temp, src, r0);
        __ StoreDouble(temp, g.ToMemOperand(destination), r0);
      } else {
        __ LoadSingle(temp, src, r0);
        __ StoreSingle(temp, g.ToMemOperand(destination), r0);
      }
    }
  } else {
    UNREACHABLE();
  }
}
   2486 
   2487 
// Assembles a swap of two instruction operands, used by the gap resolver to
// break move cycles.
void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  PPCOperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    Register temp = kScratchReg;
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      Register dst = g.ToRegister(destination);
      __ mr(temp, src);
      __ mr(src, dst);
      __ mr(dst, temp);
    } else {
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ mr(temp, src);
      __ LoadP(src, dst);
      __ StoreP(temp, dst);
    }
#if V8_TARGET_ARCH_PPC64
  // On 64-bit, FP stack slots are pointer-sized and can share the
  // general-register slot-swap path below.
  } else if (source->IsStackSlot() || source->IsFPStackSlot()) {
#else
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsStackSlot());
#endif
    // Slot-slot swap through two scratch registers.
    Register temp_0 = kScratchReg;
    Register temp_1 = r0;
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ LoadP(temp_0, src);
    __ LoadP(temp_1, dst);
    __ StoreP(temp_0, dst);
    __ StoreP(temp_1, src);
  } else if (source->IsFPRegister()) {
    DoubleRegister temp = kScratchDoubleReg;
    DoubleRegister src = g.ToDoubleRegister(source);
    if (destination->IsFPRegister()) {
      DoubleRegister dst = g.ToDoubleRegister(destination);
      __ fmr(temp, src);
      __ fmr(src, dst);
      __ fmr(dst, temp);
    } else {
      DCHECK(destination->IsFPStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ fmr(temp, src);
      __ lfd(src, dst);
      __ stfd(temp, dst);
    }
#if !V8_TARGET_ARCH_PPC64
  // 32-bit only: FP slot-slot swap through two scratch double registers
  // (on 64-bit this case is folded into the integer slot-swap above).
  } else if (source->IsFPStackSlot()) {
    DCHECK(destination->IsFPStackSlot());
    DoubleRegister temp_0 = kScratchDoubleReg;
    DoubleRegister temp_1 = d0;
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ lfd(temp_0, src);
    __ lfd(temp_1, dst);
    __ stfd(temp_0, dst);
    __ stfd(temp_1, src);
#endif
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}
   2555 
   2556 
   2557 void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
   2558   for (size_t index = 0; index < target_count; ++index) {
   2559     __ emit_label_addr(targets[index]);
   2560   }
   2561 }
   2562 
   2563 
   2564 void CodeGenerator::EnsureSpaceForLazyDeopt() {
   2565   if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
   2566     return;
   2567   }
   2568 
   2569   int space_needed = Deoptimizer::patch_size();
   2570   // Ensure that we have enough space after the previous lazy-bailout
   2571   // instruction for patching the code here.
   2572   int current_pc = masm()->pc_offset();
   2573   if (current_pc < last_lazy_deopt_pc_ + space_needed) {
   2574     // Block tramoline pool emission for duration of padding.
   2575     v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
   2576         masm());
   2577     int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
   2578     DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
   2579     while (padding_size > 0) {
   2580       __ nop();
   2581       padding_size -= v8::internal::Assembler::kInstrSize;
   2582     }
   2583   }
   2584 }
   2585 
   2586 #undef __
   2587 
   2588 }  // namespace compiler
   2589 }  // namespace internal
   2590 }  // namespace v8
   2591