// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/crankshaft/ppc/lithium-codegen-ppc.h"

#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/crankshaft/hydrogen-osr.h"
#include "src/crankshaft/ppc/lithium-gap-resolver-ppc.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"
#include "src/profiler/cpu-profiler.h"

namespace v8 {
namespace internal {

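// SafepointGenerator instances are typically passed to the macro assembler's
// invoke helpers as the CallWrapper argument, so that a safepoint is
// recorded immediately after each generated call.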
class SafepointGenerator final : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) {}
  virtual ~SafepointGenerator() {}

  void BeforeCall(int call_size) const override {}

  void AfterCall() const override {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};


#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  bool rc = GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
            GenerateJumpTable() && GenerateSafepointTable();
  if (FLAG_enable_embedded_constant_pool && !rc) {
    masm()->AbortConstantPoolBuilding();
  }
  return rc;
}


void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetStackSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}


void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered caller double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ stfd(DoubleRegister::from_code(save_iterator.Current()),
            MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered caller double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ lfd(DoubleRegister::from_code(save_iterator.Current()),
           MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}


bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

#ifdef DEBUG
    if (strlen(FLAG_stop_at) > 0 &&
        info_->literal()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
      __ stop("stop_at");
    }
#endif

    // r4: Callee's JS function.
    // cp: Callee's context.
    // pp: Callee's constant pool pointer (if enabled)
    // fp: Caller's frame pointer.
    // lr: Caller's pc.
    // ip: Our own function entry (required by the prologue)
  }

  int prologue_offset = masm_->pc_offset();

  if (prologue_offset) {
    // Prologue logic requires its starting address in ip and the
    // corresponding offset from the function entry.
    prologue_offset += Instruction::kInstrSize;
    __ addi(ip, ip, Operand(prologue_offset));
  }
  info()->set_prologue_offset(prologue_offset);
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue(ip, prologue_offset);
    } else {
      __ Prologue(info()->GeneratePreagedPrologue(), ip, prologue_offset);
    }
    frame_is_built_ = true;
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    __ subi(sp, sp, Operand(slots * kPointerSize));
    if (FLAG_debug_code) {
      __ Push(r3, r4);
      __ li(r0, Operand(slots));
      __ mtctr(r0);
      __ addi(r3, sp, Operand((slots + 2) * kPointerSize));
      __ mov(r4, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ StorePU(r4, MemOperand(r3, -kPointerSize));
      __ bdnz(&loop);
      __ Pop(r3, r4);
    }
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }
  return !is_aborted();
}


void LCodeGen::DoPrologue(LPrologue* instr) {
  Comment(";;; Prologue begin");

  // Possibly allocate a local context.
  if (info()->scope()->num_heap_slots() > 0) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in r4.
    int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
    if (info()->scope()->is_script_scope()) {
      __ push(r4);
      __ Push(info()->scope()->GetScopeInfo(info()->isolate()));
      __ CallRuntime(Runtime::kNewScriptContext);
      deopt_mode = Safepoint::kLazyDeopt;
    } else if (slots <= FastNewContextStub::kMaximumSlots) {
      FastNewContextStub stub(isolate(), slots);
      __ CallStub(&stub);
      // Result of FastNewContextStub is always in new space.
      need_write_barrier = false;
    } else {
      __ push(r4);
      __ CallRuntime(Runtime::kNewFunctionContext);
    }
    RecordSafepoint(deopt_mode);

    // Context is returned in both r3 and cp.  It replaces the context
    // passed to us.  It is saved on the stack and kept live in cp.
    __ mr(cp, r3);
    __ StoreP(r3, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = scope()->num_parameters();
    int first_parameter = scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? scope()->receiver() : scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
                               (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ LoadP(r3, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextMemOperand(cp, var->index());
        __ StoreP(r3, target, r0);
        // Update the write barrier. This clobbers r6 and r3.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(cp, target.offset(), r3, r6,
                                    GetLinkRegisterState(), kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, r3, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  Comment(";;; Prologue end");
}


void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if there
  // are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ subi(sp, sp, Operand(slots * kPointerSize));
}


void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}


bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(
          chunk()->graph()->SourcePositionToScriptPosition(value->position()));

      Comment(
          ";;; <@%d,#%d> "
          "-------------------- Deferred %s --------------------",
          code->instruction_index(), code->instr()->hydrogen_value()->id(),
          code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ LoadSmiLiteral(scratch0(), Smi::FromInt(StackFrame::STUB));
        __ PushFixedFrame(scratch0());
        __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ PopFixedFrame(ip);
        frame_is_built_ = false;
      }
      __ b(code->exit());
    }
  }

  return !is_aborted();
}


bool LCodeGen::GenerateJumpTable() {
  // Check that the jump table is accessible from everywhere in the function
  // code, i.e. that offsets to the table can be encoded in the 24-bit signed
  // immediate of a branch instruction.
  // To simplify, we consider the code size from the first instruction to the
  // end of the jump table. We also don't consider the pc load delta.
  // Each entry in the jump table generates one instruction and inlines one
  // 32-bit data word after it.
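  // Worked example (illustrative numbers, not from the source): the 24-bit
  // signed immediate covers +/-2^23 instruction slots, so with, say, 100
  // jump table entries contributing 100 * 7 = 700 slots, the body preceding
  // the table must stay under roughly 2^23 - 700 instructions for the check
  // below to pass.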
  if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
                jump_table_.length() * 7)) {
    Abort(kGeneratedCodeIsTooLarge);
  }

  if (jump_table_.length() > 0) {
    Label needs_frame, call_deopt_entry;

    Comment(";;; -------------------- Jump table --------------------");
    Address base = jump_table_[0].address;

    Register entry_offset = scratch0();

    int length = jump_table_.length();
    for (int i = 0; i < length; i++) {
      Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
      __ bind(&table_entry->label);

      DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
      Address entry = table_entry->address;
      DeoptComment(table_entry->deopt_info);

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load an immediate
      // offset which will be added to the base address later.
      __ mov(entry_offset, Operand(entry - base));

      if (table_entry->needs_frame) {
        DCHECK(!info()->saves_caller_doubles());
        Comment(";;; call deopt with frame");
        __ PushFixedFrame();
        __ b(&needs_frame, SetLK);
      } else {
        __ b(&call_deopt_entry, SetLK);
      }
      info()->LogDeoptCallPosition(masm()->pc_offset(),
                                   table_entry->deopt_info.inlining_id);
    }

    if (needs_frame.is_linked()) {
      __ bind(&needs_frame);
      // This variant of deopt can only be used with stubs. Since we don't
      // have a function pointer to install in the stack frame that we're
      // building, install a special marker there instead.
      DCHECK(info()->IsStub());
      __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
      __ push(ip);
      __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
    }

    Comment(";;; call deopt");
    __ bind(&call_deopt_entry);

    if (info()->saves_caller_doubles()) {
      DCHECK(info()->IsStub());
      RestoreCallerDoubles();
    }

    // Add the base address to the offset previously loaded in entry_offset.
    __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
    __ add(ip, entry_offset, ip);
    __ Jump(ip);
  }

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}


bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  safepoints_.Emit(masm(), GetStackSlotCount());
  return !is_aborted();
}


Register LCodeGen::ToRegister(int code) const {
  return Register::from_code(code);
}


DoubleRegister LCodeGen::ToDoubleRegister(int code) const {
  return DoubleRegister::from_code(code);
}


Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}


Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      AllowDeferredHandleDereference get_number;
      DCHECK(literal->IsNumber());
      __ LoadIntLiteral(scratch, static_cast<int32_t>(literal->Number()));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      DCHECK(r.IsSmiOrTagged());
      __ Move(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot()) {
    __ LoadP(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}


void LCodeGen::EmitLoadIntegerConstant(LConstantOperand* const_op,
                                       Register dst) {
  DCHECK(IsInteger32(const_op));
  HConstant* constant = chunk_->LookupConstant(const_op);
  int32_t value = constant->Integer32Value();
  if (IsSmi(const_op)) {
    __ LoadSmiLiteral(dst, Smi::FromInt(value));
  } else {
    __ LoadIntLiteral(dst, value);
  }
}


DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}


Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}


bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}


bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}


int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}


intptr_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                    const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(r.IsSmiOrTagged());
  return reinterpret_cast<intptr_t>(Smi::FromInt(value));
}


Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}


double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}


Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand::Zero();
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand::Zero();
}

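// Maps an (always negative) argument index to a stack-pointer-relative
// offset when no frame has been built. Illustrative (assuming 64-bit
// kPointerSize == 8): index -1 -> offset 0, index -2 -> offset 8.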
static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize;
}


MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()));
  } else {
    // With no eager frame, retrieve the parameter relative to the stack
    // pointer.
    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}


MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  DCHECK(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
  } else {
    // With no eager frame, retrieve the parameter relative to the stack
    // pointer.
    return MemOperand(sp,
                      ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}


void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();

  WriteTranslation(environment->outer(), translation);
  WriteTranslationFrame(environment, translation);

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(
        environment, translation, value, environment->HasTaggedValueAt(i),
        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
  }
}


void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation, LOperand* op,
                                bool is_tagged, bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment, translation, value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer, dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    int index = op->index();
    if (index >= 0) {
      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
    }
    if (is_tagged) {
      translation->StoreStackSlot(index);
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(index);
    } else {
      translation->StoreInt32StackSlot(index);
    }
  } else if (op->IsDoubleStackSlot()) {
    int index = op->index();
    if (index >= 0) {
      index += StandardFrameConstants::kFixedFrameSize / kPointerSize;
    }
    translation->StoreDoubleStackSlot(index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallCode(Handle<Code> code, RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC || code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}


void LCodeGen::CallRuntime(const Runtime::Function* function, int num_arguments,
                           LInstruction* instr, SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}


void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ LoadP(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}


void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
                                       LInstruction* instr, LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(instr->pointer_map(), argc,
                               Safepoint::kNoLazyDeopt);
}


void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index, translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}


void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type,
                            CRegister cr) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    CRegister alt_cr = cr6;
    Register scratch = scratch0();
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;
    DCHECK(!alt_cr.is(cr));
    __ Push(r4, scratch);
    __ mov(scratch, Operand(count));
    __ lwz(r4, MemOperand(scratch));
    __ subi(r4, r4, Operand(1));
    __ cmpi(r4, Operand::Zero(), alt_cr);
    __ bne(&no_deopt, alt_cr);
    __ li(r4, Operand(FLAG_deopt_every_n_times));
    __ stw(r4, MemOperand(scratch));
    __ Pop(r4, scratch);

    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    __ bind(&no_deopt);
    __ stw(r4, MemOperand(scratch));
    __ Pop(r4, scratch);
  }

  if (info()->ShouldTrapOnDeopt()) {
    __ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason);

  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
    DeoptComment(deopt_info);
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
    info()->LogDeoptCallPosition(masm()->pc_offset(), deopt_info.inlining_id);
  } else {
    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                            !frame_is_built_);
    // We often have several deopts to the same entry; reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->cpu_profiler()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry.IsEquivalentTo(jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    __ b(cond, &jump_table_.last().label, cr);
  }
}


void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            CRegister cr) {
  Deoptimizer::BailoutType bailout_type =
      info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
  DeoptimizeIf(condition, instr, deopt_reason, bailout_type, cr);
}


void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
                                            SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(instr->pointer_map(), 0,
                                 Safepoint::kLazyDeopt);
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
                               int arguments, Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint =
      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}


void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}


void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}


void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}


void LCodeGen::RecordAndWritePosition(int position) {
  if (position == RelocInfo::kNoPosition) return;
  masm()->positions_recorder()->RecordPosition(position);
  masm()->positions_recorder()->WriteRecordedPositions();
}


static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}


void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_, label->hydrogen_value()->id(),
          label->block_id(), LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}


void LCodeGen::DoParallelMove(LParallelMove* move) { resolver_.Resolve(move); }


void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION; i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}


void LCodeGen::DoInstructionGap(LInstructionGap* instr) { DoGap(instr); }


void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}


void LCodeGen::DoCallStub(LCallStub* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->result()).is(r3));
  switch (instr->hydrogen()->major_key()) {
    case CodeStub::RegExpExec: {
      RegExpExecStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    case CodeStub::SubString: {
      SubStringStub stub(isolate());
      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
      break;
    }
    default:
      UNREACHABLE();
  }
}


void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}


void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
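  // Worked example of the negative path below (illustrative): for
  // dividend == -5 and divisor == 4 (shift == 2),
  //   neg:             -5 -> 5
  //   ExtractBitRange:  5 & 0b11 -> 1
  //   neg:              1 -> -1
  // which matches JavaScript's -5 % 4 == -1.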
  HMod* hmod = instr->hydrogen();
  int32_t shift = WhichPowerOf2Abs(divisor);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ cmpwi(dividend, Operand::Zero());
    __ bge(&dividend_is_not_negative);
    if (shift) {
      // Note that this is correct even for kMinInt operands.
      __ neg(dividend, dividend);
      __ ExtractBitRange(dividend, dividend, shift - 1, 0);
      __ neg(dividend, dividend, LeaveOE, SetRC);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
      }
    } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ li(dividend, Operand::Zero());
    } else {
      DeoptimizeIf(al, instr, Deoptimizer::kMinusZero);
    }
    __ b(&done);
  }

  __ bind(&dividend_is_not_negative);
  if (shift) {
    __ ExtractBitRange(dividend, dividend, shift - 1, 0);
  } else {
    __ li(dividend, Operand::Zero());
  }
  __ bind(&done);
}


void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ mov(ip, Operand(Abs(divisor)));
  __ mullw(result, result, ip);
  __ sub(result, dividend, result, LeaveOE, SetRC);
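  // At this point result == dividend - Abs(divisor) * trunc(dividend /
  // Abs(divisor)). Since a truncating remainder takes the sign of the
  // dividend, using Abs(divisor) is equivalent to using divisor itself
  // (illustrative: 7 % -3 == 7 % 3 == 1, and -7 % 3 == -7 % -3 == -1).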

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ bne(&remainder_not_zero, cr0);
    __ cmpwi(dividend, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
    __ bind(&remainder_not_zero);
  }
}


void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  Register left_reg = ToRegister(instr->left());
  Register right_reg = ToRegister(instr->right());
  Register result_reg = ToRegister(instr->result());
  Register scratch = scratch0();
  bool can_overflow = hmod->CheckFlag(HValue::kCanOverflow);
  Label done;

  if (can_overflow) {
    __ li(r0, Operand::Zero());  // clear xer
    __ mtxer(r0);
  }

  __ divw(scratch, left_reg, right_reg, SetOE, SetRC);

  // Check for x % 0.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    __ cmpwi(right_reg, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
  }
  // Check for kMinInt % -1; divw will return an undefined result, which is
  // not what we want. We have to deopt if we care about -0, because we can't
  // return that.
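  // (Mathematically kMinInt % -1 is 0, but since the dividend is negative
  // JavaScript requires -0 here, hence the bailout when -0 matters.)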
  if (can_overflow) {
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero, cr0);
    } else {
      if (CpuFeatures::IsSupported(ISELECT)) {
        __ isel(overflow, result_reg, r0, result_reg, cr0);
        __ boverflow(&done, cr0);
      } else {
        Label no_overflow_possible;
        __ bnooverflow(&no_overflow_possible, cr0);
        __ li(result_reg, Operand::Zero());
        __ b(&done);
        __ bind(&no_overflow_possible);
      }
    }
  }

  __ mullw(scratch, right_reg, scratch);
  __ sub(result_reg, left_reg, scratch, LeaveOE, SetRC);

  // If we care about -0, test if the dividend is <0 and the result is 0.
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ bne(&done, cr0);
    __ cmpwi(left_reg, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
  }

  __ bind(&done);
}


void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ cmpwi(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
    __ cmpw(dividend, r0);
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
  }

  int32_t shift = WhichPowerOf2Abs(divisor);

  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
    __ TestBitRange(dividend, shift - 1, 0, r0);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, cr0);
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ neg(result, dividend);
    return;
  }
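  // The sequence below implements a truncating (round-toward-zero) shift by
  // first adding 2^shift - 1 to negative dividends. Illustrative example:
  // -7 / 4 (shift == 2) computes (-7 + 3) >> 2 == -1, whereas a bare
  // arithmetic shift would floor to -2.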
  if (shift == 0) {
    __ mr(result, dividend);
  } else {
    if (shift == 1) {
      __ srwi(result, dividend, Operand(31));
    } else {
      __ srawi(result, dividend, 31);
      __ srwi(result, result, Operand(32 - shift));
    }
    __ add(result, dividend, result);
    __ srawi(result, result, shift);
  }
  if (divisor < 0) __ neg(result, result);
}


void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ cmpwi(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ neg(result, result);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    Register scratch = scratch0();
    __ mov(ip, Operand(divisor));
    __ mullw(scratch, result, ip);
    __ cmpw(scratch, dividend);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
  }
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  const Register dividend = ToRegister(instr->dividend());
  const Register divisor = ToRegister(instr->divisor());
  Register result = ToRegister(instr->result());
  bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow);

  DCHECK(!dividend.is(result));
  DCHECK(!divisor.is(result));

  if (can_overflow) {
    __ li(r0, Operand::Zero());  // clear xer
    __ mtxer(r0);
  }

  __ divw(result, dividend, divisor, SetOE, SetRC);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ cmpwi(divisor, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ cmpwi(dividend, Operand::Zero());
    __ bne(&dividend_not_zero);
    __ cmpwi(divisor, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (can_overflow) {
    if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
      DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
    } else {
      // When truncating, we want kMinInt / -1 = kMinInt.
      if (CpuFeatures::IsSupported(ISELECT)) {
        __ isel(overflow, result, dividend, result, cr0);
      } else {
        Label no_overflow_possible;
        __ bnooverflow(&no_overflow_possible, cr0);
        __ mr(result, dividend);
        __ bind(&no_overflow_possible);
      }
    }
  }

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    // Deoptimize if remainder is not 0.
    Register scratch = scratch0();
    __ mullw(scratch, divisor, result);
    __ cmpw(dividend, scratch);
    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
  }
}


void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register result = ToRegister(instr->result());
  int32_t divisor = instr->divisor();
  bool can_overflow = hdiv->CheckFlag(HValue::kLeftCanBeMinInt);

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 0) {
    if (shift || !result.is(dividend)) {
      __ srawi(result, dividend, shift);
    }
    return;
  }

  // If the divisor is negative, we have to negate and handle edge cases.
  OEBit oe = LeaveOE;
#if V8_TARGET_ARCH_PPC64
  if (divisor == -1 && can_overflow) {
    __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
    __ cmpw(dividend, r0);
    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
  }
#else
  if (can_overflow) {
    __ li(r0, Operand::Zero());  // clear xer
    __ mtxer(r0);
    oe = SetOE;
  }
#endif

  __ neg(result, dividend, oe, SetRC);
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
  }

// If the negation could not overflow, simply shifting is OK.
#if !V8_TARGET_ARCH_PPC64
  if (!can_overflow) {
#endif
    if (shift) {
      __ ShiftRightArithImm(result, result, shift);
    }
    return;
#if !V8_TARGET_ARCH_PPC64
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
    return;
  }

  Label overflow, done;
  __ boverflow(&overflow, cr0);
  __ srawi(result, result, shift);
  __ b(&done);
  __ bind(&overflow);
  __ mov(result, Operand(kMinInt / divisor));
  __ bind(&done);
#endif
}


void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ cmpwi(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ neg(result, result);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
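  // Illustrative example: floor(-7 / 2) == -4 while truncation gives -3;
  // the adjustment below computes trunc((-7 + 1) / 2) - 1 == -3 - 1 == -4.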
  Register temp = ToRegister(instr->temp());
  DCHECK(!temp.is(dividend) && !temp.is(result));
  Label needs_adjustment, done;
  __ cmpwi(dividend, Operand::Zero());
  __ b(divisor > 0 ? lt : gt, &needs_adjustment);
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ neg(result, result);
  __ b(&done);
  __ bind(&needs_adjustment);
  __ addi(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ neg(result, result);
  __ subi(result, result, Operand(1));
  __ bind(&done);
}


// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  const Register dividend = ToRegister(instr->dividend());
  const Register divisor = ToRegister(instr->divisor());
  Register result = ToRegister(instr->result());
  bool can_overflow = hdiv->CheckFlag(HValue::kCanOverflow);

  DCHECK(!dividend.is(result));
  DCHECK(!divisor.is(result));

  if (can_overflow) {
    __ li(r0, Operand::Zero());  // clear xer
    __ mtxer(r0);
  }

  __ divw(result, dividend, divisor, SetOE, SetRC);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ cmpwi(divisor, Operand::Zero());
    DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ cmpwi(dividend, Operand::Zero());
    __ bne(&dividend_not_zero);
    __ cmpwi(divisor, Operand::Zero());
    DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (can_overflow) {
    if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
      DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
    } else {
      // When truncating, we want kMinInt / -1 = kMinInt.
      if (CpuFeatures::IsSupported(ISELECT)) {
        __ isel(overflow, result, dividend, result, cr0);
      } else {
        Label no_overflow_possible;
        __ bnooverflow(&no_overflow_possible, cr0);
        __ mr(result, dividend);
        __ bind(&no_overflow_possible);
      }
    }
  }

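  // For operands of differing sign with a nonzero remainder, the flooring
  // quotient is the truncating quotient minus one (illustrative:
  // trunc(-7 / 2) == -3 but floor(-7 / 2) == -4); the checks below detect
  // exactly that case.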
  Label done;
  Register scratch = scratch0();
// If both operands have the same sign then we are done.
#if V8_TARGET_ARCH_PPC64
  __ xor_(scratch, dividend, divisor);
  __ cmpwi(scratch, Operand::Zero());
  __ bge(&done);
#else
  __ xor_(scratch, dividend, divisor, SetRC);
  __ bge(&done, cr0);
#endif

  // If there is no remainder then we are done.
  __ mullw(scratch, divisor, result);
  __ cmpw(dividend, scratch);
  __ beq(&done);

  // We performed a truncating division. Correct the result.
  __ subi(result, result, Operand(1));
  __ bind(&done);
}


void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DoubleRegister addend = ToDoubleRegister(instr->addend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
  DoubleRegister result = ToDoubleRegister(instr->result());

  __ fmadd(result, multiplier, multiplicand, addend);
}


void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
  DoubleRegister minuend = ToDoubleRegister(instr->minuend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
  DoubleRegister result = ToDoubleRegister(instr->result());

  __ fmsub(result, multiplier, multiplicand, minuend);
}


void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
   1412       // The case of a null constant will be handled separately.
   1413       // If constant is negative and left is null, the result should be -0.
   1414       __ cmpi(left, Operand::Zero());
   1415       DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
   1416     }
   1417 
   1418     switch (constant) {
   1419       case -1:
   1420         if (can_overflow) {
   1421 #if V8_TARGET_ARCH_PPC64
   1422           if (instr->hydrogen()->representation().IsSmi()) {
   1423 #endif
   1424             __ li(r0, Operand::Zero());  // clear xer
   1425             __ mtxer(r0);
   1426             __ neg(result, left, SetOE, SetRC);
   1427             DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
   1428 #if V8_TARGET_ARCH_PPC64
   1429           } else {
   1430             __ neg(result, left);
   1431             __ TestIfInt32(result, r0);
   1432             DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
   1433           }
   1434 #endif
   1435         } else {
   1436           __ neg(result, left);
   1437         }
   1438         break;
   1439       case 0:
   1440         if (bailout_on_minus_zero) {
   1441 // If left is strictly negative and the constant is null, the
   1442 // result is -0. Deoptimize if required, otherwise return 0.
   1443 #if V8_TARGET_ARCH_PPC64
   1444           if (instr->hydrogen()->representation().IsSmi()) {
   1445 #endif
   1446             __ cmpi(left, Operand::Zero());
   1447 #if V8_TARGET_ARCH_PPC64
   1448           } else {
   1449             __ cmpwi(left, Operand::Zero());
   1450           }
   1451 #endif
   1452           DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
   1453         }
   1454         __ li(result, Operand::Zero());
   1455         break;
   1456       case 1:
   1457         __ Move(result, left);
   1458         break;
   1459       default:
   1460         // Multiplying by powers of two and powers of two plus or minus
   1461         // one can be done faster with shifted operands.
   1462         // For other constants we emit standard code.
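        // Branchless absolute value: mask is 0 for a non-negative constant
        // and -1 for a negative one, so (constant + mask) ^ mask computes
        // |constant| in two's complement.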
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;

        if (base::bits::IsPowerOfTwo32(constant_abs)) {
          int32_t shift = WhichPowerOf2(constant_abs);
          __ ShiftLeftImm(result, left, Operand(shift));
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ neg(result, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
          int32_t shift = WhichPowerOf2(constant_abs - 1);
          __ ShiftLeftImm(scratch, left, Operand(shift));
          __ add(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ neg(result, result);
        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
          int32_t shift = WhichPowerOf2(constant_abs + 1);
          __ ShiftLeftImm(scratch, left, Operand(shift));
          __ sub(result, scratch, left);
          // Correct the sign of the result if the constant is negative.
          if (constant < 0) __ neg(result, result);
        } else {
          // Generate standard code.
          __ mov(ip, Operand(constant));
          __ Mul(result, left, ip);
        }
    }

  } else {
    DCHECK(right_op->IsRegister());
    Register right = ToRegister(right_op);

    if (can_overflow) {
#if V8_TARGET_ARCH_PPC64
      // result = left * right.
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ SmiUntag(scratch, right);
        __ Mul(result, result, scratch);
      } else {
        __ Mul(result, left, right);
      }
      __ TestIfInt32(result, r0);
      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiTag(result);
      }
#else
      // scratch:result = left * right.
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ mulhw(scratch, result, right);
        __ mullw(result, result, right);
      } else {
        __ mulhw(scratch, left, right);
        __ mullw(result, left, right);
      }
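      // The 64-bit product fits in an int32 iff the high word is the sign
      // extension of the low word, which is exactly what TestIfInt32 checks.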
      __ TestIfInt32(scratch, result, r0);
      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
#endif
    } else {
      if (instr->hydrogen()->representation().IsSmi()) {
        __ SmiUntag(result, left);
        __ Mul(result, result, right);
      } else {
        __ Mul(result, left, right);
      }
    }

    if (bailout_on_minus_zero) {
      Label done;
#if V8_TARGET_ARCH_PPC64
      if (instr->hydrogen()->representation().IsSmi()) {
#endif
        __ xor_(r0, left, right, SetRC);
        __ bge(&done, cr0);
#if V8_TARGET_ARCH_PPC64
      } else {
        __ xor_(r0, left, right);
        __ cmpwi(r0, Operand::Zero());
        __ bge(&done);
      }
#endif
      // Bail out if the result is minus zero.
      __ cmpi(result, Operand::Zero());
      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
      __ bind(&done);
    }
  }
}


void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  DCHECK(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());
  Operand right(no_reg);

  if (right_op->IsStackSlot()) {
    right = Operand(EmitLoadRegister(right_op, ip));
  } else {
    DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
    right = ToOperand(right_op);

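    // andi/ori/xori take a 16-bit unsigned immediate, so a constant that
    // fits can be emitted as a single instruction with no scratch register.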
    if (right_op->IsConstantOperand() && is_uint16(right.immediate())) {
      switch (instr->op()) {
        case Token::BIT_AND:
          __ andi(result, left, right);
          break;
        case Token::BIT_OR:
          __ ori(result, left, right);
          break;
        case Token::BIT_XOR:
          __ xori(result, left, right);
          break;
        default:
          UNREACHABLE();
          break;
      }
      return;
    }
  }

  switch (instr->op()) {
    case Token::BIT_AND:
      __ And(result, left, right);
      break;
    case Token::BIT_OR:
      __ Or(result, left, right);
      break;
    case Token::BIT_XOR:
      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
        __ notx(result, left);
      } else {
        __ Xor(result, left, right);
      }
      break;
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
  // result may alias either of them.
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  if (right_op->IsRegister()) {
    // Mask the right_op operand.
    __ andi(scratch, ToRegister(right_op), Operand(0x1F));
    switch (instr->op()) {
      case Token::ROR:
        // rotate_right(a, b) == rotate_left(a, 32 - b)
        __ subfic(scratch, scratch, Operand(32));
        __ rotlw(result, left, scratch);
        break;
      case Token::SAR:
        __ sraw(result, left, scratch);
        break;
      case Token::SHR:
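        // A logical shift right by zero leaves the sign bit intact, so the
        // uint32 result may not be representable as an int32; unless the
        // result is known to be truncated, deoptimize on a negative result.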
        if (instr->can_deopt()) {
          __ srw(result, left, scratch, SetRC);
#if V8_TARGET_ARCH_PPC64
          __ extsw(result, result, SetRC);
#endif
          DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, cr0);
        } else {
          __ srw(result, left, scratch);
        }
        break;
      case Token::SHL:
        __ slw(result, left, scratch);
#if V8_TARGET_ARCH_PPC64
        __ extsw(result, result);
#endif
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // Mask the right_op operand.
    int value = ToInteger32(LConstantOperand::cast(right_op));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ rotrwi(result, left, shift_count);
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ srawi(result, left, shift_count);
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ srwi(result, left, Operand(shift_count));
        } else {
          if (instr->can_deopt()) {
            __ cmpwi(left, Operand::Zero());
            DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue);
          }
          __ Move(result, left);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
#if V8_TARGET_ARCH_PPC64
          if (instr->hydrogen_value()->representation().IsSmi()) {
            __ sldi(result, left, Operand(shift_count));
#else
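          // On 32-bit targets the Smi payload is only 31 bits, so the shift
          // may overflow it: shift by one less than requested, then let
          // SmiTagCheckOverflow perform the final doubling and set the
          // condition checked by the deopt below.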
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
            if (shift_count != 1) {
              __ slwi(result, left, Operand(shift_count - 1));
              __ SmiTagCheckOverflow(result, result, scratch);
            } else {
              __ SmiTagCheckOverflow(result, left, scratch);
            }
            DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
#endif
          } else {
            __ slwi(result, left, Operand(shift_count));
#if V8_TARGET_ARCH_PPC64
            __ extsw(result, result);
#endif
          }
        } else {
          __ Move(result, left);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}


void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* right = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
#if V8_TARGET_ARCH_PPC64
  const bool isInteger = !instr->hydrogen()->representation().IsSmi();
#else
  const bool isInteger = false;
#endif
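  // On 64-bit targets a 32-bit operation cannot wrap the 64-bit register,
  // so it is cheaper to subtract first and then verify that the result
  // still fits in an int32.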
  if (!can_overflow || isInteger) {
    if (right->IsConstantOperand()) {
      __ Add(result, left, -(ToOperand(right).immediate()), r0);
    } else {
      __ sub(result, left, EmitLoadRegister(right, ip));
    }
#if V8_TARGET_ARCH_PPC64
    if (can_overflow) {
      __ TestIfInt32(result, r0);
      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
    }
#endif
  } else {
    if (right->IsConstantOperand()) {
      __ AddAndCheckForOverflow(result, left, -(ToOperand(right).immediate()),
                                scratch0(), r0);
    } else {
      __ SubAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
                                scratch0(), r0);
    }
    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
  }
}


void LCodeGen::DoRSubI(LRSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();

  DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) &&
         right->IsConstantOperand());

  Operand right_operand = ToOperand(right);
  if (is_int16(right_operand.immediate())) {
    __ subfic(ToRegister(result), ToRegister(left), right_operand);
  } else {
    __ mov(r0, right_operand);
    __ sub(ToRegister(result), r0, ToRegister(left));
  }
}


void LCodeGen::DoConstantI(LConstantI* instr) {
  __ mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantS(LConstantS* instr) {
  __ LoadSmiLiteral(ToRegister(instr->result()), instr->value());
}


void LCodeGen::DoConstantD(LConstantD* instr) {
  DCHECK(instr->result()->IsDoubleRegister());
  DoubleRegister result = ToDoubleRegister(instr->result());
#if V8_HOST_ARCH_IA32
  // Need some crappy work-around for x87 sNaN -> qNaN breakage in simulator
  // builds.
  uint64_t bits = instr->bits();
  if ((bits & V8_UINT64_C(0x7FF8000000000000)) ==
      V8_UINT64_C(0x7FF0000000000000)) {
    uint32_t lo = static_cast<uint32_t>(bits);
    uint32_t hi = static_cast<uint32_t>(bits >> 32);
    __ mov(ip, Operand(lo));
    __ mov(scratch0(), Operand(hi));
    __ MovInt64ToDouble(result, scratch0(), ip);
    return;
  }
#endif
  double v = instr->value();
  __ LoadDoubleLiteral(result, v, scratch0());
}


void LCodeGen::DoConstantE(LConstantE* instr) {
  __ mov(ToRegister(instr->result()), Operand(instr->value()));
}


void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> object = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ Move(ToRegister(instr->result()), object);
}


void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
  Register result = ToRegister(instr->result());
  Register map = ToRegister(instr->value());
  __ EnumLength(result, map);
}


MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index,
                                           String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToInteger32(LConstantOperand::cast(index));
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldMemOperand(string, SeqString::kHeaderSize + offset);
  }
  Register scratch = scratch0();
  DCHECK(!scratch.is(string));
  DCHECK(!scratch.is(ToRegister(index)));
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ add(scratch, string, ToRegister(index));
  } else {
    STATIC_ASSERT(kUC16Size == 2);
    __ ShiftLeftImm(scratch, ToRegister(index), Operand(1));
    __ add(scratch, string, scratch);
  }
  return FieldMemOperand(scratch, SeqString::kHeaderSize);
}


void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  if (FLAG_debug_code) {
    Register scratch = scratch0();
    __ LoadP(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
    __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

    __ andi(scratch, scratch,
            Operand(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ cmpi(scratch,
            Operand(encoding == String::ONE_BYTE_ENCODING ? one_byte_seq_type
                                                          : two_byte_seq_type));
    __ Check(eq, kUnexpectedStringType);
  }

  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ lbz(result, operand);
  } else {
    __ lhz(result, operand);
  }
}


void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register value = ToRegister(instr->value());

  if (FLAG_debug_code) {
    Register index = ToRegister(instr->index());
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    int encoding_mask =
        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
            ? one_byte_seq_type
            : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
  }

  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ stb(value, operand);
  } else {
    __ sth(value, operand);
  }
}


void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* right = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
#if V8_TARGET_ARCH_PPC64
  const bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
                           instr->hydrogen()->representation().IsExternal());
#else
  const bool isInteger = false;
#endif

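  // As in DoSubI: on 64-bit targets, add in the full register and deopt
  // afterwards if the result no longer fits in an int32.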
  if (!can_overflow || isInteger) {
    if (right->IsConstantOperand()) {
      __ Add(result, left, ToOperand(right).immediate(), r0);
    } else {
      __ add(result, left, EmitLoadRegister(right, ip));
    }
#if V8_TARGET_ARCH_PPC64
    if (can_overflow) {
      __ TestIfInt32(result, r0);
      DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
    }
#endif
  } else {
    if (right->IsConstantOperand()) {
      __ AddAndCheckForOverflow(result, left, ToOperand(right).immediate(),
                                scratch0(), r0);
    } else {
      __ AddAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
                                scratch0(), r0);
    }
    DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
  }
}


void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge;
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    Register left_reg = ToRegister(left);
    Register right_reg = EmitLoadRegister(right, ip);
    Register result_reg = ToRegister(instr->result());
    Label return_left, done;
#if V8_TARGET_ARCH_PPC64
    if (instr->hydrogen_value()->representation().IsSmi()) {
#endif
      __ cmp(left_reg, right_reg);
#if V8_TARGET_ARCH_PPC64
    } else {
      __ cmpw(left_reg, right_reg);
    }
#endif
    if (CpuFeatures::IsSupported(ISELECT)) {
      __ isel(cond, result_reg, left_reg, right_reg);
    } else {
      __ b(cond, &return_left);
      __ Move(result_reg, right_reg);
      __ b(&done);
      __ bind(&return_left);
      __ Move(result_reg, left_reg);
      __ bind(&done);
    }
  } else {
    DCHECK(instr->hydrogen()->representation().IsDouble());
    DoubleRegister left_reg = ToDoubleRegister(left);
    DoubleRegister right_reg = ToDoubleRegister(right);
    DoubleRegister result_reg = ToDoubleRegister(instr->result());
    Label check_nan_left, check_zero, return_left, return_right, done;
    __ fcmpu(left_reg, right_reg);
    __ bunordered(&check_nan_left);
    __ beq(&check_zero);
    __ b(cond, &return_left);
    __ b(&return_right);

    __ bind(&check_zero);
    __ fcmpu(left_reg, kDoubleRegZero);
    __ bne(&return_left);  // left == right != 0.

    // At this point, both left and right are either 0 or -0.
    // N.B. The following works because +0 + -0 == +0
    if (operation == HMathMinMax::kMathMin) {
      // For min we want logical-or of sign bit: -(-L + -R)
      __ fneg(left_reg, left_reg);
      __ fsub(result_reg, left_reg, right_reg);
      __ fneg(result_reg, result_reg);
    } else {
      // For max we want logical-and of sign bit: (L + R)
      __ fadd(result_reg, left_reg, right_reg);
    }
    __ b(&done);

    __ bind(&check_nan_left);
    __ fcmpu(left_reg, left_reg);
    __ bunordered(&return_left);  // left == NaN.

    __ bind(&return_right);
    if (!right_reg.is(result_reg)) {
      __ fmr(result_reg, right_reg);
    }
    __ b(&done);

    __ bind(&return_left);
    if (!left_reg.is(result_reg)) {
      __ fmr(result_reg, left_reg);
    }
    __ bind(&done);
  }
}


void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  DoubleRegister left = ToDoubleRegister(instr->left());
  DoubleRegister right = ToDoubleRegister(instr->right());
  DoubleRegister result = ToDoubleRegister(instr->result());
  switch (instr->op()) {
    case Token::ADD:
      __ fadd(result, left, right);
      break;
    case Token::SUB:
      __ fsub(result, left, right);
      break;
    case Token::MUL:
      __ fmul(result, left, right);
      break;
    case Token::DIV:
      __ fdiv(result, left, right);
      break;
    case Token::MOD: {
      __ PrepareCallCFunction(0, 2, scratch0());
      __ MovToFloatParameters(left, right);
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      // Move the result into the double result register.
      __ MovFromFloatResult(result);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}


void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(r4));
  DCHECK(ToRegister(instr->right()).is(r3));
  DCHECK(ToRegister(instr->result()).is(r3));

  Handle<Code> code =
      CodeFactory::BinaryOpIC(isolate(), instr->op(), instr->strength()).code();
  CallCode(code, RelocInfo::CODE_TARGET, instr);
}


template <class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition cond, CRegister cr) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();

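  // Emit the cheapest branch shape, falling through whenever one of the
  // destinations is the next block to be emitted.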
  if (right_block == left_block || cond == al) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ b(NegateCondition(cond), chunk_->GetAssemblyLabel(right_block), cr);
  } else if (right_block == next_block) {
    __ b(cond, chunk_->GetAssemblyLabel(left_block), cr);
  } else {
    __ b(cond, chunk_->GetAssemblyLabel(left_block), cr);
    __ b(chunk_->GetAssemblyLabel(right_block));
  }
}


template <class InstrType>
void LCodeGen::EmitTrueBranch(InstrType instr, Condition cond, CRegister cr) {
  int true_block = instr->TrueDestination(chunk_);
  __ b(cond, chunk_->GetAssemblyLabel(true_block), cr);
}


template <class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond, CRegister cr) {
  int false_block = instr->FalseDestination(chunk_);
  __ b(cond, chunk_->GetAssemblyLabel(false_block), cr);
}


void LCodeGen::DoDebugBreak(LDebugBreak* instr) { __ stop("LBreak"); }


void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  DoubleRegister dbl_scratch = double_scratch0();
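  // CR7 bits set by fcmpu when the value compares equal to zero (CR_EQ) or
  // is unordered, i.e. NaN (CR_FU); both cases map to boolean false.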
  const uint32_t crZOrNaNBits =
      (1 << (31 - Assembler::encode_crbit(cr7, CR_EQ)) |
       1 << (31 - Assembler::encode_crbit(cr7, CR_FU)));

  if (r.IsInteger32()) {
    DCHECK(!info()->IsStub());
    Register reg = ToRegister(instr->value());
    __ cmpwi(reg, Operand::Zero());
    EmitBranch(instr, ne);
  } else if (r.IsSmi()) {
    DCHECK(!info()->IsStub());
    Register reg = ToRegister(instr->value());
    __ cmpi(reg, Operand::Zero());
    EmitBranch(instr, ne);
  } else if (r.IsDouble()) {
    DCHECK(!info()->IsStub());
    DoubleRegister reg = ToDoubleRegister(instr->value());
    // Test the double value. Zero and NaN are false.
    __ fcmpu(reg, kDoubleRegZero, cr7);
    __ mfcr(r0);
    __ andi(r0, r0, Operand(crZOrNaNBits));
    EmitBranch(instr, eq, cr0);
  } else {
    DCHECK(r.IsTagged());
    Register reg = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      DCHECK(!info()->IsStub());
      __ CompareRoot(reg, Heap::kTrueValueRootIndex);
      EmitBranch(instr, eq);
    } else if (type.IsSmi()) {
      DCHECK(!info()->IsStub());
      __ cmpi(reg, Operand::Zero());
      EmitBranch(instr, ne);
    } else if (type.IsJSArray()) {
      DCHECK(!info()->IsStub());
      EmitBranch(instr, al);
    } else if (type.IsHeapNumber()) {
      DCHECK(!info()->IsStub());
      __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
      // Test the double value. Zero and NaN are false.
      __ fcmpu(dbl_scratch, kDoubleRegZero, cr7);
      __ mfcr(r0);
      __ andi(r0, r0, Operand(crZOrNaNBits));
      EmitBranch(instr, eq, cr0);
    } else if (type.IsString()) {
      DCHECK(!info()->IsStub());
      __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
      __ cmpi(ip, Operand::Zero());
      EmitBranch(instr, ne);
    } else {
      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
      // Avoid deopts in the case where we've never executed this path before.
      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();

      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
        // undefined -> false.
        __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
        __ beq(instr->FalseLabel(chunk_));
      }
      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
        // Boolean -> its value.
        __ CompareRoot(reg, Heap::kTrueValueRootIndex);
        __ beq(instr->TrueLabel(chunk_));
        __ CompareRoot(reg, Heap::kFalseValueRootIndex);
        __ beq(instr->FalseLabel(chunk_));
      }
      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
        // 'null' -> false.
        __ CompareRoot(reg, Heap::kNullValueRootIndex);
        __ beq(instr->FalseLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::SMI)) {
        // Smis: 0 -> false, all other -> true.
        __ cmpi(reg, Operand::Zero());
        __ beq(instr->FalseLabel(chunk_));
        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
      } else if (expected.NeedsMap()) {
        // If we need a map later and have a Smi -> deopt.
        __ TestIfSmi(reg, r0);
        DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
      }

      const Register map = scratch0();
      if (expected.NeedsMap()) {
        __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset));

        if (expected.CanBeUndetectable()) {
          // Undetectable -> false.
          __ lbz(ip, FieldMemOperand(map, Map::kBitFieldOffset));
          __ TestBit(ip, Map::kIsUndetectable, r0);
          __ bne(instr->FalseLabel(chunk_), cr0);
        }
      }

      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
        // spec object -> true.
        __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
        __ bge(instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::STRING)) {
        // String value -> false iff empty.
        Label not_string;
        __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
        __ bge(&not_string);
        __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
        __ cmpi(ip, Operand::Zero());
        __ bne(instr->TrueLabel(chunk_));
        __ b(instr->FalseLabel(chunk_));
        __ bind(&not_string);
      }

      if (expected.Contains(ToBooleanStub::SYMBOL)) {
        // Symbol value -> true.
        __ CompareInstanceType(map, ip, SYMBOL_TYPE);
        __ beq(instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::SIMD_VALUE)) {
        // SIMD value -> true.
        __ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
        __ beq(instr->TrueLabel(chunk_));
      }

      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
        // heap number -> false iff +0, -0, or NaN.
        Label not_heap_number;
        __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
        __ bne(&not_heap_number);
        __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
        // Test the double value. Zero and NaN are false.
        __ fcmpu(dbl_scratch, kDoubleRegZero, cr7);
        __ mfcr(r0);
        __ andi(r0, r0, Operand(crZOrNaNBits));
        __ bne(instr->FalseLabel(chunk_), cr0);
        __ b(instr->TrueLabel(chunk_));
        __ bind(&not_heap_number);
      }

      if (!expected.IsGeneric()) {
        // We've seen something for the first time -> deopt.
        // This can only happen if we are not generic already.
        DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject);
      }
    }
  }
}


void LCodeGen::EmitGoto(int block) {
  if (!IsNextEmittedBlock(block)) {
    __ b(chunk_->GetAssemblyLabel(LookupDestination(block)));
  }
}


void LCodeGen::DoGoto(LGoto* instr) { EmitGoto(instr->block_id()); }


Condition LCodeGen::TokenToCondition(Token::Value op) {
  Condition cond = kNoCondition;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = eq;
      break;
    case Token::NE:
    case Token::NE_STRICT:
      cond = ne;
      break;
    case Token::LT:
      cond = lt;
      break;
    case Token::GT:
      cond = gt;
      break;
    case Token::LTE:
      cond = le;
      break;
    case Token::GTE:
      cond = ge;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}


void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  bool is_unsigned =
      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
  Condition cond = TokenToCondition(instr->op());

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = EvalComparison(instr->op(), left_val, right_val)
                         ? instr->TrueDestination(chunk_)
                         : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      // Compare left and right operands as doubles and load the
      // resulting flags into the normal status register.
      __ fcmpu(ToDoubleRegister(left), ToDoubleRegister(right));
      // If a NaN is involved, i.e. the result is unordered,
      // jump to the false block label.
      __ bunordered(instr->FalseLabel(chunk_));
    } else {
      if (right->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(right));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          if (is_unsigned) {
            __ CmplSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
          } else {
            __ CmpSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
          }
        } else {
          if (is_unsigned) {
            __ Cmplwi(ToRegister(left), Operand(value), r0);
          } else {
            __ Cmpwi(ToRegister(left), Operand(value), r0);
          }
        }
      } else if (left->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(left));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          if (is_unsigned) {
            __ CmplSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
          } else {
            __ CmpSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
          }
        } else {
          if (is_unsigned) {
            __ Cmplwi(ToRegister(right), Operand(value), r0);
          } else {
            __ Cmpwi(ToRegister(right), Operand(value), r0);
          }
        }
        // We commuted the operands, so commute the condition.
        cond = CommuteCondition(cond);
      } else if (instr->hydrogen_value()->representation().IsSmi()) {
        if (is_unsigned) {
          __ cmpl(ToRegister(left), ToRegister(right));
        } else {
          __ cmp(ToRegister(left), ToRegister(right));
        }
      } else {
        if (is_unsigned) {
          __ cmplw(ToRegister(left), ToRegister(right));
        } else {
          __ cmpw(ToRegister(left), ToRegister(right));
        }
      }
    }
    EmitBranch(instr, cond);
  }
}


void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());
  Register right = ToRegister(instr->right());

  __ cmp(left, right);
  EmitBranch(instr, eq);
}


void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
  if (instr->hydrogen()->representation().IsTagged()) {
    Register input_reg = ToRegister(instr->object());
    __ mov(ip, Operand(factory()->the_hole_value()));
    __ cmp(input_reg, ip);
    EmitBranch(instr, eq);
    return;
  }

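  // In unboxed double representation the hole is a NaN with a distinguished
  // upper word (kHoleNanUpper32), so an ordered comparison result (not NaN)
  // rules it out before the bit pattern is inspected.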
  DoubleRegister input_reg = ToDoubleRegister(instr->object());
  __ fcmpu(input_reg, input_reg);
  EmitFalseBranch(instr, ordered);

  Register scratch = scratch0();
  __ MovDoubleHighToInt(scratch, input_reg);
  __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
  EmitBranch(instr, eq);
}


void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
  Representation rep = instr->hydrogen()->value()->representation();
  DCHECK(!rep.IsInteger32());
  Register scratch = ToRegister(instr->temp());

  if (rep.IsDouble()) {
    DoubleRegister value = ToDoubleRegister(instr->value());
    __ fcmpu(value, kDoubleRegZero);
    EmitFalseBranch(instr, ne);
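    // The value compares equal to zero, but +0 and -0 compare equal, so
    // inspect the sign bit of the raw bit pattern; it is set only for -0.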
#if V8_TARGET_ARCH_PPC64
    __ MovDoubleToInt64(scratch, value);
#else
    __ MovDoubleHighToInt(scratch, value);
#endif
    __ cmpi(scratch, Operand::Zero());
    EmitBranch(instr, lt);
  } else {
    Register value = ToRegister(instr->value());
    __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex,
                instr->FalseLabel(chunk()), DO_SMI_CHECK);
#if V8_TARGET_ARCH_PPC64
    __ LoadP(scratch, FieldMemOperand(value, HeapNumber::kValueOffset));
    __ li(ip, Operand(1));
    __ rotrdi(ip, ip, 1);  // ip = 0x80000000_00000000
    __ cmp(scratch, ip);
#else
    __ lwz(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
    __ lwz(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset));
    Label skip;
    __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
    __ cmp(scratch, r0);
    __ bne(&skip);
    __ cmpi(ip, Operand::Zero());
    __ bind(&skip);
#endif
    EmitBranch(instr, eq);
  }
}


Condition LCodeGen::EmitIsString(Register input, Register temp1,
                                 Label* is_not_string,
                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
  if (check_needed == INLINE_SMI_CHECK) {
    __ JumpIfSmi(input, is_not_string);
  }
  __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);

  return lt;
}


void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp1 = ToRegister(instr->temp());

  SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
                              ? OMIT_SMI_CHECK
                              : INLINE_SMI_CHECK;
  Condition true_cond =
      EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);

  EmitBranch(instr, true_cond);
}


void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Register input_reg = EmitLoadRegister(instr->value(), ip);
  __ TestIfSmi(input_reg, r0);
  EmitBranch(instr, eq, cr0);
}


void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
  __ lbz(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
  __ TestBit(temp, Map::kIsUndetectable, r0);
  EmitBranch(instr, ne, cr0);
}


static Condition ComputeCompareCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(r4));
  DCHECK(ToRegister(instr->right()).is(r3));

  Handle<Code> code = CodeFactory::StringCompare(isolate()).code();
  CallCode(code, RelocInfo::CODE_TARGET, instr);
  __ cmpi(r3, Operand::Zero());

  EmitBranch(instr, ComputeCompareCondition(instr->op()));
}


static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  DCHECK(from == to || to == LAST_TYPE);
  return from;
}


static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return eq;
  if (to == LAST_TYPE) return ge;
  if (from == FIRST_TYPE) return le;
  UNREACHABLE();
  return eq;
}


void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->value());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }

  __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
  EmitBranch(instr, BranchCondition(instr->hydrogen()));
}


void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
  Register input = ToRegister(instr->value());
  Register result = ToRegister(instr->result());

  __ AssertString(input);

  __ lwz(result, FieldMemOperand(input, String::kHashFieldOffset));
  __ IndexFromHash(result, result);
}


void LCodeGen::DoHasCachedArrayIndexAndBranch(
    LHasCachedArrayIndexAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register scratch = scratch0();

  __ lwz(scratch, FieldMemOperand(input, String::kHashFieldOffset));
  __ mov(r0, Operand(String::kContainsCachedArrayIndexMask));
  __ and_(r0, scratch, r0, SetRC);
  EmitBranch(instr, eq, cr0);
}


// Branches to a label or falls through with the answer in flags.  Trashes
// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
                               Handle<String> class_name, Register input,
                               Register temp, Register temp2) {
  DCHECK(!input.is(temp));
  DCHECK(!input.is(temp2));
  DCHECK(!temp.is(temp2));

  __ JumpIfSmi(input, is_false);

  __ CompareObjectType(input, temp, temp2, JS_FUNCTION_TYPE);
  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
    __ beq(is_true);
  } else {
    __ beq(is_false);
  }

  // Check if the constructor in the map is a function.
  Register instance_type = ip;
  __ GetMapConstructor(temp, temp, temp2, instance_type);

  // Objects with a non-function constructor have class 'Object'.
  __ cmpi(instance_type, Operand(JS_FUNCTION_TYPE));
  if (String::Equals(isolate()->factory()->Object_string(), class_name)) {
    __ bne(is_true);
  } else {
    __ bne(is_false);
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ LoadP(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ LoadP(temp,
           FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is internalized since it's a literal.
  // The name in the constructor is internalized because of the way the context
  // is booted.  This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax.  Since both sides are internalized it is sufficient to use an
  // identity comparison.
  __ Cmpi(temp, Operand(class_name), r0);
  // End with the answer in flags.
}


void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = scratch0();
  Register temp2 = ToRegister(instr->temp());
  Handle<String> class_name = instr->hydrogen()->class_name();

  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                  class_name, input, temp, temp2);

  EmitBranch(instr, eq);
}


void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  __ LoadP(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
  __ Cmpi(temp, Operand(instr->map()), r0);
  EmitBranch(instr, eq);
}


void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(InstanceOfDescriptor::LeftRegister()));
  DCHECK(ToRegister(instr->right()).is(InstanceOfDescriptor::RightRegister()));
  DCHECK(ToRegister(instr->result()).is(r3));
  InstanceOfStub stub(isolate());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}


void LCodeGen::DoHasInPrototypeChainAndBranch(
    LHasInPrototypeChainAndBranch* instr) {
  Register const object = ToRegister(instr->object());
  Register const object_map = scratch0();
  Register const object_instance_type = ip;
  Register const object_prototype = object_map;
  Register const prototype = ToRegister(instr->prototype());

  // The {object} must be a spec object.  It's sufficient to know that {object}
  // is not a smi, since all other non-spec objects have {null} prototypes and
  // will be ruled out below.
  if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
    __ TestIfSmi(object, r0);
    EmitFalseBranch(instr, eq, cr0);
  }

  // Loop through the {object}'s prototype chain looking for the {prototype}.
  __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
  Label loop;
  __ bind(&loop);

  // Deoptimize if the object needs to be access checked.
  __ lbz(object_instance_type,
         FieldMemOperand(object_map, Map::kBitFieldOffset));
  __ TestBit(object_instance_type, Map::kIsAccessCheckNeeded, r0);
  DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck, cr0);
  // Deoptimize for proxies.
  __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
  DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
  __ LoadP(object_prototype,
           FieldMemOperand(object_map, Map::kPrototypeOffset));
  __ cmp(object_prototype, prototype);
  EmitTrueBranch(instr, eq);
  __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
  EmitFalseBranch(instr, eq);
  __ LoadP(object_map,
           FieldMemOperand(object_prototype, HeapObject::kMapOffset));
  __ b(&loop);
}


void LCodeGen::DoCmpT(LCmpT* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  Handle<Code> ic =
      CodeFactory::CompareIC(isolate(), op, instr->strength()).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  // This instruction also signals that no smi code is inlined.
  __ cmpi(r3, Operand::Zero());

  Condition condition = ComputeCompareCondition(op);
  if (CpuFeatures::IsSupported(ISELECT)) {
    __ LoadRoot(r4, Heap::kTrueValueRootIndex);
    __ LoadRoot(r5, Heap::kFalseValueRootIndex);
    __ isel(condition, ToRegister(instr->result()), r4, r5);
  } else {
    Label true_value, done;

    __ b(condition, &true_value);

    __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
    __ b(&done);

    __ bind(&true_value);
    __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);

    __ bind(&done);
  }
}


void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Push the return value on the stack as the parameter.
    // Runtime::TraceExit returns its parameter in r3.  Since we're leaving
    // the code managed by the register allocator and tearing down the frame,
    // it's safe to write to the context register.
    __ push(r3);
    __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit);
  }
  if (info()->saves_caller_doubles()) {
    RestoreCallerDoubles();
  }
  if (instr->has_constant_parameter_count()) {
    int parameter_count = ToInteger32(instr->constant_parameter_count());
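    // The +1 accounts for the receiver, which is popped along with the
    // parameters.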
   2733     int32_t sp_delta = (parameter_count + 1) * kPointerSize;
   2734     if (NeedsEagerFrame()) {
   2735       masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
   2736     } else if (sp_delta != 0) {
   2737       __ addi(sp, sp, Operand(sp_delta));
   2738     }
   2739   } else {
   2740     DCHECK(info()->IsStub());  // Functions would need to drop one more value.
   2741     Register reg = ToRegister(instr->parameter_count());
   2742     // The argument count parameter is a smi
   2743     if (NeedsEagerFrame()) {
   2744       masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
   2745     }
   2746     __ SmiToPtrArrayOffset(r0, reg);
   2747     __ add(sp, sp, r0);
   2748   }
   2749 
   2750   __ blr();
   2751 }
   2752 
   2753 
   2754 template <class T>
   2755 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
   2756   Register vector_register = ToRegister(instr->temp_vector());
   2757   Register slot_register = LoadDescriptor::SlotRegister();
   2758   DCHECK(vector_register.is(LoadWithVectorDescriptor::VectorRegister()));
   2759   DCHECK(slot_register.is(r3));
   2760 
   2761   AllowDeferredHandleDereference vector_structure_check;
   2762   Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
   2763   __ Move(vector_register, vector);
   2764   // No need to allocate this register.
   2765   FeedbackVectorSlot slot = instr->hydrogen()->slot();
   2766   int index = vector->GetIndex(slot);
   2767   __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
   2768 }
   2769 
   2770 
   2771 template <class T>
   2772 void LCodeGen::EmitVectorStoreICRegisters(T* instr) {
   2773   Register vector_register = ToRegister(instr->temp_vector());
   2774   Register slot_register = ToRegister(instr->temp_slot());
   2775 
   2776   AllowDeferredHandleDereference vector_structure_check;
   2777   Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
   2778   __ Move(vector_register, vector);
   2779   FeedbackVectorSlot slot = instr->hydrogen()->slot();
   2780   int index = vector->GetIndex(slot);
   2781   __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
   2782 }
   2783 
   2784 
   2785 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
   2786   DCHECK(ToRegister(instr->context()).is(cp));
   2787   DCHECK(ToRegister(instr->global_object())
   2788              .is(LoadDescriptor::ReceiverRegister()));
   2789   DCHECK(ToRegister(instr->result()).is(r3));
   2790 
   2791   __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
   2792   EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
   2793   Handle<Code> ic =
   2794       CodeFactory::LoadICInOptimizedCode(isolate(), instr->typeof_mode(),
   2795                                          SLOPPY, PREMONOMORPHIC).code();
   2796   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   2797 }
   2798 
   2799 
   2800 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
   2801   Register context = ToRegister(instr->context());
   2802   Register result = ToRegister(instr->result());
   2803   __ LoadP(result, ContextMemOperand(context, instr->slot_index()));
   2804   if (instr->hydrogen()->RequiresHoleCheck()) {
   2805     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
   2806     if (instr->hydrogen()->DeoptimizesOnHole()) {
   2807       __ cmp(result, ip);
   2808       DeoptimizeIf(eq, instr, Deoptimizer::kHole);
   2809     } else {
   2810       if (CpuFeatures::IsSupported(ISELECT)) {
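                // Branchless variant: select undefined when the slot holds the
                // hole, otherwise keep the loaded value.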
   2811         Register scratch = scratch0();
   2812         __ mov(scratch, Operand(factory()->undefined_value()));
   2813         __ cmp(result, ip);
   2814         __ isel(eq, result, scratch, result);
   2815       } else {
   2816         Label skip;
   2817         __ cmp(result, ip);
   2818         __ bne(&skip);
   2819         __ mov(result, Operand(factory()->undefined_value()));
   2820         __ bind(&skip);
   2821       }
   2822     }
   2823   }
   2824 }
   2825 
   2826 
   2827 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
   2828   Register context = ToRegister(instr->context());
   2829   Register value = ToRegister(instr->value());
   2830   Register scratch = scratch0();
   2831   MemOperand target = ContextMemOperand(context, instr->slot_index());
   2832 
   2833   Label skip_assignment;
   2834 
   2835   if (instr->hydrogen()->RequiresHoleCheck()) {
   2836     __ LoadP(scratch, target);
   2837     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
   2838     __ cmp(scratch, ip);
   2839     if (instr->hydrogen()->DeoptimizesOnHole()) {
   2840       DeoptimizeIf(eq, instr, Deoptimizer::kHole);
   2841     } else {
   2842       __ bne(&skip_assignment);
   2843     }
   2844   }
   2845 
   2846   __ StoreP(value, target, r0);
   2847   if (instr->hydrogen()->NeedsWriteBarrier()) {
   2848     SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
   2849                                 ? OMIT_SMI_CHECK
   2850                                 : INLINE_SMI_CHECK;
   2851     __ RecordWriteContextSlot(context, target.offset(), value, scratch,
   2852                               GetLinkRegisterState(), kSaveFPRegs,
   2853                               EMIT_REMEMBERED_SET, check_needed);
   2854   }
   2855 
   2856   __ bind(&skip_assignment);
   2857 }
   2858 
   2859 
   2860 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
   2861   HObjectAccess access = instr->hydrogen()->access();
   2862   int offset = access.offset();
   2863   Register object = ToRegister(instr->object());
   2864 
   2865   if (access.IsExternalMemory()) {
   2866     Register result = ToRegister(instr->result());
   2867     MemOperand operand = MemOperand(object, offset);
   2868     __ LoadRepresentation(result, operand, access.representation(), r0);
   2869     return;
   2870   }
   2871 
   2872   if (instr->hydrogen()->representation().IsDouble()) {
   2873     DCHECK(access.IsInobject());
   2874     DoubleRegister result = ToDoubleRegister(instr->result());
   2875     __ lfd(result, FieldMemOperand(object, offset));
   2876     return;
   2877   }
   2878 
   2879   Register result = ToRegister(instr->result());
   2880   if (!access.IsInobject()) {
   2881     __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
   2882     object = result;
   2883   }
   2884 
   2885   Representation representation = access.representation();
   2886 
   2887 #if V8_TARGET_ARCH_PPC64
   2888   // 64-bit Smi optimization
   2889   if (representation.IsSmi() &&
   2890       instr->hydrogen()->representation().IsInteger32()) {
   2891     // Read int value directly from upper half of the smi.
   2892     offset = SmiWordOffset(offset);
   2893     representation = Representation::Integer32();
   2894   }
   2895 #endif
   2896 
   2897   __ LoadRepresentation(result, FieldMemOperand(object, offset), representation,
   2898                         r0);
   2899 }
   2900 
   2901 
   2902 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
   2903   DCHECK(ToRegister(instr->context()).is(cp));
   2904   DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
   2905   DCHECK(ToRegister(instr->result()).is(r3));
   2906 
   2907   // Name is always in r5.
   2908   __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
   2909   EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
   2910   Handle<Code> ic =
   2911       CodeFactory::LoadICInOptimizedCode(
   2912           isolate(), NOT_INSIDE_TYPEOF, instr->hydrogen()->language_mode(),
   2913           instr->hydrogen()->initialization_state()).code();
   2914   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   2915 }
   2916 
   2917 
   2918 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
   2919   Register scratch = scratch0();
   2920   Register function = ToRegister(instr->function());
   2921   Register result = ToRegister(instr->result());
   2922 
   2923   // Get the prototype or initial map from the function.
   2924   __ LoadP(result,
   2925            FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   2926 
   2927   // Check that the function has a prototype or an initial map.
   2928   __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
   2929   __ cmp(result, ip);
   2930   DeoptimizeIf(eq, instr, Deoptimizer::kHole);
   2931 
   2932   // If the function does not have an initial map, we're done.
   2933   if (CpuFeatures::IsSupported(ISELECT)) {
   2934     // Get the prototype from the initial map (optimistic).
   2935     __ LoadP(ip, FieldMemOperand(result, Map::kPrototypeOffset));
   2936     __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
   2937     __ isel(eq, result, ip, result);
   2938   } else {
   2939     Label done;
   2940     __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
   2941     __ bne(&done);
   2942 
   2943     // Get the prototype from the initial map.
   2944     __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
   2945 
   2946     // All done.
   2947     __ bind(&done);
   2948   }
   2949 }
   2950 
   2951 
   2952 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
   2953   Register result = ToRegister(instr->result());
   2954   __ LoadRoot(result, instr->index());
   2955 }
   2956 
   2957 
   2958 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
   2959   Register arguments = ToRegister(instr->arguments());
   2960   Register result = ToRegister(instr->result());
   2961   // There are two words between the frame pointer and the last argument.
   2962   // Subtracting from length accounts for one of them; add one for the other.
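          // E.g. with const_length == 2 and const_index == 0, the slot is at
          // arguments + 3 * kPointerSize.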
   2963   if (instr->length()->IsConstantOperand()) {
   2964     int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
   2965     if (instr->index()->IsConstantOperand()) {
   2966       int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   2967       int index = (const_length - const_index) + 1;
   2968       __ LoadP(result, MemOperand(arguments, index * kPointerSize), r0);
   2969     } else {
   2970       Register index = ToRegister(instr->index());
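              // subfic subtracts a register from an immediate, so this computes
              // (const_length + 1) - index in one instruction.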
   2971       __ subfic(result, index, Operand(const_length + 1));
   2972       __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
   2973       __ LoadPX(result, MemOperand(arguments, result));
   2974     }
   2975   } else if (instr->index()->IsConstantOperand()) {
   2976     Register length = ToRegister(instr->length());
   2977     int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   2978     int loc = const_index - 1;
   2979     if (loc != 0) {
   2980       __ subi(result, length, Operand(loc));
   2981       __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
   2982       __ LoadPX(result, MemOperand(arguments, result));
   2983     } else {
   2984       __ ShiftLeftImm(result, length, Operand(kPointerSizeLog2));
   2985       __ LoadPX(result, MemOperand(arguments, result));
   2986     }
   2987   } else {
   2988     Register length = ToRegister(instr->length());
   2989     Register index = ToRegister(instr->index());
   2990     __ sub(result, length, index);
   2991     __ addi(result, result, Operand(1));
   2992     __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
   2993     __ LoadPX(result, MemOperand(arguments, result));
   2994   }
   2995 }
   2996 
   2997 
   2998 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
   2999   Register external_pointer = ToRegister(instr->elements());
   3000   Register key = no_reg;
   3001   ElementsKind elements_kind = instr->elements_kind();
   3002   bool key_is_constant = instr->key()->IsConstantOperand();
   3003   int constant_key = 0;
   3004   if (key_is_constant) {
   3005     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   3006     if (constant_key & 0xF0000000) {
   3007       Abort(kArrayIndexConstantValueTooBig);
   3008     }
   3009   } else {
   3010     key = ToRegister(instr->key());
   3011   }
   3012   int element_size_shift = ElementsKindToShiftSize(elements_kind);
   3013   bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
   3014   int base_offset = instr->base_offset();
   3015 
   3016   if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
   3017     DoubleRegister result = ToDoubleRegister(instr->result());
   3018     if (key_is_constant) {
   3019       __ Add(scratch0(), external_pointer, constant_key << element_size_shift,
   3020              r0);
   3021     } else {
   3022       __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
   3023       __ add(scratch0(), external_pointer, r0);
   3024     }
   3025     if (elements_kind == FLOAT32_ELEMENTS) {
   3026       __ lfs(result, MemOperand(scratch0(), base_offset));
   3027     } else {  // i.e. elements_kind == FLOAT64_ELEMENTS
   3028       __ lfd(result, MemOperand(scratch0(), base_offset));
   3029     }
   3030   } else {
   3031     Register result = ToRegister(instr->result());
   3032     MemOperand mem_operand =
   3033         PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
   3034                             constant_key, element_size_shift, base_offset);
   3035     switch (elements_kind) {
   3036       case INT8_ELEMENTS:
   3037         if (key_is_constant) {
   3038           __ LoadByte(result, mem_operand, r0);
   3039         } else {
   3040           __ lbzx(result, mem_operand);
   3041         }
   3042         __ extsb(result, result);
   3043         break;
   3044       case UINT8_ELEMENTS:
   3045       case UINT8_CLAMPED_ELEMENTS:
   3046         if (key_is_constant) {
   3047           __ LoadByte(result, mem_operand, r0);
   3048         } else {
   3049           __ lbzx(result, mem_operand);
   3050         }
   3051         break;
   3052       case INT16_ELEMENTS:
   3053         if (key_is_constant) {
   3054           __ LoadHalfWordArith(result, mem_operand, r0);
   3055         } else {
   3056           __ lhax(result, mem_operand);
   3057         }
   3058         break;
   3059       case UINT16_ELEMENTS:
   3060         if (key_is_constant) {
   3061           __ LoadHalfWord(result, mem_operand, r0);
   3062         } else {
   3063           __ lhzx(result, mem_operand);
   3064         }
   3065         break;
   3066       case INT32_ELEMENTS:
   3067         if (key_is_constant) {
   3068           __ LoadWordArith(result, mem_operand, r0);
   3069         } else {
   3070           __ lwax(result, mem_operand);
   3071         }
   3072         break;
   3073       case UINT32_ELEMENTS:
   3074         if (key_is_constant) {
   3075           __ LoadWord(result, mem_operand, r0);
   3076         } else {
   3077           __ lwzx(result, mem_operand);
   3078         }
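                // A loaded value with the sign bit set does not fit in a signed
                // Integer32 result, so deoptimize unless the value is known to
                // be used as a uint32.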
   3079         if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
   3080           __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
   3081           __ cmplw(result, r0);
   3082           DeoptimizeIf(ge, instr, Deoptimizer::kNegativeValue);
   3083         }
   3084         break;
   3085       case FLOAT32_ELEMENTS:
   3086       case FLOAT64_ELEMENTS:
   3087       case FAST_HOLEY_DOUBLE_ELEMENTS:
   3088       case FAST_HOLEY_ELEMENTS:
   3089       case FAST_HOLEY_SMI_ELEMENTS:
   3090       case FAST_DOUBLE_ELEMENTS:
   3091       case FAST_ELEMENTS:
   3092       case FAST_SMI_ELEMENTS:
   3093       case DICTIONARY_ELEMENTS:
   3094       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
   3095       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
   3096         UNREACHABLE();
   3097         break;
   3098     }
   3099   }
   3100 }
   3101 
   3102 
   3103 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
   3104   Register elements = ToRegister(instr->elements());
   3105   bool key_is_constant = instr->key()->IsConstantOperand();
   3106   Register key = no_reg;
   3107   DoubleRegister result = ToDoubleRegister(instr->result());
   3108   Register scratch = scratch0();
   3109 
   3110   int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
   3111   bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
   3112   int constant_key = 0;
   3113   if (key_is_constant) {
   3114     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   3115     if (constant_key & 0xF0000000) {
   3116       Abort(kArrayIndexConstantValueTooBig);
   3117     }
   3118   } else {
   3119     key = ToRegister(instr->key());
   3120   }
   3121 
   3122   int base_offset = instr->base_offset() + constant_key * kDoubleSize;
   3123   if (!key_is_constant) {
   3124     __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
   3125     __ add(scratch, elements, r0);
   3126     elements = scratch;
   3127   }
   3128   if (!is_int16(base_offset)) {
   3129     __ Add(scratch, elements, base_offset, r0);
   3130     base_offset = 0;
   3131     elements = scratch;
   3132   }
   3133   __ lfd(result, MemOperand(elements, base_offset));
   3134 
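          // The hole is encoded as a NaN with a distinguished upper word, so
          // comparing the upper 32 bits against kHoleNanUpper32 suffices.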
   3135   if (instr->hydrogen()->RequiresHoleCheck()) {
   3136     if (is_int16(base_offset + Register::kExponentOffset)) {
   3137       __ lwz(scratch,
   3138              MemOperand(elements, base_offset + Register::kExponentOffset));
   3139     } else {
   3140       __ addi(scratch, elements, Operand(base_offset));
   3141       __ lwz(scratch, MemOperand(scratch, Register::kExponentOffset));
   3142     }
   3143     __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
   3144     DeoptimizeIf(eq, instr, Deoptimizer::kHole);
   3145   }
   3146 }
   3147 
   3148 
   3149 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
   3150   HLoadKeyed* hinstr = instr->hydrogen();
   3151   Register elements = ToRegister(instr->elements());
   3152   Register result = ToRegister(instr->result());
   3153   Register scratch = scratch0();
   3154   Register store_base = scratch;
   3155   int offset = instr->base_offset();
   3156 
   3157   if (instr->key()->IsConstantOperand()) {
   3158     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
   3159     offset += ToInteger32(const_operand) * kPointerSize;
   3160     store_base = elements;
   3161   } else {
   3162     Register key = ToRegister(instr->key());
   3163     // Even though the HLoadKeyed instruction forces the input
   3164     // representation for the key to be an integer, the input gets replaced
   3165     // during bound check elimination with the index argument to the bounds
   3166     // check, which can be tagged, so that case must be handled here, too.
   3167     if (hinstr->key()->representation().IsSmi()) {
   3168       __ SmiToPtrArrayOffset(r0, key);
   3169     } else {
   3170       __ ShiftLeftImm(r0, key, Operand(kPointerSizeLog2));
   3171     }
   3172     __ add(scratch, elements, r0);
   3173   }
   3174 
   3175   bool requires_hole_check = hinstr->RequiresHoleCheck();
   3176   Representation representation = hinstr->representation();
   3177 
   3178 #if V8_TARGET_ARCH_PPC64
   3179   // 64-bit Smi optimization
   3180   if (representation.IsInteger32() &&
   3181       hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
   3182     DCHECK(!requires_hole_check);
   3183     // Read int value directly from upper half of the smi.
   3184     offset = SmiWordOffset(offset);
   3185   }
   3186 #endif
   3187 
   3188   __ LoadRepresentation(result, MemOperand(store_base, offset), representation,
   3189                         r0);
   3190 
   3191   // Check for the hole value.
   3192   if (requires_hole_check) {
   3193     if (IsFastSmiElementsKind(hinstr->elements_kind())) {
   3194       __ TestIfSmi(result, r0);
   3195       DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
   3196     } else {
   3197       __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
   3198       __ cmp(result, scratch);
   3199       DeoptimizeIf(eq, instr, Deoptimizer::kHole);
   3200     }
   3201   } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
   3202     DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
   3203     Label done;
   3204     __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
   3205     __ cmp(result, scratch);
   3206     __ bne(&done);
   3207     if (info()->IsStub()) {
   3208       // A stub can safely convert the hole to undefined only if the array
   3209       // protector cell contains (Smi) Isolate::kArrayProtectorValid. Otherwise
   3210       // it needs to bail out.
   3211       __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
   3212       __ LoadP(result, FieldMemOperand(result, Cell::kValueOffset));
   3213       __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kArrayProtectorValid), r0);
   3214       DeoptimizeIf(ne, instr, Deoptimizer::kHole);
   3215     }
   3216     __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
   3217     __ bind(&done);
   3218   }
   3219 }
   3220 
   3221 
   3222 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
   3223   if (instr->is_fixed_typed_array()) {
   3224     DoLoadKeyedExternalArray(instr);
   3225   } else if (instr->hydrogen()->representation().IsDouble()) {
   3226     DoLoadKeyedFixedDoubleArray(instr);
   3227   } else {
   3228     DoLoadKeyedFixedArray(instr);
   3229   }
   3230 }
   3231 
   3232 
   3233 MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base,
   3234                                          bool key_is_constant, bool key_is_smi,
   3235                                          int constant_key,
   3236                                          int element_size_shift,
   3237                                          int base_offset) {
   3238   Register scratch = scratch0();
   3239 
   3240   if (key_is_constant) {
   3241     return MemOperand(base, (constant_key << element_size_shift) + base_offset);
   3242   }
   3243 
   3244   bool needs_shift =
   3245       (element_size_shift != (key_is_smi ? kSmiTagSize + kSmiShiftSize : 0));
   3246 
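          // A Smi key is already shifted left by kSmiTagSize + kSmiShiftSize, so
          // no further shift is needed when that matches the element size shift.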
   3247   if (!(base_offset || needs_shift)) {
   3248     return MemOperand(base, key);
   3249   }
   3250 
   3251   if (needs_shift) {
   3252     __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
   3253     key = scratch;
   3254   }
   3255 
   3256   if (base_offset) {
   3257     __ Add(scratch, key, base_offset, r0);
   3258   }
   3259 
   3260   return MemOperand(base, scratch);
   3261 }
   3262 
   3263 
   3264 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
   3265   DCHECK(ToRegister(instr->context()).is(cp));
   3266   DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
   3267   DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
   3268 
   3269   if (instr->hydrogen()->HasVectorAndSlot()) {
   3270     EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
   3271   }
   3272 
   3273   Handle<Code> ic = CodeFactory::KeyedLoadICInOptimizedCode(
   3274                         isolate(), instr->hydrogen()->language_mode(),
   3275                         instr->hydrogen()->initialization_state()).code();
   3276   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   3277 }
   3278 
   3279 
   3280 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
   3281   Register scratch = scratch0();
   3282   Register result = ToRegister(instr->result());
   3283 
   3284   if (instr->hydrogen()->from_inlined()) {
   3285     __ subi(result, sp, Operand(2 * kPointerSize));
   3286   } else {
   3287     // Check if the calling frame is an arguments adaptor frame.
   3288     __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   3289     __ LoadP(result,
   3290              MemOperand(scratch, StandardFrameConstants::kContextOffset));
   3291     __ CmpSmiLiteral(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
   3292 
   3293     // Result is the frame pointer for the frame if not adapted and for the real
   3294     // frame below the adaptor frame if adapted.
   3295     if (CpuFeatures::IsSupported(ISELECT)) {
   3296       __ isel(eq, result, scratch, fp);
   3297     } else {
   3298       Label done, adapted;
   3299       __ beq(&adapted);
   3300       __ mr(result, fp);
   3301       __ b(&done);
   3302 
   3303       __ bind(&adapted);
   3304       __ mr(result, scratch);
   3305       __ bind(&done);
   3306     }
   3307   }
   3308 }
   3309 
   3310 
   3311 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
   3312   Register elem = ToRegister(instr->elements());
   3313   Register result = ToRegister(instr->result());
   3314 
   3315   Label done;
   3316 
   3317   // If there is no arguments adaptor frame, the number of arguments is fixed.
   3318   __ cmp(fp, elem);
   3319   __ mov(result, Operand(scope()->num_parameters()));
   3320   __ beq(&done);
   3321 
   3322   // Arguments adaptor frame present. Get argument length from there.
   3323   __ LoadP(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   3324   __ LoadP(result,
   3325            MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
   3326   __ SmiUntag(result);
   3327 
   3328   // Argument length is in result register.
   3329   __ bind(&done);
   3330 }
   3331 
   3332 
   3333 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
   3334   Register receiver = ToRegister(instr->receiver());
   3335   Register function = ToRegister(instr->function());
   3336   Register result = ToRegister(instr->result());
   3337   Register scratch = scratch0();
   3338 
   3339   // If the receiver is null or undefined, we have to pass the global
   3340   // object as a receiver to normal functions. Values have to be
   3341   // passed unchanged to builtins and strict-mode functions.
   3342   Label global_object, result_in_receiver;
   3343 
   3344   if (!instr->hydrogen()->known_function()) {
   3345     // Do not transform the receiver to object for strict mode
   3346     // functions or builtins.
   3347     __ LoadP(scratch,
   3348              FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
   3349     __ lwz(scratch,
   3350            FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
   3351     __ andi(r0, scratch, Operand((1 << SharedFunctionInfo::kStrictModeBit) |
   3352                                  (1 << SharedFunctionInfo::kNativeBit)));
   3353     __ bne(&result_in_receiver, cr0);
   3354   }
   3355 
   3356   // Normal function. Replace undefined or null with global receiver.
   3357   __ LoadRoot(scratch, Heap::kNullValueRootIndex);
   3358   __ cmp(receiver, scratch);
   3359   __ beq(&global_object);
   3360   __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
   3361   __ cmp(receiver, scratch);
   3362   __ beq(&global_object);
   3363 
   3364   // Deoptimize if the receiver is not a JS object.
   3365   __ TestIfSmi(receiver, r0);
   3366   DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
   3367   __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
   3368   DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
   3369 
   3370   __ b(&result_in_receiver);
   3371   __ bind(&global_object);
   3372   __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset));
   3373   __ LoadP(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
   3374   __ LoadP(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
   3375 
   3376   if (result.is(receiver)) {
   3377     __ bind(&result_in_receiver);
   3378   } else {
   3379     Label result_ok;
   3380     __ b(&result_ok);
   3381     __ bind(&result_in_receiver);
   3382     __ mr(result, receiver);
   3383     __ bind(&result_ok);
   3384   }
   3385 }
   3386 
   3387 
   3388 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
   3389   Register receiver = ToRegister(instr->receiver());
   3390   Register function = ToRegister(instr->function());
   3391   Register length = ToRegister(instr->length());
   3392   Register elements = ToRegister(instr->elements());
   3393   Register scratch = scratch0();
   3394   DCHECK(receiver.is(r3));  // Used for parameter count.
   3395   DCHECK(function.is(r4));  // Required by InvokeFunction.
   3396   DCHECK(ToRegister(instr->result()).is(r3));
   3397 
   3398   // Copy the arguments to this function possibly from the
   3399   // adaptor frame below it.
   3400   const uint32_t kArgumentsLimit = 1 * KB;
   3401   __ cmpli(length, Operand(kArgumentsLimit));
   3402   DeoptimizeIf(gt, instr, Deoptimizer::kTooManyArguments);
   3403 
   3404   // Push the receiver and use the register to keep the original
   3405   // number of arguments.
   3406   __ push(receiver);
   3407   __ mr(receiver, length);
   3408   // The arguments are at a one pointer size offset from elements.
   3409   __ addi(elements, elements, Operand(1 * kPointerSize));
   3410 
   3411   // Loop through the arguments pushing them onto the execution
   3412   // stack.
   3413   Label invoke, loop;
   3414   // length is a small non-negative integer, due to the test above.
   3415   __ cmpi(length, Operand::Zero());
   3416   __ beq(&invoke);
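          // Use the count register for the loop: mtctr loads CTR with length and
          // bdnz decrements CTR, branching back while it is non-zero.  length is
          // decremented separately to index each element.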
   3417   __ mtctr(length);
   3418   __ bind(&loop);
   3419   __ ShiftLeftImm(r0, length, Operand(kPointerSizeLog2));
   3420   __ LoadPX(scratch, MemOperand(elements, r0));
   3421   __ push(scratch);
   3422   __ addi(length, length, Operand(-1));
   3423   __ bdnz(&loop);
   3424 
   3425   __ bind(&invoke);
   3426   DCHECK(instr->HasPointerMap());
   3427   LPointerMap* pointers = instr->pointer_map();
   3428   SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
   3429   // The number of arguments is stored in receiver, which is r3, as expected
   3430   // by InvokeFunction.
   3431   ParameterCount actual(receiver);
   3432   __ InvokeFunction(function, no_reg, actual, CALL_FUNCTION,
   3433                     safepoint_generator);
   3434 }
   3435 
   3436 
   3437 void LCodeGen::DoPushArgument(LPushArgument* instr) {
   3438   LOperand* argument = instr->value();
   3439   if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
   3440     Abort(kDoPushArgumentNotImplementedForDoubleType);
   3441   } else {
   3442     Register argument_reg = EmitLoadRegister(argument, ip);
   3443     __ push(argument_reg);
   3444   }
   3445 }
   3446 
   3447 
   3448 void LCodeGen::DoDrop(LDrop* instr) { __ Drop(instr->count()); }
   3449 
   3450 
   3451 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   3452   Register result = ToRegister(instr->result());
   3453   __ LoadP(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   3454 }
   3455 
   3456 
   3457 void LCodeGen::DoContext(LContext* instr) {
   3458   // If there is a non-return use, the context must be moved to a register.
   3459   Register result = ToRegister(instr->result());
   3460   if (info()->IsOptimizing()) {
   3461     __ LoadP(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
   3462   } else {
   3463     // If there is no frame, the context must be in cp.
   3464     DCHECK(result.is(cp));
   3465   }
   3466 }
   3467 
   3468 
   3469 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
   3470   DCHECK(ToRegister(instr->context()).is(cp));
   3471   __ Move(scratch0(), instr->hydrogen()->pairs());
   3472   __ push(scratch0());
   3473   __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
   3474   __ push(scratch0());
   3475   CallRuntime(Runtime::kDeclareGlobals, instr);
   3476 }
   3477 
   3478 
   3479 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
   3480                                  int formal_parameter_count, int arity,
   3481                                  LInstruction* instr) {
   3482   bool dont_adapt_arguments =
   3483       formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
   3484   bool can_invoke_directly =
   3485       dont_adapt_arguments || formal_parameter_count == arity;
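          // A direct invocation can skip the arguments adaptor frame: either the
          // callee opts out of adaptation or the arity matches exactly.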
   3486 
   3487   Register function_reg = r4;
   3488 
   3489   LPointerMap* pointers = instr->pointer_map();
   3490 
   3491   if (can_invoke_directly) {
   3492     // Change context.
   3493     __ LoadP(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
   3494 
   3495     // Always initialize new target and number of actual arguments.
   3496     __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
   3497     __ mov(r3, Operand(arity));
   3498 
   3499     bool is_self_call = function.is_identical_to(info()->closure());
   3500 
   3501     // Invoke function.
   3502     if (is_self_call) {
   3503       __ CallSelf();
   3504     } else {
   3505       __ LoadP(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
   3506       __ CallJSEntry(ip);
   3507     }
   3508 
   3509     // Set up deoptimization.
   3510     RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
   3511   } else {
   3512     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   3513     ParameterCount count(arity);
   3514     ParameterCount expected(formal_parameter_count);
   3515     __ InvokeFunction(function_reg, expected, count, CALL_FUNCTION, generator);
   3516   }
   3517 }
   3518 
   3519 
   3520 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
   3521   DCHECK(instr->context() != NULL);
   3522   DCHECK(ToRegister(instr->context()).is(cp));
   3523   Register input = ToRegister(instr->value());
   3524   Register result = ToRegister(instr->result());
   3525   Register scratch = scratch0();
   3526 
   3527   // Deoptimize if not a heap number.
   3528   __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
   3529   __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
   3530   __ cmp(scratch, ip);
   3531   DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
   3532 
   3533   Label done;
   3534   Register exponent = scratch0();
   3535   scratch = no_reg;
   3536   __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
   3537   // Check the sign of the argument. If the argument is positive, just
   3538   // return it.
   3539   __ cmpwi(exponent, Operand::Zero());
   3540   // Move the input to the result if necessary.
   3541   __ Move(result, input);
   3542   __ bge(&done);
   3543 
   3544   // Input is negative. Reverse its sign.
   3545   // Preserve the value of all registers.
   3546   {
   3547     PushSafepointRegistersScope scope(this);
   3548 
   3549     // Registers were saved at the safepoint, so we can use
   3550     // many scratch registers.
   3551     Register tmp1 = input.is(r4) ? r3 : r4;
   3552     Register tmp2 = input.is(r5) ? r3 : r5;
   3553     Register tmp3 = input.is(r6) ? r3 : r6;
   3554     Register tmp4 = input.is(r7) ? r3 : r7;
   3555 
   3556     // exponent: floating point exponent value.
   3557 
   3558     Label allocated, slow;
   3559     __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
   3560     __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
   3561     __ b(&allocated);
   3562 
   3563     // Slow case: Call the runtime system to do the number allocation.
   3564     __ bind(&slow);
   3565 
   3566     CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
   3567                             instr->context());
   3568     // Set the pointer to the new heap number in tmp1.
   3569     if (!tmp1.is(r3)) __ mr(tmp1, r3);
   3570     // Restore input_reg after call to runtime.
   3571     __ LoadFromSafepointRegisterSlot(input, input);
   3572     __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
   3573 
   3574     __ bind(&allocated);
   3575     // exponent: floating point exponent value.
   3576     // tmp1: allocated heap number.
   3577     STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
   3578     __ clrlwi(exponent, exponent, Operand(1));  // clear sign bit
   3579     __ stw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
   3580     __ lwz(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
   3581     __ stw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
   3582 
   3583     __ StoreToSafepointRegisterSlot(tmp1, result);
   3584   }
   3585 
   3586   __ bind(&done);
   3587 }
   3588 
   3589 
   3590 void LCodeGen::EmitMathAbs(LMathAbs* instr) {
   3591   Register input = ToRegister(instr->value());
   3592   Register result = ToRegister(instr->result());
   3593   Label done;
   3594   __ cmpi(input, Operand::Zero());
   3595   __ Move(result, input);
   3596   __ bge(&done);
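          // XER[SO] is sticky and is copied into cr0 by the record form below;
          // clear it so only an overflow from this neg (negating kMinInt)
          // triggers the deopt.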
   3597   __ li(r0, Operand::Zero());  // clear xer
   3598   __ mtxer(r0);
   3599   __ neg(result, result, SetOE, SetRC);
   3600   // Deoptimize on overflow.
   3601   DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
   3602   __ bind(&done);
   3603 }
   3604 
   3605 
   3606 #if V8_TARGET_ARCH_PPC64
   3607 void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
   3608   Register input = ToRegister(instr->value());
   3609   Register result = ToRegister(instr->result());
   3610   Label done;
   3611   __ cmpwi(input, Operand::Zero());
   3612   __ Move(result, input);
   3613   __ bge(&done);
   3614 
   3615   // Deoptimize on overflow.
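          // The only Integer32 value whose negation overflows is kMinInt
          // (0x80000000), materialized below via lis.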
   3616   __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
   3617   __ cmpw(input, r0);
   3618   DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
   3619 
   3620   __ neg(result, result);
   3621   __ bind(&done);
   3622 }
   3623 #endif
   3624 
   3625 
   3626 void LCodeGen::DoMathAbs(LMathAbs* instr) {
   3627   // Class for deferred case.
   3628   class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
   3629    public:
   3630     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
   3631         : LDeferredCode(codegen), instr_(instr) {}
   3632     void Generate() override {
   3633       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
   3634     }
   3635     LInstruction* instr() override { return instr_; }
   3636 
   3637    private:
   3638     LMathAbs* instr_;
   3639   };
   3640 
   3641   Representation r = instr->hydrogen()->value()->representation();
   3642   if (r.IsDouble()) {
   3643     DoubleRegister input = ToDoubleRegister(instr->value());
   3644     DoubleRegister result = ToDoubleRegister(instr->result());
   3645     __ fabs(result, input);
   3646 #if V8_TARGET_ARCH_PPC64
   3647   } else if (r.IsInteger32()) {
   3648     EmitInteger32MathAbs(instr);
   3649   } else if (r.IsSmi()) {
   3650 #else
   3651   } else if (r.IsSmiOrInteger32()) {
   3652 #endif
   3653     EmitMathAbs(instr);
   3654   } else {
   3655     // Representation is tagged.
   3656     DeferredMathAbsTaggedHeapNumber* deferred =
   3657         new (zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
   3658     Register input = ToRegister(instr->value());
   3659     // Smi check.
   3660     __ JumpIfNotSmi(input, deferred->entry());
   3661     // If smi, handle it directly.
   3662     EmitMathAbs(instr);
   3663     __ bind(deferred->exit());
   3664   }
   3665 }
   3666 
   3667 
   3668 void LCodeGen::DoMathFloor(LMathFloor* instr) {
   3669   DoubleRegister input = ToDoubleRegister(instr->value());
   3670   Register result = ToRegister(instr->result());
   3671   Register input_high = scratch0();
   3672   Register scratch = ip;
   3673   Label done, exact;
   3674 
   3675   __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
   3676                    &exact);
   3677   DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
   3678 
   3679   __ bind(&exact);
   3680   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3681     // Test for -0.
   3682     __ cmpi(result, Operand::Zero());
   3683     __ bne(&done);
   3684     __ cmpwi(input_high, Operand::Zero());
   3685     DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
   3686   }
   3687   __ bind(&done);
   3688 }
   3689 
   3690 
   3691 void LCodeGen::DoMathRound(LMathRound* instr) {
   3692   DoubleRegister input = ToDoubleRegister(instr->value());
   3693   Register result = ToRegister(instr->result());
   3694   DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
   3695   DoubleRegister input_plus_dot_five = double_scratch1;
   3696   Register scratch1 = scratch0();
   3697   Register scratch2 = ip;
   3698   DoubleRegister dot_five = double_scratch0();
   3699   Label convert, done;
   3700 
   3701   __ LoadDoubleLiteral(dot_five, 0.5, r0);
   3702   __ fabs(double_scratch1, input);
   3703   __ fcmpu(double_scratch1, dot_five);
   3704   DeoptimizeIf(unordered, instr, Deoptimizer::kLostPrecisionOrNaN);
   3705   // If input is in [-0.5, -0], the result is -0.
   3706   // If input is in [+0, +0.5[, the result is +0.
   3707   // If the input is +0.5, the result is 1.
   3708   __ bgt(&convert);  // Out of [-0.5, +0.5].
   3709   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3710 #if V8_TARGET_ARCH_PPC64
   3711     __ MovDoubleToInt64(scratch1, input);
   3712 #else
   3713     __ MovDoubleHighToInt(scratch1, input);
   3714 #endif
   3715     __ cmpi(scratch1, Operand::Zero());
   3716     // [-0.5, -0].
   3717     DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
   3718   }
   3719   __ fcmpu(input, dot_five);
   3720   if (CpuFeatures::IsSupported(ISELECT)) {
   3721     __ li(result, Operand(1));
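              // In isel, an RA operand of r0 reads as the constant 0, so this
              // selects 0 when input < 0.5 and keeps the 1 loaded above when
              // input == +0.5.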
   3722     __ isel(lt, result, r0, result);
   3723     __ b(&done);
   3724   } else {
   3725     Label return_zero;
   3726     __ bne(&return_zero);
   3727     __ li(result, Operand(1));  // +0.5.
   3728     __ b(&done);
   3729     // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
   3730     // flag kBailoutOnMinusZero.
   3731     __ bind(&return_zero);
   3732     __ li(result, Operand::Zero());
   3733     __ b(&done);
   3734   }
   3735 
   3736   __ bind(&convert);
   3737   __ fadd(input_plus_dot_five, input, dot_five);
   3738   // Reuse dot_five (double_scratch0) as we no longer need this value.
   3739   __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
   3740                    double_scratch0(), &done, &done);
   3741   DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
   3742   __ bind(&done);
   3743 }
   3744 
   3745 
   3746 void LCodeGen::DoMathFround(LMathFround* instr) {
   3747   DoubleRegister input_reg = ToDoubleRegister(instr->value());
   3748   DoubleRegister output_reg = ToDoubleRegister(instr->result());
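          // frsp rounds to single precision while keeping the value in a double
          // register, which is exactly the rounding Math.fround requires.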
   3749   __ frsp(output_reg, input_reg);
   3750 }
   3751 
   3752 
   3753 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
   3754   DoubleRegister input = ToDoubleRegister(instr->value());
   3755   DoubleRegister result = ToDoubleRegister(instr->result());
   3756   __ fsqrt(result, input);
   3757 }
   3758 
   3759 
   3760 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
   3761   DoubleRegister input = ToDoubleRegister(instr->value());
   3762   DoubleRegister result = ToDoubleRegister(instr->result());
   3763   DoubleRegister temp = double_scratch0();
   3764 
   3765   // Note that according to ECMA-262 15.8.2.13:
   3766   // Math.pow(-Infinity, 0.5) == Infinity
   3767   // Math.sqrt(-Infinity) == NaN
   3768   Label skip, done;
   3769 
   3770   __ LoadDoubleLiteral(temp, -V8_INFINITY, scratch0());
   3771   __ fcmpu(input, temp);
   3772   __ bne(&skip);
   3773   __ fneg(result, temp);
   3774   __ b(&done);
   3775 
   3776   // Add +0 to convert -0 to +0.
   3777   __ bind(&skip);
   3778   __ fadd(result, input, kDoubleRegZero);
   3779   __ fsqrt(result, result);
   3780   __ bind(&done);
   3781 }
   3782 
   3783 
   3784 void LCodeGen::DoPower(LPower* instr) {
   3785   Representation exponent_type = instr->hydrogen()->right()->representation();
   3786   // Having marked this as a call, we can use any registers.
   3787   // Just make sure that the input/output registers are the expected ones.
   3788   Register tagged_exponent = MathPowTaggedDescriptor::exponent();
   3789   DCHECK(!instr->right()->IsDoubleRegister() ||
   3790          ToDoubleRegister(instr->right()).is(d2));
   3791   DCHECK(!instr->right()->IsRegister() ||
   3792          ToRegister(instr->right()).is(tagged_exponent));
   3793   DCHECK(ToDoubleRegister(instr->left()).is(d1));
   3794   DCHECK(ToDoubleRegister(instr->result()).is(d3));
   3795 
   3796   if (exponent_type.IsSmi()) {
   3797     MathPowStub stub(isolate(), MathPowStub::TAGGED);
   3798     __ CallStub(&stub);
   3799   } else if (exponent_type.IsTagged()) {
   3800     Label no_deopt;
   3801     __ JumpIfSmi(tagged_exponent, &no_deopt);
   3802     DCHECK(!r10.is(tagged_exponent));
   3803     __ LoadP(r10, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
   3804     __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
   3805     __ cmp(r10, ip);
   3806     DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
   3807     __ bind(&no_deopt);
   3808     MathPowStub stub(isolate(), MathPowStub::TAGGED);
   3809     __ CallStub(&stub);
   3810   } else if (exponent_type.IsInteger32()) {
   3811     MathPowStub stub(isolate(), MathPowStub::INTEGER);
   3812     __ CallStub(&stub);
   3813   } else {
   3814     DCHECK(exponent_type.IsDouble());
   3815     MathPowStub stub(isolate(), MathPowStub::DOUBLE);
   3816     __ CallStub(&stub);
   3817   }
   3818 }
   3819 
   3820 
   3821 void LCodeGen::DoMathExp(LMathExp* instr) {
   3822   DoubleRegister input = ToDoubleRegister(instr->value());
   3823   DoubleRegister result = ToDoubleRegister(instr->result());
   3824   DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
   3825   DoubleRegister double_scratch2 = double_scratch0();
   3826   Register temp1 = ToRegister(instr->temp1());
   3827   Register temp2 = ToRegister(instr->temp2());
   3828 
   3829   MathExpGenerator::EmitMathExp(masm(), input, result, double_scratch1,
   3830                                 double_scratch2, temp1, temp2, scratch0());
   3831 }
   3832 
   3833 
   3834 void LCodeGen::DoMathLog(LMathLog* instr) {
   3835   __ PrepareCallCFunction(0, 1, scratch0());
   3836   __ MovToFloatParameter(ToDoubleRegister(instr->value()));
   3837   __ CallCFunction(ExternalReference::math_log_double_function(isolate()), 0,
   3838                    1);
   3839   __ MovFromFloatResult(ToDoubleRegister(instr->result()));
   3840 }
   3841 
   3842 
   3843 void LCodeGen::DoMathClz32(LMathClz32* instr) {
   3844   Register input = ToRegister(instr->value());
   3845   Register result = ToRegister(instr->result());
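          // cntlzw counts the leading zero bits of the 32-bit word, matching
          // Math.clz32 (it yields 32 for an input of 0).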
   3846   __ cntlzw_(result, input);
   3847 }
   3848 
   3849 
   3850 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
   3851   DCHECK(ToRegister(instr->context()).is(cp));
   3852   DCHECK(ToRegister(instr->function()).is(r4));
   3853   DCHECK(instr->HasPointerMap());
   3854 
   3855   Handle<JSFunction> known_function = instr->hydrogen()->known_function();
   3856   if (known_function.is_null()) {
   3857     LPointerMap* pointers = instr->pointer_map();
   3858     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   3859     ParameterCount count(instr->arity());
   3860     __ InvokeFunction(r4, no_reg, count, CALL_FUNCTION, generator);
   3861   } else {
   3862     CallKnownFunction(known_function,
   3863                       instr->hydrogen()->formal_parameter_count(),
   3864                       instr->arity(), instr);
   3865   }
   3866 }
   3867 
   3868 
   3869 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
   3870   DCHECK(ToRegister(instr->result()).is(r3));
   3871 
   3872   if (instr->hydrogen()->IsTailCall()) {
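            // For a tail call, tear down our frame now so the callee returns
            // directly to our caller.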
   3873     if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
   3874 
   3875     if (instr->target()->IsConstantOperand()) {
   3876       LConstantOperand* target = LConstantOperand::cast(instr->target());
   3877       Handle<Code> code = Handle<Code>::cast(ToHandle(target));
   3878       __ Jump(code, RelocInfo::CODE_TARGET);
   3879     } else {
   3880       DCHECK(instr->target()->IsRegister());
   3881       Register target = ToRegister(instr->target());
   3882       __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
   3883       __ JumpToJSEntry(ip);
   3884     }
   3885   } else {
   3886     LPointerMap* pointers = instr->pointer_map();
   3887     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   3888 
   3889     if (instr->target()->IsConstantOperand()) {
   3890       LConstantOperand* target = LConstantOperand::cast(instr->target());
   3891       Handle<Code> code = Handle<Code>::cast(ToHandle(target));
   3892       generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
   3893       __ Call(code, RelocInfo::CODE_TARGET);
   3894     } else {
   3895       DCHECK(instr->target()->IsRegister());
   3896       Register target = ToRegister(instr->target());
   3897       generator.BeforeCall(__ CallSize(target));
   3898       __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
   3899       __ CallJSEntry(ip);
   3900     }
   3901     generator.AfterCall();
   3902   }
   3903 }
   3904 
   3905 
   3906 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
   3907   DCHECK(ToRegister(instr->function()).is(r4));
   3908   DCHECK(ToRegister(instr->result()).is(r3));
   3909 
   3910   // Change context.
   3911   __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
   3912 
   3913   // Always initialize new target and number of actual arguments.
   3914   __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
   3915   __ mov(r3, Operand(instr->arity()));
   3916 
   3917   bool is_self_call = false;
   3918   if (instr->hydrogen()->function()->IsConstant()) {
   3919     HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
   3920     Handle<JSFunction> jsfun =
   3921         Handle<JSFunction>::cast(fun_const->handle(isolate()));
   3922     is_self_call = jsfun.is_identical_to(info()->closure());
   3923   }
   3924 
   3925   if (is_self_call) {
   3926     __ CallSelf();
   3927   } else {
   3928     __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
   3929     __ CallJSEntry(ip);
   3930   }
   3931 
   3932   RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
   3933 }
   3934 
   3935 
   3936 void LCodeGen::DoCallFunction(LCallFunction* instr) {
   3937   DCHECK(ToRegister(instr->context()).is(cp));
   3938   DCHECK(ToRegister(instr->function()).is(r4));
   3939   DCHECK(ToRegister(instr->result()).is(r3));
   3940 
   3941   int arity = instr->arity();
   3942   ConvertReceiverMode mode = instr->hydrogen()->convert_mode();
   3943   if (instr->hydrogen()->HasVectorAndSlot()) {
   3944     Register slot_register = ToRegister(instr->temp_slot());
   3945     Register vector_register = ToRegister(instr->temp_vector());
   3946     DCHECK(slot_register.is(r6));
   3947     DCHECK(vector_register.is(r5));
   3948 
   3949     AllowDeferredHandleDereference vector_structure_check;
   3950     Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
   3951     int index = vector->GetIndex(instr->hydrogen()->slot());
   3952 
   3953     __ Move(vector_register, vector);
   3954     __ LoadSmiLiteral(slot_register, Smi::FromInt(index));
   3955 
   3956     Handle<Code> ic =
   3957         CodeFactory::CallICInOptimizedCode(isolate(), arity, mode).code();
   3958     CallCode(ic, RelocInfo::CODE_TARGET, instr);
   3959   } else {
   3960     __ mov(r3, Operand(arity));
   3961     CallCode(isolate()->builtins()->Call(mode), RelocInfo::CODE_TARGET, instr);
   3962   }
   3963 }
   3964 
   3965 
   3966 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
   3967   DCHECK(ToRegister(instr->context()).is(cp));
   3968   DCHECK(ToRegister(instr->constructor()).is(r4));
   3969   DCHECK(ToRegister(instr->result()).is(r3));
   3970 
   3971   __ mov(r3, Operand(instr->arity()));
   3972   if (instr->arity() == 1) {
   3973     // We only need the allocation site for the case where we have a length
   3974     // argument. That case may bail out to the runtime, which will determine the
   3975     // correct elements kind with the site.
   3976     __ Move(r5, instr->hydrogen()->site());
   3977   } else {
   3978     __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
   3979   }
   3980   ElementsKind kind = instr->hydrogen()->elements_kind();
   3981   AllocationSiteOverrideMode override_mode =
   3982       (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
   3983           ? DISABLE_ALLOCATION_SITES
   3984           : DONT_OVERRIDE;
   3985 
   3986   if (instr->arity() == 0) {
   3987     ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
   3988     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   3989   } else if (instr->arity() == 1) {
   3990     Label done;
   3991     if (IsFastPackedElementsKind(kind)) {
   3992       Label packed_case;
   3993       // We might need a change here:
   3994       // look at the first argument.
   3995       __ LoadP(r8, MemOperand(sp, 0));
   3996       __ cmpi(r8, Operand::Zero());
   3997       __ beq(&packed_case);
   3998 
   3999       ElementsKind holey_kind = GetHoleyElementsKind(kind);
   4000       ArraySingleArgumentConstructorStub stub(isolate(), holey_kind,
   4001                                               override_mode);
   4002       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   4003       __ b(&done);
   4004       __ bind(&packed_case);
   4005     }
   4006 
   4007     ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
   4008     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   4009     __ bind(&done);
   4010   } else {
   4011     ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
   4012     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   4013   }
   4014 }
   4015 
   4016 
   4017 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
   4018   CallRuntime(instr->function(), instr->arity(), instr);
   4019 }
   4020 
   4021 
   4022 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
   4023   Register function = ToRegister(instr->function());
   4024   Register code_object = ToRegister(instr->code_object());
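          // Code::kHeaderSize - kHeapObjectTag turns the tagged Code pointer
          // into the raw address of its first instruction.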
   4025   __ addi(code_object, code_object,
   4026           Operand(Code::kHeaderSize - kHeapObjectTag));
   4027   __ StoreP(code_object,
   4028             FieldMemOperand(function, JSFunction::kCodeEntryOffset), r0);
   4029 }
   4030 
   4031 
   4032 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
   4033   Register result = ToRegister(instr->result());
   4034   Register base = ToRegister(instr->base_object());
   4035   if (instr->offset()->IsConstantOperand()) {
   4036     LConstantOperand* offset = LConstantOperand::cast(instr->offset());
   4037     __ Add(result, base, ToInteger32(offset), r0);
   4038   } else {
   4039     Register offset = ToRegister(instr->offset());
   4040     __ add(result, base, offset);
   4041   }
   4042 }
   4043 
   4044 
   4045 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
   4046   HStoreNamedField* hinstr = instr->hydrogen();
   4047   Representation representation = instr->representation();
   4048 
   4049   Register object = ToRegister(instr->object());
   4050   Register scratch = scratch0();
   4051   HObjectAccess access = hinstr->access();
   4052   int offset = access.offset();
   4053 
   4054   if (access.IsExternalMemory()) {
   4055     Register value = ToRegister(instr->value());
   4056     MemOperand operand = MemOperand(object, offset);
   4057     __ StoreRepresentation(value, operand, representation, r0);
   4058     return;
   4059   }
   4060 
   4061   __ AssertNotSmi(object);
   4062 
   4063 #if V8_TARGET_ARCH_PPC64
   4064   DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
   4065          IsInteger32(LConstantOperand::cast(instr->value())));
   4066 #else
   4067   DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
   4068          IsSmi(LConstantOperand::cast(instr->value())));
   4069 #endif
   4070   if (!FLAG_unbox_double_fields && representation.IsDouble()) {
   4071     DCHECK(access.IsInobject());
   4072     DCHECK(!hinstr->has_transition());
   4073     DCHECK(!hinstr->NeedsWriteBarrier());
   4074     DoubleRegister value = ToDoubleRegister(instr->value());
   4075     __ stfd(value, FieldMemOperand(object, offset));
   4076     return;
   4077   }
   4078 
   4079   if (hinstr->has_transition()) {
   4080     Handle<Map> transition = hinstr->transition_map();
   4081     AddDeprecationDependency(transition);
   4082     __ mov(scratch, Operand(transition));
   4083     __ StoreP(scratch, FieldMemOperand(object, HeapObject::kMapOffset), r0);
   4084     if (hinstr->NeedsWriteBarrierForMap()) {
   4085       Register temp = ToRegister(instr->temp());
   4086       // Update the write barrier for the map field.
   4087       __ RecordWriteForMap(object, scratch, temp, GetLinkRegisterState(),
   4088                            kSaveFPRegs);
   4089     }
   4090   }
   4091 
   4092   // Do the store.
   4093   Register record_dest = object;
   4094   Register record_value = no_reg;
   4095   Register record_scratch = scratch;
   4096 #if V8_TARGET_ARCH_PPC64
   4097   if (FLAG_unbox_double_fields && representation.IsDouble()) {
   4098     DCHECK(access.IsInobject());
   4099     DoubleRegister value = ToDoubleRegister(instr->value());
   4100     __ stfd(value, FieldMemOperand(object, offset));
   4101     if (hinstr->NeedsWriteBarrier()) {
   4102       record_value = ToRegister(instr->value());
   4103     }
   4104   } else {
   4105     if (representation.IsSmi() &&
   4106         hinstr->value()->representation().IsInteger32()) {
   4107       DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
   4108       // 64-bit Smi optimization
   4109       // Store int value directly to upper half of the smi.
   4110       offset = SmiWordOffset(offset);
   4111       representation = Representation::Integer32();
   4112     }
   4113 #endif
   4114     if (access.IsInobject()) {
   4115       Register value = ToRegister(instr->value());
   4116       MemOperand operand = FieldMemOperand(object, offset);
   4117       __ StoreRepresentation(value, operand, representation, r0);
   4118       record_value = value;
   4119     } else {
   4120       Register value = ToRegister(instr->value());
   4121       __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
   4122       MemOperand operand = FieldMemOperand(scratch, offset);
   4123       __ StoreRepresentation(value, operand, representation, r0);
   4124       record_dest = scratch;
   4125       record_value = value;
   4126       record_scratch = object;
   4127     }
   4128 #if V8_TARGET_ARCH_PPC64
   4129   }
   4130 #endif
   4131 
   4132   if (hinstr->NeedsWriteBarrier()) {
   4133     __ RecordWriteField(record_dest, offset, record_value, record_scratch,
   4134                         GetLinkRegisterState(), kSaveFPRegs,
   4135                         EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
   4136                         hinstr->PointersToHereCheckForValue());
   4137   }
   4138 }
   4139 
   4140 
   4141 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
   4142   DCHECK(ToRegister(instr->context()).is(cp));
   4143   DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
   4144   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
   4145 
   4146   if (instr->hydrogen()->HasVectorAndSlot()) {
   4147     EmitVectorStoreICRegisters<LStoreNamedGeneric>(instr);
   4148   }
   4149 
   4150   __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
   4151   Handle<Code> ic = CodeFactory::StoreICInOptimizedCode(
   4152                         isolate(), instr->language_mode(),
   4153                         instr->hydrogen()->initialization_state()).code();
   4154   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   4155 }
   4156 
   4157 
   4158 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
   4159   Representation representation = instr->hydrogen()->length()->representation();
   4160   DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
   4161   DCHECK(representation.IsSmiOrInteger32());
   4162 
   4163   Condition cc = instr->hydrogen()->allow_equality() ? lt : le;
   4164   if (instr->length()->IsConstantOperand()) {
   4165     int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
   4166     Register index = ToRegister(instr->index());
   4167     if (representation.IsSmi()) {
   4168       __ Cmpli(index, Operand(Smi::FromInt(length)), r0);
   4169     } else {
   4170       __ Cmplwi(index, Operand(length), r0);
   4171     }
   4172     cc = CommuteCondition(cc);
   4173   } else if (instr->index()->IsConstantOperand()) {
   4174     int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
   4175     Register length = ToRegister(instr->length());
   4176     if (representation.IsSmi()) {
   4177       __ Cmpli(length, Operand(Smi::FromInt(index)), r0);
   4178     } else {
   4179       __ Cmplwi(length, Operand(index), r0);
   4180     }
   4181   } else {
   4182     Register index = ToRegister(instr->index());
   4183     Register length = ToRegister(instr->length());
   4184     if (representation.IsSmi()) {
   4185       __ cmpl(length, index);
   4186     } else {
   4187       __ cmplw(length, index);
   4188     }
   4189   }
   4190   if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
   4191     Label done;
   4192     __ b(NegateCondition(cc), &done);
   4193     __ stop("eliminated bounds check failed");
   4194     __ bind(&done);
   4195   } else {
   4196     DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
   4197   }
   4198 }
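        // Note: when the length is a constant the compare above is emitted with
        // the operands swapped (index vs. length instead of length vs. index),
        // so the deopt condition is commuted, e.g. le (length <= index) becomes
        // ge (index >= length).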
   4199 
   4200 
   4201 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
   4202   Register external_pointer = ToRegister(instr->elements());
   4203   Register key = no_reg;
   4204   ElementsKind elements_kind = instr->elements_kind();
   4205   bool key_is_constant = instr->key()->IsConstantOperand();
   4206   int constant_key = 0;
   4207   if (key_is_constant) {
   4208     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   4209     if (constant_key & 0xF0000000) {
   4210       Abort(kArrayIndexConstantValueTooBig);
   4211     }
   4212   } else {
   4213     key = ToRegister(instr->key());
   4214   }
   4215   int element_size_shift = ElementsKindToShiftSize(elements_kind);
   4216   bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
   4217   int base_offset = instr->base_offset();
   4218 
   4219   if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
   4220     Register address = scratch0();
   4221     DoubleRegister value(ToDoubleRegister(instr->value()));
   4222     if (key_is_constant) {
   4223       if (constant_key != 0) {
   4224         __ Add(address, external_pointer, constant_key << element_size_shift,
   4225                r0);
   4226       } else {
   4227         address = external_pointer;
   4228       }
   4229     } else {
   4230       __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
   4231       __ add(address, external_pointer, r0);
   4232     }
   4233     if (elements_kind == FLOAT32_ELEMENTS) {
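              // The incoming value is a 64-bit double; frsp rounds it to single
              // precision so the 32-bit stfs below stores a well-formed float.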
   4234       __ frsp(double_scratch0(), value);
   4235       __ stfs(double_scratch0(), MemOperand(address, base_offset));
   4236     } else {  // Storing doubles, not floats.
   4237       __ stfd(value, MemOperand(address, base_offset));
   4238     }
   4239   } else {
   4240     Register value(ToRegister(instr->value()));
   4241     MemOperand mem_operand =
   4242         PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
   4243                             constant_key, element_size_shift, base_offset);
   4244     switch (elements_kind) {
   4245       case UINT8_ELEMENTS:
   4246       case UINT8_CLAMPED_ELEMENTS:
   4247       case INT8_ELEMENTS:
   4248         if (key_is_constant) {
   4249           __ StoreByte(value, mem_operand, r0);
   4250         } else {
   4251           __ stbx(value, mem_operand);
   4252         }
   4253         break;
   4254       case INT16_ELEMENTS:
   4255       case UINT16_ELEMENTS:
   4256         if (key_is_constant) {
   4257           __ StoreHalfWord(value, mem_operand, r0);
   4258         } else {
   4259           __ sthx(value, mem_operand);
   4260         }
   4261         break;
   4262       case INT32_ELEMENTS:
   4263       case UINT32_ELEMENTS:
   4264         if (key_is_constant) {
   4265           __ StoreWord(value, mem_operand, r0);
   4266         } else {
   4267           __ stwx(value, mem_operand);
   4268         }
   4269         break;
   4270       case FLOAT32_ELEMENTS:
   4271       case FLOAT64_ELEMENTS:
   4272       case FAST_DOUBLE_ELEMENTS:
   4273       case FAST_ELEMENTS:
   4274       case FAST_SMI_ELEMENTS:
   4275       case FAST_HOLEY_DOUBLE_ELEMENTS:
   4276       case FAST_HOLEY_ELEMENTS:
   4277       case FAST_HOLEY_SMI_ELEMENTS:
   4278       case DICTIONARY_ELEMENTS:
   4279       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
   4280       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
   4281         UNREACHABLE();
   4282         break;
   4283     }
   4284   }
   4285 }
   4286 
   4287 
   4288 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
   4289   DoubleRegister value = ToDoubleRegister(instr->value());
   4290   Register elements = ToRegister(instr->elements());
   4291   Register key = no_reg;
   4292   Register scratch = scratch0();
   4293   DoubleRegister double_scratch = double_scratch0();
   4294   bool key_is_constant = instr->key()->IsConstantOperand();
   4295   int constant_key = 0;
   4296 
   4297   // Calculate the effective address of the slot in the array to store the
   4298   // double value.
   4299   if (key_is_constant) {
   4300     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   4301     if (constant_key & 0xF0000000) {
   4302       Abort(kArrayIndexConstantValueTooBig);
   4303     }
   4304   } else {
   4305     key = ToRegister(instr->key());
   4306   }
   4307   int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
   4308   bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
   4309   int base_offset = instr->base_offset() + constant_key * kDoubleSize;
   4310   if (!key_is_constant) {
   4311     __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
   4312     __ add(scratch, elements, scratch);
   4313     elements = scratch;
   4314   }
   4315   if (!is_int16(base_offset)) {
   4316     __ Add(scratch, elements, base_offset, r0);
   4317     base_offset = 0;
   4318     elements = scratch;
   4319   }
   4320 
   4321   if (instr->NeedsCanonicalization()) {
   4322     // Turn potential sNaN value into qNaN.
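            // Canonicalizing keeps arbitrary NaN bit patterns out of the backing
            // store; in particular, the hole in a FixedDoubleArray is encoded as
            // a special NaN that must not collide with a user-provided NaN.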
   4323     __ CanonicalizeNaN(double_scratch, value);
   4324     __ stfd(double_scratch, MemOperand(elements, base_offset));
   4325   } else {
   4326     __ stfd(value, MemOperand(elements, base_offset));
   4327   }
   4328 }
   4329 
   4330 
   4331 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
   4332   HStoreKeyed* hinstr = instr->hydrogen();
   4333   Register value = ToRegister(instr->value());
   4334   Register elements = ToRegister(instr->elements());
   4335   Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
   4336   Register scratch = scratch0();
   4337   Register store_base = scratch;
   4338   int offset = instr->base_offset();
   4339 
   4340   // Do the store.
   4341   if (instr->key()->IsConstantOperand()) {
   4342     DCHECK(!hinstr->NeedsWriteBarrier());
   4343     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
   4344     offset += ToInteger32(const_operand) * kPointerSize;
   4345     store_base = elements;
   4346   } else {
   4347     // Even though the HStoreKeyed instruction forces the input
   4348     // representation for the key to be an integer, the input gets replaced
   4349     // during bounds check elimination with the index argument to the bounds
   4350     // check, which can be tagged, so that case must be handled here, too.
   4351     if (hinstr->key()->representation().IsSmi()) {
   4352       __ SmiToPtrArrayOffset(scratch, key);
   4353     } else {
   4354       __ ShiftLeftImm(scratch, key, Operand(kPointerSizeLog2));
   4355     }
   4356     __ add(scratch, elements, scratch);
   4357   }
   4358 
   4359   Representation representation = hinstr->value()->representation();
   4360 
   4361 #if V8_TARGET_ARCH_PPC64
   4362   // 64-bit Smi optimization
   4363   if (representation.IsInteger32()) {
   4364     DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
   4365     DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
   4366     // Store int value directly to upper half of the smi.
   4367     offset = SmiWordOffset(offset);
   4368   }
   4369 #endif
   4370 
   4371   __ StoreRepresentation(value, MemOperand(store_base, offset), representation,
   4372                          r0);
   4373 
   4374   if (hinstr->NeedsWriteBarrier()) {
   4375     SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
   4376                                 ? OMIT_SMI_CHECK
   4377                                 : INLINE_SMI_CHECK;
   4378     // Compute address of modified element and store it into key register.
   4379     __ Add(key, store_base, offset, r0);
   4380     __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs,
   4381                    EMIT_REMEMBERED_SET, check_needed,
   4382                    hinstr->PointersToHereCheckForValue());
   4383   }
   4384 }
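        // Note: RecordWrite needs the address of the written slot, which is
        // materialized into the key register above; the key value itself is not
        // needed after the store, so clobbering it is safe.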
   4385 
   4386 
   4387 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
   4388   // Dispatch by case: external (typed) array, fast double array, fast array.
   4389   if (instr->is_fixed_typed_array()) {
   4390     DoStoreKeyedExternalArray(instr);
   4391   } else if (instr->hydrogen()->value()->representation().IsDouble()) {
   4392     DoStoreKeyedFixedDoubleArray(instr);
   4393   } else {
   4394     DoStoreKeyedFixedArray(instr);
   4395   }
   4396 }
   4397 
   4398 
   4399 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
   4400   DCHECK(ToRegister(instr->context()).is(cp));
   4401   DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
   4402   DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
   4403   DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
   4404 
   4405   if (instr->hydrogen()->HasVectorAndSlot()) {
   4406     EmitVectorStoreICRegisters<LStoreKeyedGeneric>(instr);
   4407   }
   4408 
   4409   Handle<Code> ic = CodeFactory::KeyedStoreICInOptimizedCode(
   4410                         isolate(), instr->language_mode(),
   4411                         instr->hydrogen()->initialization_state()).code();
   4412   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   4413 }
   4414 
   4415 
   4416 void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
   4417   class DeferredMaybeGrowElements final : public LDeferredCode {
   4418    public:
   4419     DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
   4420         : LDeferredCode(codegen), instr_(instr) {}
   4421     void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
   4422     LInstruction* instr() override { return instr_; }
   4423 
   4424    private:
   4425     LMaybeGrowElements* instr_;
   4426   };
   4427 
   4428   Register result = r3;
   4429   DeferredMaybeGrowElements* deferred =
   4430       new (zone()) DeferredMaybeGrowElements(this, instr);
   4431   LOperand* key = instr->key();
   4432   LOperand* current_capacity = instr->current_capacity();
   4433 
   4434   DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
   4435   DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
   4436   DCHECK(key->IsConstantOperand() || key->IsRegister());
   4437   DCHECK(current_capacity->IsConstantOperand() ||
   4438          current_capacity->IsRegister());
   4439 
   4440   if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
   4441     int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
   4442     int32_t constant_capacity =
   4443         ToInteger32(LConstantOperand::cast(current_capacity));
   4444     if (constant_key >= constant_capacity) {
   4445       // Deferred case.
   4446       __ b(deferred->entry());
   4447     }
   4448   } else if (key->IsConstantOperand()) {
   4449     int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
   4450     __ Cmpwi(ToRegister(current_capacity), Operand(constant_key), r0);
   4451     __ ble(deferred->entry());
   4452   } else if (current_capacity->IsConstantOperand()) {
   4453     int32_t constant_capacity =
   4454         ToInteger32(LConstantOperand::cast(current_capacity));
   4455     __ Cmpwi(ToRegister(key), Operand(constant_capacity), r0);
   4456     __ bge(deferred->entry());
   4457   } else {
   4458     __ cmpw(ToRegister(key), ToRegister(current_capacity));
   4459     __ bge(deferred->entry());
   4460   }
   4461 
   4462   if (instr->elements()->IsRegister()) {
   4463     __ Move(result, ToRegister(instr->elements()));
   4464   } else {
   4465     __ LoadP(result, ToMemOperand(instr->elements()));
   4466   }
   4467 
   4468   __ bind(deferred->exit());
   4469 }
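        // Note: all four operand shapes above implement the same guard, namely
        // branch to the deferred grow path whenever key >= current_capacity;
        // only the direction of the emitted compare (ble vs. bge) differs.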
   4470 
   4471 
   4472 void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
   4473   // TODO(3095996): Get rid of this. For now, we need to make the
   4474   // result register contain a valid pointer because it is already
   4475   // contained in the register pointer map.
   4476   Register result = r3;
   4477   __ li(result, Operand::Zero());
   4478 
   4479   // We have to call a stub.
   4480   {
   4481     PushSafepointRegistersScope scope(this);
   4482     if (instr->object()->IsRegister()) {
   4483       __ Move(result, ToRegister(instr->object()));
   4484     } else {
   4485       __ LoadP(result, ToMemOperand(instr->object()));
   4486     }
   4487 
   4488     LOperand* key = instr->key();
   4489     if (key->IsConstantOperand()) {
   4490       __ LoadSmiLiteral(r6, ToSmi(LConstantOperand::cast(key)));
   4491     } else {
   4492       __ SmiTag(r6, ToRegister(key));
   4493     }
   4494 
   4495     GrowArrayElementsStub stub(isolate(), instr->hydrogen()->is_js_array(),
   4496                                instr->hydrogen()->kind());
   4497     __ CallStub(&stub);
   4498     RecordSafepointWithLazyDeopt(
   4499         instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
   4500     __ StoreToSafepointRegisterSlot(result, result);
   4501   }
   4502 
   4503   // Deopt on smi, which means the elements array changed to dictionary mode.
   4504   __ TestIfSmi(result, r0);
   4505   DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
   4506 }
   4507 
   4508 
   4509 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
   4510   Register object_reg = ToRegister(instr->object());
   4511   Register scratch = scratch0();
   4512 
   4513   Handle<Map> from_map = instr->original_map();
   4514   Handle<Map> to_map = instr->transitioned_map();
   4515   ElementsKind from_kind = instr->from_kind();
   4516   ElementsKind to_kind = instr->to_kind();
   4517 
   4518   Label not_applicable;
   4519   __ LoadP(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
   4520   __ Cmpi(scratch, Operand(from_map), r0);
   4521   __ bne(&not_applicable);
   4522 
   4523   if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
   4524     Register new_map_reg = ToRegister(instr->new_map_temp());
   4525     __ mov(new_map_reg, Operand(to_map));
   4526     __ StoreP(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset),
   4527               r0);
   4528     // Write barrier.
   4529     __ RecordWriteForMap(object_reg, new_map_reg, scratch,
   4530                          GetLinkRegisterState(), kDontSaveFPRegs);
   4531   } else {
   4532     DCHECK(ToRegister(instr->context()).is(cp));
   4533     DCHECK(object_reg.is(r3));
   4534     PushSafepointRegistersScope scope(this);
   4535     __ Move(r4, to_map);
   4536     bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
   4537     TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
   4538     __ CallStub(&stub);
   4539     RecordSafepointWithRegisters(instr->pointer_map(), 0,
   4540                                  Safepoint::kLazyDeopt);
   4541   }
   4542   __ bind(&not_applicable);
   4543 }
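        // Note: a "simple" transition only rewrites the map word (plus a map
        // write barrier); transitions that must convert the elements store go
        // through TransitionElementsKindStub under a safepoint.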
   4544 
   4545 
   4546 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
   4547   Register object = ToRegister(instr->object());
   4548   Register temp = ToRegister(instr->temp());
   4549   Label no_memento_found;
   4550   __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
   4551   DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
   4552   __ bind(&no_memento_found);
   4553 }
   4554 
   4555 
   4556 void LCodeGen::DoStringAdd(LStringAdd* instr) {
   4557   DCHECK(ToRegister(instr->context()).is(cp));
   4558   DCHECK(ToRegister(instr->left()).is(r4));
   4559   DCHECK(ToRegister(instr->right()).is(r3));
   4560   StringAddStub stub(isolate(), instr->hydrogen()->flags(),
   4561                      instr->hydrogen()->pretenure_flag());
   4562   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   4563 }
   4564 
   4565 
   4566 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
   4567   class DeferredStringCharCodeAt final : public LDeferredCode {
   4568    public:
   4569     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
   4570         : LDeferredCode(codegen), instr_(instr) {}
   4571     void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
   4572     LInstruction* instr() override { return instr_; }
   4573 
   4574    private:
   4575     LStringCharCodeAt* instr_;
   4576   };
   4577 
   4578   DeferredStringCharCodeAt* deferred =
   4579       new (zone()) DeferredStringCharCodeAt(this, instr);
   4580 
   4581   StringCharLoadGenerator::Generate(
   4582       masm(), ToRegister(instr->string()), ToRegister(instr->index()),
   4583       ToRegister(instr->result()), deferred->entry());
   4584   __ bind(deferred->exit());
   4585 }
   4586 
   4587 
   4588 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
   4589   Register string = ToRegister(instr->string());
   4590   Register result = ToRegister(instr->result());
   4591   Register scratch = scratch0();
   4592 
   4593   // TODO(3095996): Get rid of this. For now, we need to make the
   4594   // result register contain a valid pointer because it is already
   4595   // contained in the register pointer map.
   4596   __ li(result, Operand::Zero());
   4597 
   4598   PushSafepointRegistersScope scope(this);
   4599   __ push(string);
   4600   // Push the index as a smi. This is safe because of the checks in
   4601   // DoStringCharCodeAt above.
   4602   if (instr->index()->IsConstantOperand()) {
   4603     int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   4604     __ LoadSmiLiteral(scratch, Smi::FromInt(const_index));
   4605     __ push(scratch);
   4606   } else {
   4607     Register index = ToRegister(instr->index());
   4608     __ SmiTag(index);
   4609     __ push(index);
   4610   }
   4611   CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
   4612                           instr->context());
   4613   __ AssertSmi(r3);
   4614   __ SmiUntag(r3);
   4615   __ StoreToSafepointRegisterSlot(r3, result);
   4616 }
   4617 
   4618 
   4619 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
   4620   class DeferredStringCharFromCode final : public LDeferredCode {
   4621    public:
   4622     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
   4623         : LDeferredCode(codegen), instr_(instr) {}
   4624     void Generate() override {
   4625       codegen()->DoDeferredStringCharFromCode(instr_);
   4626     }
   4627     LInstruction* instr() override { return instr_; }
   4628 
   4629    private:
   4630     LStringCharFromCode* instr_;
   4631   };
   4632 
   4633   DeferredStringCharFromCode* deferred =
   4634       new (zone()) DeferredStringCharFromCode(this, instr);
   4635 
   4636   DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
   4637   Register char_code = ToRegister(instr->char_code());
   4638   Register result = ToRegister(instr->result());
   4639   DCHECK(!char_code.is(result));
   4640 
   4641   __ cmpli(char_code, Operand(String::kMaxOneByteCharCode));
   4642   __ bgt(deferred->entry());
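          // Fast path: look the code up in the single-character string cache, a
          // FixedArray indexed by character code whose entries are either a
          // one-character string or undefined (not yet cached -> call runtime).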
   4643   __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
   4644   __ ShiftLeftImm(r0, char_code, Operand(kPointerSizeLog2));
   4645   __ add(result, result, r0);
   4646   __ LoadP(result, FieldMemOperand(result, FixedArray::kHeaderSize));
   4647   __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
   4648   __ cmp(result, ip);
   4649   __ beq(deferred->entry());
   4650   __ bind(deferred->exit());
   4651 }
   4652 
   4653 
   4654 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
   4655   Register char_code = ToRegister(instr->char_code());
   4656   Register result = ToRegister(instr->result());
   4657 
   4658   // TODO(3095996): Get rid of this. For now, we need to make the
   4659   // result register contain a valid pointer because it is already
   4660   // contained in the register pointer map.
   4661   __ li(result, Operand::Zero());
   4662 
   4663   PushSafepointRegistersScope scope(this);
   4664   __ SmiTag(char_code);
   4665   __ push(char_code);
   4666   CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
   4667                           instr->context());
   4668   __ StoreToSafepointRegisterSlot(r3, result);
   4669 }
   4670 
   4671 
   4672 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
   4673   LOperand* input = instr->value();
   4674   DCHECK(input->IsRegister() || input->IsStackSlot());
   4675   LOperand* output = instr->result();
   4676   DCHECK(output->IsDoubleRegister());
   4677   if (input->IsStackSlot()) {
   4678     Register scratch = scratch0();
   4679     __ LoadP(scratch, ToMemOperand(input));
   4680     __ ConvertIntToDouble(scratch, ToDoubleRegister(output));
   4681   } else {
   4682     __ ConvertIntToDouble(ToRegister(input), ToDoubleRegister(output));
   4683   }
   4684 }
   4685 
   4686 
   4687 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
   4688   LOperand* input = instr->value();
   4689   LOperand* output = instr->result();
   4690   __ ConvertUnsignedIntToDouble(ToRegister(input), ToDoubleRegister(output));
   4691 }
   4692 
   4693 
   4694 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
   4695   class DeferredNumberTagI final : public LDeferredCode {
   4696    public:
   4697     DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
   4698         : LDeferredCode(codegen), instr_(instr) {}
   4699     void Generate() override {
   4700       codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
   4701                                        instr_->temp2(), SIGNED_INT32);
   4702     }
   4703     LInstruction* instr() override { return instr_; }
   4704 
   4705    private:
   4706     LNumberTagI* instr_;
   4707   };
   4708 
   4709   Register src = ToRegister(instr->value());
   4710   Register dst = ToRegister(instr->result());
   4711 
   4712   DeferredNumberTagI* deferred = new (zone()) DeferredNumberTagI(this, instr);
   4713 #if V8_TARGET_ARCH_PPC64
   4714   __ SmiTag(dst, src);
   4715 #else
   4716   __ SmiTagCheckOverflow(dst, src, r0);
   4717   __ BranchOnOverflow(deferred->entry());
   4718 #endif
   4719   __ bind(deferred->exit());
   4720 }
   4721 
   4722 
   4723 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
   4724   class DeferredNumberTagU final : public LDeferredCode {
   4725    public:
   4726     DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
   4727         : LDeferredCode(codegen), instr_(instr) {}
   4728     void Generate() override {
   4729       codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
   4730                                        instr_->temp2(), UNSIGNED_INT32);
   4731     }
   4732     LInstruction* instr() override { return instr_; }
   4733 
   4734    private:
   4735     LNumberTagU* instr_;
   4736   };
   4737 
   4738   Register input = ToRegister(instr->value());
   4739   Register result = ToRegister(instr->result());
   4740 
   4741   DeferredNumberTagU* deferred = new (zone()) DeferredNumberTagU(this, instr);
   4742   __ Cmpli(input, Operand(Smi::kMaxValue), r0);
   4743   __ bgt(deferred->entry());
   4744   __ SmiTag(result, input);
   4745   __ bind(deferred->exit());
   4746 }
   4747 
   4748 
   4749 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
   4750                                      LOperand* temp1, LOperand* temp2,
   4751                                      IntegerSignedness signedness) {
   4752   Label done, slow;
   4753   Register src = ToRegister(value);
   4754   Register dst = ToRegister(instr->result());
   4755   Register tmp1 = scratch0();
   4756   Register tmp2 = ToRegister(temp1);
   4757   Register tmp3 = ToRegister(temp2);
   4758   DoubleRegister dbl_scratch = double_scratch0();
   4759 
   4760   if (signedness == SIGNED_INT32) {
   4761     // There was overflow, so bits 30 and 31 of the original integer
   4762     // disagree. Try to allocate a heap number in new space and store
   4763     // the value in there. If that fails, call the runtime system.
   4764     if (dst.is(src)) {
   4765       __ SmiUntag(src, dst);
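              // Untagging the overflowed smi yields the original value with its
              // sign bit flipped; e.g. on 32-bit, tagging 0x40000000 overflows
              // to 0x80000000 and untagging gives 0xC0000000. The xoris below
              // flips bit 31 (HeapNumber::kSignMask) to recover 0x40000000.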
   4766       __ xoris(src, src, Operand(HeapNumber::kSignMask >> 16));
   4767     }
   4768     __ ConvertIntToDouble(src, dbl_scratch);
   4769   } else {
   4770     __ ConvertUnsignedIntToDouble(src, dbl_scratch);
   4771   }
   4772 
   4773   if (FLAG_inline_new) {
   4774     __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
   4775     __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
   4776     __ b(&done);
   4777   }
   4778 
   4779   // Slow case: Call the runtime system to do the number allocation.
   4780   __ bind(&slow);
   4781   {
   4782     // TODO(3095996): Put a valid pointer value in the stack slot where the
   4783     // result register is stored, as this register is in the pointer map, but
   4784     // contains an integer value.
   4785     __ li(dst, Operand::Zero());
   4786 
   4787     // Preserve the value of all registers.
   4788     PushSafepointRegistersScope scope(this);
   4789 
   4790     // NumberTagI and NumberTagD use the context from the frame, rather than
   4791     // the environment's HContext or HInlinedContext value.
   4792     // They only call Runtime::kAllocateHeapNumber.
   4793     // The corresponding HChange instructions are added in a phase that does
   4794     // not have easy access to the local context.
   4795     __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   4796     __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   4797     RecordSafepointWithRegisters(instr->pointer_map(), 0,
   4798                                  Safepoint::kNoLazyDeopt);
   4799     __ StoreToSafepointRegisterSlot(r3, dst);
   4800   }
   4801 
   4802   // Done. Store the value in dbl_scratch into the value field of the
   4803   // allocated heap number.
   4804   __ bind(&done);
   4805   __ stfd(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
   4806 }
   4807 
   4808 
   4809 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
   4810   class DeferredNumberTagD final : public LDeferredCode {
   4811    public:
   4812     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
   4813         : LDeferredCode(codegen), instr_(instr) {}
   4814     void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
   4815     LInstruction* instr() override { return instr_; }
   4816 
   4817    private:
   4818     LNumberTagD* instr_;
   4819   };
   4820 
   4821   DoubleRegister input_reg = ToDoubleRegister(instr->value());
   4822   Register scratch = scratch0();
   4823   Register reg = ToRegister(instr->result());
   4824   Register temp1 = ToRegister(instr->temp());
   4825   Register temp2 = ToRegister(instr->temp2());
   4826 
   4827   DeferredNumberTagD* deferred = new (zone()) DeferredNumberTagD(this, instr);
   4828   if (FLAG_inline_new) {
   4829     __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
   4830     __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
   4831   } else {
   4832     __ b(deferred->entry());
   4833   }
   4834   __ bind(deferred->exit());
   4835   __ stfd(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
   4836 }
   4837 
   4838 
   4839 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
   4840   // TODO(3095996): Get rid of this. For now, we need to make the
   4841   // result register contain a valid pointer because it is already
   4842   // contained in the register pointer map.
   4843   Register reg = ToRegister(instr->result());
   4844   __ li(reg, Operand::Zero());
   4845 
   4846   PushSafepointRegistersScope scope(this);
   4847   // NumberTagI and NumberTagD use the context from the frame, rather than
   4848   // the environment's HContext or HInlinedContext value.
   4849   // They only call Runtime::kAllocateHeapNumber.
   4850   // The corresponding HChange instructions are added in a phase that does
   4851   // not have easy access to the local context.
   4852   __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   4853   __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   4854   RecordSafepointWithRegisters(instr->pointer_map(), 0,
   4855                                Safepoint::kNoLazyDeopt);
   4856   __ StoreToSafepointRegisterSlot(r3, reg);
   4857 }
   4858 
   4859 
   4860 void LCodeGen::DoSmiTag(LSmiTag* instr) {
   4861   HChange* hchange = instr->hydrogen();
   4862   Register input = ToRegister(instr->value());
   4863   Register output = ToRegister(instr->result());
   4864   if (hchange->CheckFlag(HValue::kCanOverflow) &&
   4865       hchange->value()->CheckFlag(HValue::kUint32)) {
   4866     __ TestUnsignedSmiCandidate(input, r0);
   4867     DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, cr0);
   4868   }
   4869 #if !V8_TARGET_ARCH_PPC64
   4870   if (hchange->CheckFlag(HValue::kCanOverflow) &&
   4871       !hchange->value()->CheckFlag(HValue::kUint32)) {
   4872     __ SmiTagCheckOverflow(output, input, r0);
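            // SmiTagCheckOverflow records the sign of (input ^ 2 * input) in
            // cr0; that xor is negative exactly when bits 30 and 31 of the
            // input differ, i.e. when the value does not fit in a 31-bit smi,
            // hence the lt condition below.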
   4873     DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
   4874   } else {
   4875 #endif
   4876     __ SmiTag(output, input);
   4877 #if !V8_TARGET_ARCH_PPC64
   4878   }
   4879 #endif
   4880 }
   4881 
   4882 
   4883 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
   4884   Register scratch = scratch0();
   4885   Register input = ToRegister(instr->value());
   4886   Register result = ToRegister(instr->result());
   4887   if (instr->needs_check()) {
   4888     // If the input is a HeapObject, the value of scratch won't be zero.
   4889     __ andi(scratch, input, Operand(kHeapObjectTag));
   4890     __ SmiUntag(result, input);
   4891     DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
   4892   } else {
   4893     __ SmiUntag(result, input);
   4894   }
   4895 }
   4896 
   4897 
   4898 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
   4899                                 DoubleRegister result_reg,
   4900                                 NumberUntagDMode mode) {
   4901   bool can_convert_undefined_to_nan =
   4902       instr->hydrogen()->can_convert_undefined_to_nan();
   4903   bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
   4904 
   4905   Register scratch = scratch0();
   4906   DCHECK(!result_reg.is(double_scratch0()));
   4907 
   4908   Label convert, load_smi, done;
   4909 
   4910   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
   4911     // Smi check.
   4912     __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
   4913 
   4914     // Heap number map check.
   4915     __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
   4916     __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
   4917     __ cmp(scratch, ip);
   4918     if (can_convert_undefined_to_nan) {
   4919       __ bne(&convert);
   4920     } else {
   4921       DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
   4922     }
   4923     // Load the heap number value.
   4924     __ lfd(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
   4925     if (deoptimize_on_minus_zero) {
   4926 #if V8_TARGET_ARCH_PPC64
   4927       __ MovDoubleToInt64(scratch, result_reg);
   4928       // rotate left by one for simple compare.
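              // -0.0 is 0x8000000000000000; rotating left by one maps it (and
              // no other bit pattern) to exactly 1, so comparing against 1
              // identifies -0.0.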
   4929       __ rldicl(scratch, scratch, 1, 0);
   4930       __ cmpi(scratch, Operand(1));
   4931 #else
   4932       __ MovDoubleToInt64(scratch, ip, result_reg);
   4933       __ cmpi(ip, Operand::Zero());
   4934       __ bne(&done);
   4935       __ Cmpi(scratch, Operand(HeapNumber::kSignMask), r0);
   4936 #endif
   4937       DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
   4938     }
   4939     __ b(&done);
   4940     if (can_convert_undefined_to_nan) {
   4941       __ bind(&convert);
   4942       // Convert undefined (and hole) to NaN.
   4943       __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
   4944       __ cmp(input_reg, ip);
   4945       DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
   4946       __ LoadRoot(scratch, Heap::kNanValueRootIndex);
   4947       __ lfd(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
   4948       __ b(&done);
   4949     }
   4950   } else {
   4951     __ SmiUntag(scratch, input_reg);
   4952     DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
   4953   }
   4954   // Smi to double register conversion
   4955   __ bind(&load_smi);
   4956   // scratch: untagged value of input_reg
   4957   __ ConvertIntToDouble(scratch, result_reg);
   4958   __ bind(&done);
   4959 }
   4960 
   4961 
   4962 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
   4963   Register input_reg = ToRegister(instr->value());
   4964   Register scratch1 = scratch0();
   4965   Register scratch2 = ToRegister(instr->temp());
   4966   DoubleRegister double_scratch = double_scratch0();
   4967   DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
   4968 
   4969   DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
   4970   DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
   4971 
   4972   Label done;
   4973 
   4974   // Heap number map check.
   4975   __ LoadP(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
   4976   __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
   4977   __ cmp(scratch1, ip);
   4978 
   4979   if (instr->truncating()) {
   4980     // Performs a truncating conversion of a floating point number as used by
   4981     // the JS bitwise operations.
   4982     Label no_heap_number, check_bools, check_false;
   4983     __ bne(&no_heap_number);
   4984     __ mr(scratch2, input_reg);
   4985     __ TruncateHeapNumberToI(input_reg, scratch2);
   4986     __ b(&done);
   4987 
   4988     // Check for Oddballs. Undefined/False is converted to zero and True to one
   4989     // for truncating conversions.
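            // This matches ES ToInt32 semantics: undefined -> 0, false -> 0,
            // true -> 1; any other non-number input deoptimizes below.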
   4990     __ bind(&no_heap_number);
   4991     __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
   4992     __ cmp(input_reg, ip);
   4993     __ bne(&check_bools);
   4994     __ li(input_reg, Operand::Zero());
   4995     __ b(&done);
   4996 
   4997     __ bind(&check_bools);
   4998     __ LoadRoot(ip, Heap::kTrueValueRootIndex);
   4999     __ cmp(input_reg, ip);
   5000     __ bne(&check_false);
   5001     __ li(input_reg, Operand(1));
   5002     __ b(&done);
   5003 
   5004     __ bind(&check_false);
   5005     __ LoadRoot(ip, Heap::kFalseValueRootIndex);
   5006     __ cmp(input_reg, ip);
   5007     DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
   5008     __ li(input_reg, Operand::Zero());
   5009   } else {
   5010     DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
   5011 
   5012     __ lfd(double_scratch2,
   5013            FieldMemOperand(input_reg, HeapNumber::kValueOffset));
   5014     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   5015       // Preserve the heap number pointer in scratch2 for the minus-zero check below.
   5016       __ mr(scratch2, input_reg);
   5017     }
   5018     __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
   5019                              double_scratch);
   5020     DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
   5021 
   5022     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   5023       __ cmpi(input_reg, Operand::Zero());
   5024       __ bne(&done);
   5025       __ lwz(scratch1,
   5026              FieldMemOperand(scratch2, HeapNumber::kValueOffset +
   5027                                            Register::kExponentOffset));
   5028       __ cmpwi(scratch1, Operand::Zero());
   5029       DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
   5030     }
   5031   }
   5032   __ bind(&done);
   5033 }
   5034 
   5035 
   5036 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
   5037   class DeferredTaggedToI final : public LDeferredCode {
   5038    public:
   5039     DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
   5040         : LDeferredCode(codegen), instr_(instr) {}
   5041     void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
   5042     LInstruction* instr() override { return instr_; }
   5043 
   5044    private:
   5045     LTaggedToI* instr_;
   5046   };
   5047 
   5048   LOperand* input = instr->value();
   5049   DCHECK(input->IsRegister());
   5050   DCHECK(input->Equals(instr->result()));
   5051 
   5052   Register input_reg = ToRegister(input);
   5053 
   5054   if (instr->hydrogen()->value()->representation().IsSmi()) {
   5055     __ SmiUntag(input_reg);
   5056   } else {
   5057     DeferredTaggedToI* deferred = new (zone()) DeferredTaggedToI(this, instr);
   5058 
   5059     // Branch to deferred code if the input is a HeapObject.
   5060     __ JumpIfNotSmi(input_reg, deferred->entry());
   5061 
   5062     __ SmiUntag(input_reg);
   5063     __ bind(deferred->exit());
   5064   }
   5065 }
   5066 
   5067 
   5068 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   5069   LOperand* input = instr->value();
   5070   DCHECK(input->IsRegister());
   5071   LOperand* result = instr->result();
   5072   DCHECK(result->IsDoubleRegister());
   5073 
   5074   Register input_reg = ToRegister(input);
   5075   DoubleRegister result_reg = ToDoubleRegister(result);
   5076 
   5077   HValue* value = instr->hydrogen()->value();
   5078   NumberUntagDMode mode = value->representation().IsSmi()
   5079                               ? NUMBER_CANDIDATE_IS_SMI
   5080                               : NUMBER_CANDIDATE_IS_ANY_TAGGED;
   5081 
   5082   EmitNumberUntagD(instr, input_reg, result_reg, mode);
   5083 }
   5084 
   5085 
   5086 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
   5087   Register result_reg = ToRegister(instr->result());
   5088   Register scratch1 = scratch0();
   5089   DoubleRegister double_input = ToDoubleRegister(instr->value());
   5090   DoubleRegister double_scratch = double_scratch0();
   5091 
   5092   if (instr->truncating()) {
   5093     __ TruncateDoubleToI(result_reg, double_input);
   5094   } else {
   5095     __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
   5096                              double_scratch);
   5097     // Deoptimize if the input wasn't an int32 (inside a double).
   5098     DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
   5099     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
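              // An integer result of zero may come from +0.0 or -0.0; the two
              // are distinguished by the sign bit of the original double (its
              // high word on 32-bit), and -0.0 must deoptimize.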
   5100       Label done;
   5101       __ cmpi(result_reg, Operand::Zero());
   5102       __ bne(&done);
   5103 #if V8_TARGET_ARCH_PPC64
   5104       __ MovDoubleToInt64(scratch1, double_input);
   5105 #else
   5106       __ MovDoubleHighToInt(scratch1, double_input);
   5107 #endif
   5108       __ cmpi(scratch1, Operand::Zero());
   5109       DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
   5110       __ bind(&done);
   5111     }
   5112   }
   5113 }
   5114 
   5115 
   5116 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
   5117   Register result_reg = ToRegister(instr->result());
   5118   Register scratch1 = scratch0();
   5119   DoubleRegister double_input = ToDoubleRegister(instr->value());
   5120   DoubleRegister double_scratch = double_scratch0();
   5121 
   5122   if (instr->truncating()) {
   5123     __ TruncateDoubleToI(result_reg, double_input);
   5124   } else {
   5125     __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
   5126                              double_scratch);
   5127     // Deoptimize if the input wasn't an int32 (inside a double).
   5128     DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
   5129     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   5130       Label done;
   5131       __ cmpi(result_reg, Operand::Zero());
   5132       __ bne(&done);
   5133 #if V8_TARGET_ARCH_PPC64
   5134       __ MovDoubleToInt64(scratch1, double_input);
   5135 #else
   5136       __ MovDoubleHighToInt(scratch1, double_input);
   5137 #endif
   5138       __ cmpi(scratch1, Operand::Zero());
   5139       DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
   5140       __ bind(&done);
   5141     }
   5142   }
   5143 #if V8_TARGET_ARCH_PPC64
   5144   __ SmiTag(result_reg);
   5145 #else
   5146   __ SmiTagCheckOverflow(result_reg, r0);
   5147   DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
   5148 #endif
   5149 }
   5150 
   5151 
   5152 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
   5153   LOperand* input = instr->value();
   5154   __ TestIfSmi(ToRegister(input), r0);
   5155   DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
   5156 }
   5157 
   5158 
   5159 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
   5160   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   5161     LOperand* input = instr->value();
   5162     __ TestIfSmi(ToRegister(input), r0);
   5163     DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
   5164   }
   5165 }
   5166 
   5167 
   5168 void LCodeGen::DoCheckArrayBufferNotNeutered(
   5169     LCheckArrayBufferNotNeutered* instr) {
   5170   Register view = ToRegister(instr->view());
   5171   Register scratch = scratch0();
   5172 
   5173   __ LoadP(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
   5174   __ lwz(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
   5175   __ andi(r0, scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
   5176   DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds, cr0);
   5177 }
   5178 
   5179 
   5180 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
   5181   Register input = ToRegister(instr->value());
   5182   Register scratch = scratch0();
   5183 
   5184   __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
   5185   __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
   5186 
   5187   if (instr->hydrogen()->is_interval_check()) {
   5188     InstanceType first;
   5189     InstanceType last;
   5190     instr->hydrogen()->GetCheckInterval(&first, &last);
   5191 
   5192     __ cmpli(scratch, Operand(first));
   5193 
   5194     // If there is only one type in the interval, check for equality.
   5195     if (first == last) {
   5196       DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
   5197     } else {
   5198       DeoptimizeIf(lt, instr, Deoptimizer::kWrongInstanceType);
   5199       // Omit check for the last type.
   5200       if (last != LAST_TYPE) {
   5201         __ cmpli(scratch, Operand(last));
   5202         DeoptimizeIf(gt, instr, Deoptimizer::kWrongInstanceType);
   5203       }
   5204     }
   5205   } else {
   5206     uint8_t mask;
   5207     uint8_t tag;
   5208     instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
   5209 
   5210     if (base::bits::IsPowerOfTwo32(mask)) {
   5211       DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
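              // A single-bit mask needs only an andi: deopt on a set bit when
              // the expected tag is 0, and on a clear bit when the expected tag
              // equals the mask.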
   5212       __ andi(r0, scratch, Operand(mask));
   5213       DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
   5214                    cr0);
   5215     } else {
   5216       __ andi(scratch, scratch, Operand(mask));
   5217       __ cmpi(scratch, Operand(tag));
   5218       DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
   5219     }
   5220   }
   5221 }
   5222 
   5223 
   5224 void LCodeGen::DoCheckValue(LCheckValue* instr) {
   5225   Register reg = ToRegister(instr->value());
   5226   Handle<HeapObject> object = instr->hydrogen()->object().handle();
   5227   AllowDeferredHandleDereference smi_check;
   5228   if (isolate()->heap()->InNewSpace(*object)) {
   5230     Handle<Cell> cell = isolate()->factory()->NewCell(object);
   5231     __ mov(ip, Operand(cell));
   5232     __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset));
   5233     __ cmp(reg, ip);
   5234   } else {
   5235     __ Cmpi(reg, Operand(object), r0);
   5236   }
   5237   DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
   5238 }
   5239 
   5240 
   5241 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
   5242   Register temp = ToRegister(instr->temp());
   5243   {
   5244     PushSafepointRegistersScope scope(this);
   5245     __ push(object);
   5246     __ li(cp, Operand::Zero());
   5247     __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
   5248     RecordSafepointWithRegisters(instr->pointer_map(), 1,
   5249                                  Safepoint::kNoLazyDeopt);
   5250     __ StoreToSafepointRegisterSlot(r3, temp);
   5251   }
   5252   __ TestIfSmi(temp, r0);
   5253   DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, cr0);
   5254 }
   5255 
   5256 
   5257 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
   5258   class DeferredCheckMaps final : public LDeferredCode {
   5259    public:
   5260     DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
   5261         : LDeferredCode(codegen), instr_(instr), object_(object) {
   5262       SetExit(check_maps());
   5263     }
   5264     void Generate() override {
   5265       codegen()->DoDeferredInstanceMigration(instr_, object_);
   5266     }
   5267     Label* check_maps() { return &check_maps_; }
   5268     LInstruction* instr() override { return instr_; }
   5269 
   5270    private:
   5271     LCheckMaps* instr_;
   5272     Label check_maps_;
   5273     Register object_;
   5274   };
   5275 
   5276   if (instr->hydrogen()->IsStabilityCheck()) {
   5277     const UniqueSet<Map>* maps = instr->hydrogen()->maps();
   5278     for (int i = 0; i < maps->size(); ++i) {
   5279       AddStabilityDependency(maps->at(i).handle());
   5280     }
   5281     return;
   5282   }
   5283 
   5284   Register object = ToRegister(instr->value());
   5285   Register map_reg = ToRegister(instr->temp());
   5286 
   5287   __ LoadP(map_reg, FieldMemOperand(object, HeapObject::kMapOffset));
   5288 
   5289   DeferredCheckMaps* deferred = NULL;
   5290   if (instr->hydrogen()->HasMigrationTarget()) {
   5291     deferred = new (zone()) DeferredCheckMaps(this, instr, object);
   5292     __ bind(deferred->check_maps());
   5293   }
   5294 
   5295   const UniqueSet<Map>* maps = instr->hydrogen()->maps();
   5296   Label success;
   5297   for (int i = 0; i < maps->size() - 1; i++) {
   5298     Handle<Map> map = maps->at(i).handle();
   5299     __ CompareMap(map_reg, map, &success);
   5300     __ beq(&success);
   5301   }
   5302 
   5303   Handle<Map> map = maps->at(maps->size() - 1).handle();
   5304   __ CompareMap(map_reg, map, &success);
   5305   if (instr->hydrogen()->HasMigrationTarget()) {
   5306     __ bne(deferred->entry());
   5307   } else {
   5308     DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
   5309   }
   5310 
   5311   __ bind(&success);
   5312 }
   5313 
   5314 
   5315 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
   5316   DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
   5317   Register result_reg = ToRegister(instr->result());
   5318   __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
   5319 }
   5320 
   5321 
   5322 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
   5323   Register unclamped_reg = ToRegister(instr->unclamped());
   5324   Register result_reg = ToRegister(instr->result());
   5325   __ ClampUint8(result_reg, unclamped_reg);
   5326 }
   5327 
   5328 
   5329 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
   5330   Register scratch = scratch0();
   5331   Register input_reg = ToRegister(instr->unclamped());
   5332   Register result_reg = ToRegister(instr->result());
   5333   DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
   5334   Label is_smi, done, heap_number;
   5335 
   5336   // Both smi and heap number cases are handled.
   5337   __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
   5338 
   5339   // Check for heap number
   5340   __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
   5341   __ Cmpi(scratch, Operand(factory()->heap_number_map()), r0);
   5342   __ beq(&heap_number);
   5343 
   5344   // Check for undefined. Undefined is converted to zero for clamping
   5345   // conversions.
   5346   __ Cmpi(input_reg, Operand(factory()->undefined_value()), r0);
   5347   DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
   5348   __ li(result_reg, Operand::Zero());
   5349   __ b(&done);
   5350 
   5351   // Heap number
   5352   __ bind(&heap_number);
   5353   __ lfd(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
   5354   __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
   5355   __ b(&done);
   5356 
   5357   // smi
   5358   __ bind(&is_smi);
   5359   __ ClampUint8(result_reg, result_reg);
   5360 
   5361   __ bind(&done);
   5362 }
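        // Note: this implements the Uint8Clamped conversion for a tagged input:
        // smis and heap numbers are clamped to [0, 255] (NaN presumably clamps
        // to 0 inside ClampDoubleToUint8), undefined maps to 0, and anything
        // else deoptimizes.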
   5363 
   5364 
   5365 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
   5366   DoubleRegister value_reg = ToDoubleRegister(instr->value());
   5367   Register result_reg = ToRegister(instr->result());
   5368 
   5369   if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
   5370     __ MovDoubleHighToInt(result_reg, value_reg);
   5371   } else {
   5372     __ MovDoubleLowToInt(result_reg, value_reg);
   5373   }
   5374 }
   5375 
   5376 
   5377 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
   5378   Register hi_reg = ToRegister(instr->hi());
   5379   Register lo_reg = ToRegister(instr->lo());
   5380   DoubleRegister result_reg = ToDoubleRegister(instr->result());
   5381 #if V8_TARGET_ARCH_PPC64
   5382   __ MovInt64ComponentsToDouble(result_reg, hi_reg, lo_reg, r0);
   5383 #else
   5384   __ MovInt64ToDouble(result_reg, hi_reg, lo_reg);
   5385 #endif
   5386 }
   5387 
   5388 
   5389 void LCodeGen::DoAllocate(LAllocate* instr) {
   5390   class DeferredAllocate final : public LDeferredCode {
   5391    public:
   5392     DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
   5393         : LDeferredCode(codegen), instr_(instr) {}
   5394     void Generate() override { codegen()->DoDeferredAllocate(instr_); }
   5395     LInstruction* instr() override { return instr_; }
   5396 
   5397    private:
   5398     LAllocate* instr_;
   5399   };
   5400 
   5401   DeferredAllocate* deferred = new (zone()) DeferredAllocate(this, instr);
   5402 
   5403   Register result = ToRegister(instr->result());
   5404   Register scratch = ToRegister(instr->temp1());
   5405   Register scratch2 = ToRegister(instr->temp2());
   5406 
   5407   // Allocate memory for the object.
   5408   AllocationFlags flags = TAG_OBJECT;
   5409   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
   5410     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
   5411   }
   5412   if (instr->hydrogen()->IsOldSpaceAllocation()) {
   5413     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
   5414     flags = static_cast<AllocationFlags>(flags | PRETENURE);
   5415   }
   5416 
   5417   if (instr->size()->IsConstantOperand()) {
   5418     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   5419     CHECK(size <= Page::kMaxRegularHeapObjectSize);
   5420     __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
   5421   } else {
   5422     Register size = ToRegister(instr->size());
   5423     __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
   5424   }
   5425 
   5426   __ bind(deferred->exit());
   5427 
   5428   if (instr->hydrogen()->MustPrefillWithFiller()) {
   5429     if (instr->size()->IsConstantOperand()) {
   5430       int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   5431       __ LoadIntLiteral(scratch, size - kHeapObjectTag);
   5432     } else {
   5433       __ subi(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
   5434     }
   5435     __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
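            // Fill the object with one-word filler maps, from the last word
            // down to the first. scratch holds size - kHeapObjectTag, so
            // result + scratch (result is a tagged pointer) is word-aligned.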
   5436     Label loop;
   5437     __ bind(&loop);
   5438     __ subi(scratch, scratch, Operand(kPointerSize));
   5439     __ StorePX(scratch2, MemOperand(result, scratch));
   5440     __ cmpi(scratch, Operand::Zero());
   5441     __ bge(&loop);
   5442   }
   5443 }
   5444 
   5445 
   5446 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
   5447   Register result = ToRegister(instr->result());
   5448 
   5449   // TODO(3095996): Get rid of this. For now, we need to make the
   5450   // result register contain a valid pointer because it is already
   5451   // contained in the register pointer map.
   5452   __ LoadSmiLiteral(result, Smi::FromInt(0));
   5453 
   5454   PushSafepointRegistersScope scope(this);
   5455   if (instr->size()->IsRegister()) {
   5456     Register size = ToRegister(instr->size());
   5457     DCHECK(!size.is(result));
   5458     __ SmiTag(size);
   5459     __ push(size);
   5460   } else {
   5461     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   5462 #if !V8_TARGET_ARCH_PPC64
   5463     if (size >= 0 && size <= Smi::kMaxValue) {
   5464 #endif
   5465       __ Push(Smi::FromInt(size));
   5466 #if !V8_TARGET_ARCH_PPC64
   5467     } else {
   5468       // We should never get here at runtime => abort
   5469       __ stop("invalid allocation size");
   5470       return;
   5471     }
   5472 #endif
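            // On 64-bit targets any int32 size fits in a smi, so the guard is
            // only needed on 32-bit.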
   5473   }
   5474 
   5475   int flags = AllocateDoubleAlignFlag::encode(
   5476       instr->hydrogen()->MustAllocateDoubleAligned());
   5477   if (instr->hydrogen()->IsOldSpaceAllocation()) {
   5478     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
   5479     flags = AllocateTargetSpace::update(flags, OLD_SPACE);
   5480   } else {
   5481     flags = AllocateTargetSpace::update(flags, NEW_SPACE);
   5482   }
   5483   __ Push(Smi::FromInt(flags));
   5484 
   5485   CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr,
   5486                           instr->context());
   5487   __ StoreToSafepointRegisterSlot(r3, result);
   5488 }
   5489 
   5490 
void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
  DCHECK(ToRegister(instr->value()).is(r3));
  __ push(r3);
  CallRuntime(Runtime::kToFastProperties, 1, instr);
}


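// Fast path for typeof: a smi is always "number", so the stub call is
// skipped entirely for smi inputs. For example (JS semantics only, no
// additional generated code):
//
//   typeof 42   // "number" -- taken on the smi fast path below
//   typeof "x"  // "string" -- handled by TypeofStub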
void LCodeGen::DoTypeof(LTypeof* instr) {
  DCHECK(ToRegister(instr->value()).is(r6));
  DCHECK(ToRegister(instr->result()).is(r3));
  Label end, do_call;
  Register value_register = ToRegister(instr->value());
  __ JumpIfNotSmi(value_register, &do_call);
  __ mov(r3, Operand(isolate()->factory()->number_string()));
  __ b(&end);
  __ bind(&do_call);
  TypeofStub stub(isolate());
  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
  __ bind(&end);
}


void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  Condition final_branch_condition =
      EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), input,
                   instr->type_literal());
  if (final_branch_condition != kNoCondition) {
    EmitBranch(instr, final_branch_condition);
  }
}


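// Emits the comparison for `typeof x == "<type_name>"` and returns the
// condition on which to branch to the true label. A sketch of the
// "function" case below: typeof yields "function" only for objects that
// are callable but not undetectable, i.e. (reading Map::kBitField)
//
//   (bit_field & ((1 << kIsCallable) | (1 << kIsUndetectable)))
//       == (1 << kIsCallable)
//
// which is exactly the andi/cmpi pair emitted in that branch.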
Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
                                 Register input, Handle<String> type_name) {
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(input, true_label);
    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->string_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
    final_branch_condition = lt;

  } else if (String::Equals(type_name, factory->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ beq(true_label);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->undefined_string())) {
    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
    __ beq(true_label);
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ ExtractBit(r0, scratch, Map::kIsUndetectable);
    __ cmpi(r0, Operand::Zero());
    final_branch_condition = ne;

  } else if (String::Equals(type_name, factory->function_string())) {
    __ JumpIfSmi(input, false_label);
    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ andi(scratch, scratch,
            Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    __ cmpi(scratch, Operand(1 << Map::kIsCallable));
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->object_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    __ beq(true_label);
    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
    __ CompareObjectType(input, scratch, ip, FIRST_JS_RECEIVER_TYPE);
    __ blt(false_label);
    // Check for callable or undetectable objects => false.
    __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ andi(r0, scratch,
            Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    __ cmpi(r0, Operand::Zero());
    final_branch_condition = eq;

// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)        \
  } else if (String::Equals(type_name, factory->type##_string())) {  \
    __ JumpIfSmi(input, false_label);                                \
    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); \
    __ CompareRoot(scratch, Heap::k##Type##MapRootIndex);            \
    final_branch_condition = eq;
  SIMD128_TYPES(SIMD128_TYPE)
#undef SIMD128_TYPE
    // clang-format on

  } else {
    __ b(false_label);
  }

  return final_branch_condition;
}


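// Pads the instruction stream so the deoptimizer can later patch a call at
// last_lazy_deopt_pc_ without overwriting subsequent code. A worked
// example, assuming Assembler::kInstrSize == 4 (fixed-width PPC encoding):
// with space_needed == 12, last_lazy_deopt_pc_ == 100 and pc_offset() ==
// 104, padding_size is 100 + 12 - 104 = 8, so two nops are emitted.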
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
      while (padding_size > 0) {
        __ nop();
        padding_size -= Assembler::kInstrSize;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}


void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
}


void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}


void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}


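// Deferred (out-of-line) part of the stack check: calls Runtime::kStackGuard
// with all registers saved to safepoint slots, so live values remain
// visible to the GC and the deoptimizer while the guard runs.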
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}


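// Two flavours of stack check, distinguished by the hydrogen instruction:
//  - function entry: compare sp against the stack limit and call the
//    StackCheck builtin inline when the limit is exceeded;
//  - backwards branch (loop back-edge): branch to the deferred code above,
//    which calls the runtime, so the fast path stays a compare and branch.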
void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck final : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
    __ cmpl(sp, ip);
    __ bge(&done);
    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new (zone()) DeferredStackCheck(this, instr);
    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
    __ cmpl(sp, ip);
    __ blt(deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting the call and the safepoint
    // in the deferred code.
  }
}


void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}


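// Prepares the receiver of a for-in loop. Fast path: if the enum caches are
// valid along the prototype chain, the receiver's map is used directly;
// otherwise Runtime::kGetPropertyNamesFast is called and its result is
// checked to be a map (meta map) before use. The source-level construct
// this backs:
//
//   for (var key in obj) { ... }   // obj arrives in r3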
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  __ TestIfSmi(r3, r0);
  DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);

  STATIC_ASSERT(JS_PROXY_TYPE == FIRST_JS_RECEIVER_TYPE);
  __ CompareObjectType(r3, r4, r4, JS_PROXY_TYPE);
  DeoptimizeIf(le, instr, Deoptimizer::kWrongInstanceType);

  Label use_cache, call_runtime;
  Register null_value = r8;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  __ CheckEnumCache(null_value, &call_runtime);

  __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
  __ b(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(r3);
  CallRuntime(Runtime::kGetPropertyNamesFast, instr);

  __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
  __ cmp(r4, ip);
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
  __ bind(&use_cache);
}


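// Loads the enum cache for the map produced by DoForInPrepareMap. An
// EnumLength of zero means there is nothing to enumerate, so the empty
// fixed array is returned; otherwise the cache is read out of the map's
// instance descriptors, deoptimizing if the requested cache slot is empty.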
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ CmpSmiLiteral(result, Smi::FromInt(0), r0);
  __ bne(&load_cache);
  __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ b(&done);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  __ cmpi(result, Operand::Zero());
  DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);

  __ bind(&done);
}


void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  __ cmp(map, scratch0());
  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
}


void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result, Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object, index);
  __ li(cp, Operand::Zero());
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(instr->pointer_map(), 2,
                               Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(r3, result);
}


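// The index operand is a smi with a packed payload (a sketch of the
// decoding performed below, inferred from this code):
//  - lowest payload bit set => the field holds a mutable HeapNumber; take
//    the deferred runtime path above;
//  - otherwise the remaining bits form a signed field index: non-negative
//    indices address in-object fields relative to JSObject::kHeaderSize,
//    negative indices address the out-of-object properties backing store.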
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen, LLoadFieldByIndex* instr,
                              Register result, Register object, Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {}
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  DeferredLoadMutableDouble* deferred;
  deferred = new (zone())
      DeferredLoadMutableDouble(this, instr, result, object, index);

  Label out_of_object, done;

  __ TestBitMask(index, reinterpret_cast<uintptr_t>(Smi::FromInt(1)), r0);
  __ bne(deferred->entry(), cr0);
  __ ShiftRightArithImm(index, index, 1);

  __ cmpi(index, Operand::Zero());
  __ blt(&out_of_object);

  __ SmiToPtrArrayOffset(r0, index);
  __ add(scratch, object, r0);
  __ LoadP(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ b(&done);

  __ bind(&out_of_object);
  __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // The index is equal to the negated out-of-object property index plus 1.
  __ SmiToPtrArrayOffset(r0, index);
  __ sub(scratch, result, r0);
  __ LoadP(result,
           FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}


void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
  Register context = ToRegister(instr->context());
  __ StoreP(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
}


void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
  Handle<ScopeInfo> scope_info = instr->scope_info();
  __ Push(scope_info);
  __ push(ToRegister(instr->function()));
  CallRuntime(Runtime::kPushBlockContext, instr);
  RecordSafepoint(Safepoint::kNoLazyDeopt);
}


#undef __
}  // namespace internal
}  // namespace v8