// Copyright 2014 the V8 project authors. All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/crankshaft/s390/lithium-codegen-s390.h"

#include "src/base/bits.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/crankshaft/hydrogen-osr.h"
#include "src/crankshaft/s390/lithium-gap-resolver-s390.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {

class SafepointGenerator final : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) {}
  virtual ~SafepointGenerator() {}

  void BeforeCall(int call_size) const override {}

  void AfterCall() const override {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};

LCodeGen::PushSafepointRegistersScope::PushSafepointRegistersScope(
    LCodeGen* codegen)
    : codegen_(codegen) {
  DCHECK(codegen_->info()->is_calling());
  DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
  codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
  StoreRegistersStateStub stub(codegen_->isolate());
  codegen_->masm_->CallStub(&stub);
}

LCodeGen::PushSafepointRegistersScope::~PushSafepointRegistersScope() {
  DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
  RestoreRegistersStateStub stub(codegen_->isolate());
  codegen_->masm_->CallStub(&stub);
  codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
}
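
// A PushSafepointRegistersScope brackets code that calls out while registers
// may hold live tagged values: the constructor spills the register state via
// StoreRegistersStateStub and switches the expected safepoint kind to
// kWithRegisters; the destructor restores the registers and switches back.
// A minimal usage sketch (Runtime::kSomeFunction is a placeholder, not a
// real runtime id):
//
//   {
//     PushSafepointRegistersScope scope(this);
//     __ CallRuntimeSaveDoubles(Runtime::kSomeFunction);
//     RecordSafepointWithRegisters(instr->pointer_map(), 0,
//                                  Safepoint::kNoLazyDeopt);
//   }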

#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
         GenerateJumpTable() && GenerateSafepointTable();
}

void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetTotalFrameSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}

void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ StoreDouble(DoubleRegister::from_code(save_iterator.Current()),
                   MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}

void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ LoadDouble(DoubleRegister::from_code(save_iterator.Current()),
                  MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}

bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

    // r3: Callee's JS function.
    // cp: Callee's context.
    // fp: Caller's frame pointer.
    // lr: Caller's pc.
    // ip: Our own function entry (required by the prologue)
  }

  int prologue_offset = masm_->pc_offset();

  if (prologue_offset) {
    // Prologue logic requires its starting address in ip and the
    // corresponding offset from the function entry.  Need to add
    // 4 bytes for the size of AHI/AGHI that AddP expands into.
    prologue_offset += sizeof(FourByteInstr);
    __ AddP(ip, ip, Operand(prologue_offset));
  }
  info()->set_prologue_offset(prologue_offset);
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue(StackFrame::STUB, ip, prologue_offset);
    } else {
      __ Prologue(info()->GeneratePreagedPrologue(), ip, prologue_offset);
    }
    frame_is_built_ = true;
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    __ lay(sp, MemOperand(sp, -(slots * kPointerSize)));
    if (FLAG_debug_code) {
      __ Push(r2, r3);
      __ mov(r2, Operand(slots * kPointerSize));
      __ mov(r3, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ StoreP(r3, MemOperand(sp, r2, kPointerSize));
      __ lay(r2, MemOperand(r2, -kPointerSize));
      __ CmpP(r2, Operand::Zero());
      __ bne(&loop);
      __ Pop(r2, r3);
    }
  }
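
  // In debug builds the loop above fills the freshly reserved spill area with
  // kSlotsZapValue, so reads from uninitialized stack slots are easy to spot.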

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }
  return !is_aborted();
}

void LCodeGen::DoPrologue(LPrologue* instr) {
  Comment(";;; Prologue begin");

  // Possibly allocate a local context.
  if (info()->scope()->NeedsContext()) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in r3.
    int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
    if (info()->scope()->is_script_scope()) {
      __ push(r3);
      __ Push(info()->scope()->scope_info());
      __ CallRuntime(Runtime::kNewScriptContext);
      deopt_mode = Safepoint::kLazyDeopt;
    } else {
      if (slots <= FastNewFunctionContextStub::kMaximumSlots) {
        FastNewFunctionContextStub stub(isolate());
        __ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
               Operand(slots));
        __ CallStub(&stub);
        // Result of FastNewFunctionContextStub is always in new space.
        need_write_barrier = false;
      } else {
        __ push(r3);
        __ CallRuntime(Runtime::kNewFunctionContext);
      }
    }
    RecordSafepoint(deopt_mode);

    // Context is returned in both r2 and cp.  It replaces the context
    // passed to us.  It's saved in the stack and kept live in cp.
    __ LoadRR(cp, r2);
    __ StoreP(r2, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = info()->scope()->num_parameters();
    int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? info()->scope()->receiver()
                                : info()->scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
                               (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ LoadP(r2, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextMemOperand(cp, var->index());
        __ StoreP(r2, target);
        // Update the write barrier. This clobbers r5 and r2.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(cp, target.offset(), r2, r5,
                                    GetLinkRegisterState(), kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, r2, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  Comment(";;; Prologue end");
}

void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if there
  // are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ lay(sp, MemOperand(sp, -slots * kPointerSize));
}

void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}

bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(value->position());

      Comment(
          ";;; <@%d,#%d> "
          "-------------------- Deferred %s --------------------",
          code->instruction_index(), code->instr()->hydrogen_value()->id(),
          code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ LoadSmiLiteral(scratch0(), Smi::FromInt(StackFrame::STUB));
        __ PushCommonFrame(scratch0());
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ PopCommonFrame(scratch0());
        frame_is_built_ = false;
      }
      __ b(code->exit());
    }
  }

  return !is_aborted();
}

bool LCodeGen::GenerateJumpTable() {
  // Check that the jump table is accessible from everywhere in the function
  // code, i.e. that offsets in halfwords to the table can be encoded in the
  // 32-bit signed immediate of a branch instruction.
  // To simplify, we consider the code size from the first instruction to the
  // end of the jump table. We also don't consider the pc load delta.
  // Each entry in the jump table generates one instruction and inlines one
  // 32-bit datum after it.
  // TODO(joransiu): The Int24 condition can likely be relaxed for S390
  if (!is_int24(masm()->pc_offset() + jump_table_.length() * 7)) {
    Abort(kGeneratedCodeIsTooLarge);
  }

  if (jump_table_.length() > 0) {
    Label needs_frame, call_deopt_entry;

    Comment(";;; -------------------- Jump table --------------------");
    Address base = jump_table_[0].address;

    Register entry_offset = scratch0();

    int length = jump_table_.length();
    for (int i = 0; i < length; i++) {
      Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
      __ bind(&table_entry->label);

      DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
      Address entry = table_entry->address;
      DeoptComment(table_entry->deopt_info);

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load an immediate
      // offset which will be added to the base address later.
      __ mov(entry_offset, Operand(entry - base));

      if (table_entry->needs_frame) {
        DCHECK(!info()->saves_caller_doubles());
        Comment(";;; call deopt with frame");
        __ PushCommonFrame();
        __ b(r14, &needs_frame);
      } else {
        __ b(r14, &call_deopt_entry);
      }
    }
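
    // E.g. if the first entry's address is base and another entry sits 0x10
    // bytes past it, that entry loads the small immediate 0x10 and the shared
    // tail below adds base exactly once, instead of each entry materializing
    // a full absolute address.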

    if (needs_frame.is_linked()) {
      __ bind(&needs_frame);
      // This variant of deopt can only be used with stubs. Since we don't
      // have a function pointer to install in the stack frame that we're
      // building, install a special marker there instead.
      DCHECK(info()->IsStub());
      __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
      __ push(ip);
    }

    Comment(";;; call deopt");
    __ bind(&call_deopt_entry);

    if (info()->saves_caller_doubles()) {
      DCHECK(info()->IsStub());
      RestoreCallerDoubles();
    }

    // Add the base address to the offset previously loaded in entry_offset.
    __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
    __ AddP(ip, entry_offset, ip);
    __ Jump(ip);
  }

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}

bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  safepoints_.Emit(masm(), GetTotalFrameSlotCount());
  return !is_aborted();
}

Register LCodeGen::ToRegister(int code) const {
  return Register::from_code(code);
}

DoubleRegister LCodeGen::ToDoubleRegister(int code) const {
  return DoubleRegister::from_code(code);
}

Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}

Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      AllowDeferredHandleDereference get_number;
      DCHECK(literal->IsNumber());
      __ LoadIntLiteral(scratch, static_cast<int32_t>(literal->Number()));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      DCHECK(r.IsSmiOrTagged());
      __ Move(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot()) {
    __ LoadP(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}

void LCodeGen::EmitLoadIntegerConstant(LConstantOperand* const_op,
                                       Register dst) {
  DCHECK(IsInteger32(const_op));
  HConstant* constant = chunk_->LookupConstant(const_op);
  int32_t value = constant->Integer32Value();
  if (IsSmi(const_op)) {
    __ LoadSmiLiteral(dst, Smi::FromInt(value));
  } else {
    __ LoadIntLiteral(dst, value);
  }
}
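
// E.g. for the constant 5, a Smi-typed operand loads the tagged smi for 5
// (the value shifted into the smi payload bits; on s390x the payload sits in
// the upper 32 bits), while an Integer32-typed operand loads the raw bit
// pattern 5.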

DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}

Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}

bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}

bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}

int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}

intptr_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                    const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(r.IsSmiOrTagged());
  return reinterpret_cast<intptr_t>(Smi::FromInt(value));
}

Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}

double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}

Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand::Zero();
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand::Zero();
}

static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize;
}
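
// E.g. index == -1 maps to offset 0 (the argument at the stack pointer) and
// index == -2 maps to kPointerSize, so more negative indices address
// arguments further from sp.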

MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, FrameSlotToFPOffset(op->index()));
  } else {
    // Without an eager frame, retrieve the parameter relative to the
    // stack pointer.
    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}

MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  DCHECK(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
  } else {
    // Without an eager frame, retrieve the parameter relative to the
    // stack pointer.
    return MemOperand(sp,
                      ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}

void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();

  WriteTranslation(environment->outer(), translation);
  WriteTranslationFrame(environment, translation);

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(
        environment, translation, value, environment->HasTaggedValueAt(i),
        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
  }
}
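
// The recursion on environment->outer() above means translation frames are
// written outermost-first; e.g. for f() inlined into g(), g's frame is
// emitted before f's.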

void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation, LOperand* op,
                                bool is_tagged, bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment, translation, value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer, dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    int index = op->index();
    if (is_tagged) {
      translation->StoreStackSlot(index);
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(index);
    } else {
      translation->StoreInt32StackSlot(index);
    }
  } else if (op->IsDoubleStackSlot()) {
    int index = op->index();
    translation->StoreDoubleStackSlot(index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}

void LCodeGen::CallCode(Handle<Code> code, RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}

void LCodeGen::CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC || code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}

void LCodeGen::CallRuntime(const Runtime::Function* function, int num_arguments,
                           LInstruction* instr, SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}

void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ LoadP(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}

void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
                                       LInstruction* instr, LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(instr->pointer_map(), argc,
                               Safepoint::kNoLazyDeopt);
}

void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index, translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}

void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
                            DeoptimizeReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type,
                            CRegister cr) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Register scratch = scratch0();
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;

    // Store the condition on the stack if necessary
    if (cond != al) {
      Label done;
      __ LoadImmP(scratch, Operand::Zero());
      __ b(NegateCondition(cond), &done, Label::kNear);
      __ LoadImmP(scratch, Operand(1));
      __ bind(&done);
      __ push(scratch);
    }

    Label done;
    __ Push(r3);
    __ mov(scratch, Operand(count));
    __ LoadW(r3, MemOperand(scratch));
    __ Sub32(r3, r3, Operand(1));
    __ Cmp32(r3, Operand::Zero());
    __ bne(&no_deopt, Label::kNear);

    __ LoadImmP(r3, Operand(FLAG_deopt_every_n_times));
    __ StoreW(r3, MemOperand(scratch));
    __ Pop(r3);

    if (cond != al) {
      // Clean up the stack before the deoptimizer call
      __ pop(scratch);
    }

    __ Call(entry, RelocInfo::RUNTIME_ENTRY);

    __ b(&done);

    __ bind(&no_deopt);
    __ StoreW(r3, MemOperand(scratch));
    __ Pop(r3);

    if (cond != al) {
      // Clean up the stack before the deoptimizer call
      __ pop(scratch);
    }

    __ bind(&done);

    if (cond != al) {
      cond = ne;
      __ CmpP(scratch, Operand::Zero());
    }
  }
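  // The block above implements --deopt-every-n-times: a per-isolate counter
  // is decremented at each potential deopt site; when it reaches zero it is
  // reset to FLAG_deopt_every_n_times and the deoptimizer entry is called
  // unconditionally, stress-testing deopt paths in otherwise passing code.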

  if (info()->ShouldTrapOnDeopt()) {
    __ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);

  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                            !frame_is_built_);
    // We often have several deopts to the same entry; reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry.IsEquivalentTo(jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    __ b(cond, &jump_table_.last().label /*, cr*/);
  }
}

void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
                            DeoptimizeReason deopt_reason, CRegister cr) {
  Deoptimizer::BailoutType bailout_type =
      info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
  DeoptimizeIf(cond, instr, deopt_reason, bailout_type, cr);
}

void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
                                            SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(instr->pointer_map(), 0,
                                 Safepoint::kLazyDeopt);
  }
}

void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
                               int arguments, Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint =
      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}

void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}

void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}

void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}

static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}

void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_, label->hydrogen_value()->id(),
          label->block_id(), LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}

void LCodeGen::DoParallelMove(LParallelMove* move) { resolver_.Resolve(move); }

void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION; i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}

void LCodeGen::DoInstructionGap(LInstructionGap* instr) { DoGap(instr); }

void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}

void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}

void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
  HMod* hmod = instr->hydrogen();
  int32_t shift = WhichPowerOf2Abs(divisor);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ CmpP(dividend, Operand::Zero());
    __ bge(&dividend_is_not_negative, Label::kNear);
    if (shift) {
      // Note that this is correct even for kMinInt operands.
      __ LoadComplementRR(dividend, dividend);
      __ ExtractBitRange(dividend, dividend, shift - 1, 0);
      __ LoadComplementRR(dividend, dividend);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
      }
    } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ mov(dividend, Operand::Zero());
    } else {
      DeoptimizeIf(al, instr, DeoptimizeReason::kMinusZero);
    }
    __ b(&done, Label::kNear);
  }
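
  // Worked example for the negative path above with divisor 4 (shift == 2):
  // dividend == -5 -> negate: 5 -> 5 & 0b11 == 1 -> negate: -1, matching
  // JavaScript -5 % 4 == -1 (the remainder takes the sign of the dividend).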

  __ bind(&dividend_is_not_negative);
  if (shift) {
    __ ExtractBitRange(dividend, dividend, shift - 1, 0);
  } else {
    __ mov(dividend, Operand::Zero());
  }
  __ bind(&done);
}

void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
    return;
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ mov(ip, Operand(Abs(divisor)));
  __ Mul(result, result, ip);
  __ SubP(result, dividend, result /*, LeaveOE, SetRC*/);
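  // E.g. dividend == 7, divisor == -3: TruncatingDiv yields 7 / 3 == 2, and
  // 7 - 2 * 3 == 1, matching JavaScript 7 % -3 == 1 (the divisor's sign does
  // not affect the remainder).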

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ bne(&remainder_not_zero, Label::kNear /*, cr0*/);
    __ Cmp32(dividend, Operand::Zero());
    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
    __ bind(&remainder_not_zero);
  }
}

void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  Register left_reg = ToRegister(instr->left());
  Register right_reg = ToRegister(instr->right());
  Register result_reg = ToRegister(instr->result());
  Label done;

  // Check for x % 0.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    __ Cmp32(right_reg, Operand::Zero());
    DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
  }

  // Check for kMinInt % -1; dr will return undefined, which is not what we
  // want. We have to deopt if we care about -0, because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ Cmp32(left_reg, Operand(kMinInt));
    __ bne(&no_overflow_possible, Label::kNear);
    __ Cmp32(right_reg, Operand(-1));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
    } else {
      __ b(ne, &no_overflow_possible, Label::kNear);
      __ mov(result_reg, Operand::Zero());
      __ b(&done, Label::kNear);
    }
    __ bind(&no_overflow_possible);
  }

  // The divide instruction dr implicitly uses the register pair
  // r0 & r1 below.
  DCHECK(!left_reg.is(r1));
  DCHECK(!right_reg.is(r1));
  DCHECK(!result_reg.is(r1));
  __ LoadRR(r0, left_reg);
  __ srda(r0, Operand(32));
  __ dr(r0, right_reg);  // R0:R1 = R1 / divisor - R0 remainder
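
  // srda shifts the even/odd register pair r0:r1 right by 32 bits, which
  // sign-extends the 32-bit dividend into a 64-bit value; dr then divides
  // that pair, leaving the remainder in r0 and the quotient in r1.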

  __ LoadAndTestP_ExtendSrc(result_reg, r0);  // Copy remainder to result reg

  // If we care about -0, test if the dividend is <0 and the result is 0.
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ bne(&done, Label::kNear);
    __ Cmp32(left_reg, Operand::Zero());
    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
  }

  __ bind(&done);
}

void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ Cmp32(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ Cmp32(dividend, Operand(0x80000000));
    DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
  }

  int32_t shift = WhichPowerOf2Abs(divisor);

  // Deoptimize if remainder will not be 0.
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
    __ TestBitRange(dividend, shift - 1, 0, r0);
    DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, cr0);
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ LoadComplementRR(result, dividend);
    return;
  }
  if (shift == 0) {
    __ LoadRR(result, dividend);
  } else {
    if (shift == 1) {
      __ ShiftRight(result, dividend, Operand(31));
    } else {
      __ ShiftRightArith(result, dividend, Operand(31));
      __ ShiftRight(result, result, Operand(32 - shift));
    }
    __ AddP(result, dividend, result);
    __ ShiftRightArith(result, result, Operand(shift));
#if V8_TARGET_ARCH_S390X
    __ lgfr(result, result);
#endif
  }
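  // The shifts above add a bias of (2^shift - 1) only for negative dividends,
  // turning the flooring arithmetic shift into truncating division. E.g. with
  // divisor 4 (shift == 2) and dividend == -5: (-5 + 3) >> 2 == -1, the
  // truncated quotient, whereas a plain arithmetic shift would give -2.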
  if (divisor < 0) __ LoadComplementRR(result, result);
}

void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ Cmp32(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ LoadComplementRR(result, result);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    Register scratch = scratch0();
    __ mov(ip, Operand(divisor));
    __ Mul(scratch, result, ip);
    __ Cmp32(scratch, dividend);
    DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
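    // E.g. divisor == 3, dividend == 7: the truncating quotient is 2, and
    // 2 * 3 != 7, so the inexact result forces a deopt when some use does not
    // truncate to int32.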
  }
}

// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  const Register dividend = ToRegister(instr->dividend());
  const Register divisor = ToRegister(instr->divisor());
  Register result = ToRegister(instr->result());

  DCHECK(!dividend.is(result));
  DCHECK(!divisor.is(result));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ Cmp32(divisor, Operand::Zero());
    DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ Cmp32(dividend, Operand::Zero());
    __ bne(&dividend_not_zero, Label::kNear);
    __ Cmp32(divisor, Operand::Zero());
    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ Cmp32(dividend, Operand(kMinInt));
    __ bne(&dividend_not_min_int, Label::kNear);
    __ Cmp32(divisor, Operand(-1));
    DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
    __ bind(&dividend_not_min_int);
  }

  __ LoadRR(r0, dividend);
  __ srda(r0, Operand(32));
  __ dr(r0, divisor);  // R0:R1 = R1 / divisor - R0 remainder - R1 quotient

  __ LoadAndTestP_ExtendSrc(result, r1);  // Move quotient to result register

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    // Deoptimize if remainder is not 0.
    __ Cmp32(r0, Operand::Zero());
    DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
  }
}

void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register result = ToRegister(instr->result());
  int32_t divisor = instr->divisor();
  bool can_overflow = hdiv->CheckFlag(HValue::kLeftCanBeMinInt);

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 0) {
    if (shift || !result.is(dividend)) {
      __ ShiftRightArith(result, dividend, Operand(shift));
#if V8_TARGET_ARCH_S390X
      __ lgfr(result, result);
#endif
    }
    return;
  }

// If the divisor is negative, we have to negate and handle edge cases.
#if V8_TARGET_ARCH_S390X
  if (divisor == -1 && can_overflow) {
    __ Cmp32(dividend, Operand(0x80000000));
    DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
  }
#endif

  __ LoadComplementRR(result, dividend);
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, cr0);
  }

// If the negation could not overflow, simply shifting is OK.
#if !V8_TARGET_ARCH_S390X
  if (!can_overflow) {
#endif
    if (shift) {
      __ ShiftRightArithP(result, result, Operand(shift));
    }
    return;
#if !V8_TARGET_ARCH_S390X
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
    return;
  }

  Label overflow_label, done;
  __ b(overflow, &overflow_label, Label::kNear);
  __ ShiftRightArith(result, result, Operand(shift));
#if V8_TARGET_ARCH_S390X
  __ lgfr(result, result);
#endif
  __ b(&done, Label::kNear);
  __ bind(&overflow_label);
  __ mov(result, Operand(kMinInt / divisor));
  __ bind(&done);
#endif
}

void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ Cmp32(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ LoadComplementRR(result, result);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp());
  DCHECK(!temp.is(dividend) && !temp.is(result));
  Label needs_adjustment, done;
  __ Cmp32(dividend, Operand::Zero());
  __ b(divisor > 0 ? lt : gt, &needs_adjustment);
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ LoadComplementRR(result, result);
  __ b(&done, Label::kNear);
  __ bind(&needs_adjustment);
  __ AddP(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ LoadComplementRR(result, result);
  __ SubP(result, result, Operand(1));
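  // E.g. divisor == 3, dividend == -7: temp == -6, the truncating division
  // gives -2, and subtracting 1 yields -3 == floor(-7 / 3).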
  __ bind(&done);
}

// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  const Register dividend = ToRegister(instr->dividend());
  const Register divisor = ToRegister(instr->divisor());
  Register result = ToRegister(instr->result());

  DCHECK(!dividend.is(result));
  DCHECK(!divisor.is(result));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ Cmp32(divisor, Operand::Zero());
    DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ Cmp32(dividend, Operand::Zero());
    __ bne(&dividend_not_zero, Label::kNear);
    __ Cmp32(divisor, Operand::Zero());
    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ Cmp32(dividend, Operand(kMinInt));
    __ bne(&no_overflow_possible, Label::kNear);
    __ Cmp32(divisor, Operand(-1));
    if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
      DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
    } else {
      __ bne(&no_overflow_possible, Label::kNear);
      __ LoadRR(result, dividend);
    }
    __ bind(&no_overflow_possible);
  }

  __ LoadRR(r0, dividend);
  __ srda(r0, Operand(32));
  __ dr(r0, divisor);  // R0:R1 = R1 / divisor - R0 remainder - R1 quotient

  __ lr(result, r1);  // Move quotient to result register

  Label done;
  Register scratch = scratch0();
  // If both operands have the same sign then we are done.
  __ Xor(scratch, dividend, divisor);
  __ ltr(scratch, scratch);  // use 32 bit version LoadAndTestRR even in 64 bit
  __ bge(&done, Label::kNear);

  // If there is no remainder then we are done.
  __ lr(scratch, result);
  __ msr(scratch, divisor);
  __ Cmp32(dividend, scratch);
  __ beq(&done, Label::kNear);

  // We performed a truncating division. Correct the result.
  __ Sub32(result, result, Operand(1));
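  // E.g. dividend == -7, divisor == 3: dr produces quotient -2; the operand
  // signs differ and -2 * 3 != -7, so the quotient is corrected to
  // -3 == floor(-7 / 3).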
  __ bind(&done);
}

void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DoubleRegister addend = ToDoubleRegister(instr->addend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
  DoubleRegister result = ToDoubleRegister(instr->result());

  // Unable to use madbr as the intermediate value is not rounded
  // to the proper precision.
  __ ldr(result, multiplier);
  __ mdbr(result, multiplicand);
  __ adbr(result, addend);
}

void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
  DoubleRegister minuend = ToDoubleRegister(instr->minuend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
  DoubleRegister result = ToDoubleRegister(instr->result());

  // Unable to use msdbr as the intermediate value is not rounded
  // to the proper precision.
  __ ldr(result, multiplier);
  __ mdbr(result, multiplicand);
  __ sdbr(result, minuend);
}

void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a zero constant is handled separately below.
      // If the constant is negative and left is zero, the result should be -0.
      __ CmpP(left, Operand::Zero());
      DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
    }

    switch (constant) {
      case -1:
        if (can_overflow) {
#if V8_TARGET_ARCH_S390X
          if (instr->hydrogen()->representation().IsSmi()) {
#endif
            __ LoadComplementRR(result, left);
            DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
#if V8_TARGET_ARCH_S390X
          } else {
            __ LoadComplementRR(result, left);
            __ TestIfInt32(result, r0);
            DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
          }
#endif
        } else {
          __ LoadComplementRR(result, left);
        }
        break;
      case 0:
        if (bailout_on_minus_zero) {
// If left is strictly negative and the constant is zero, the
// result is -0. Deoptimize if required, otherwise return 0.
#if V8_TARGET_ARCH_S390X
          if (instr->hydrogen()->representation().IsSmi()) {
#endif
            __ Cmp32(left, Operand::Zero());
#if V8_TARGET_ARCH_S390X
          } else {
            __ Cmp32(left, Operand::Zero());
          }
#endif
          DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
        }
        __ LoadImmP(result, Operand::Zero());
        break;
      case 1:
        __ Move(result, left);
        break;
      default:
        // Multiplying by powers of two and powers of two plus or minus
        // one can be done faster with shifted operands.
        // For other constants we emit standard code.
        int32_t mask = constant >> 31;
        uint32_t constant_abs = (constant + mask) ^ mask;
   1388 
   1389         if (base::bits::IsPowerOfTwo32(constant_abs)) {
   1390           int32_t shift = WhichPowerOf2(constant_abs);
   1391           __ ShiftLeftP(result, left, Operand(shift));
   1392           // Correct the sign of the result if the constant is negative.
   1393           if (constant < 0) __ LoadComplementRR(result, result);
   1394         } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
   1395           int32_t shift = WhichPowerOf2(constant_abs - 1);
   1396           __ ShiftLeftP(scratch, left, Operand(shift));
   1397           __ AddP(result, scratch, left);
   1398           // Correct the sign of the result if the constant is negative.
   1399           if (constant < 0) __ LoadComplementRR(result, result);
   1400         } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
   1401           int32_t shift = WhichPowerOf2(constant_abs + 1);
   1402           __ ShiftLeftP(scratch, left, Operand(shift));
   1403           __ SubP(result, scratch, left);
   1404           // Correct the sign of the result if the constant is negative.
   1405           if (constant < 0) __ LoadComplementRR(result, result);
   1406         } else {
   1407           // Generate standard code.
   1408           __ Move(result, left);
   1409           __ MulP(result, Operand(constant));
   1410         }
   1411     }
   1412 
   1413   } else {
   1414     DCHECK(right_op->IsRegister());
   1415     Register right = ToRegister(right_op);
   1416 
   1417     if (can_overflow) {
   1418 #if V8_TARGET_ARCH_S390X
   1419       // result = left * right.
   1420       if (instr->hydrogen()->representation().IsSmi()) {
   1421         __ SmiUntag(result, left);
   1422         __ SmiUntag(scratch, right);
   1423         __ msgr(result, scratch);
   1424       } else {
   1425         __ LoadRR(result, left);
   1426         __ msgr(result, right);
   1427       }
   1428       __ TestIfInt32(result, r0);
   1429       DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
   1430       if (instr->hydrogen()->representation().IsSmi()) {
   1431         __ SmiTag(result);
   1432       }
   1433 #else
   1434       // r0:scratch = scratch * right
   1435       if (instr->hydrogen()->representation().IsSmi()) {
   1436         __ SmiUntag(scratch, left);
   1437         __ mr_z(r0, right);
   1438         __ LoadRR(result, scratch);
   1439       } else {
   1440         // r0:scratch = scratch * right
   1441         __ LoadRR(scratch, left);
   1442         __ mr_z(r0, right);
   1443         __ LoadRR(result, scratch);
   1444       }
   1445       __ TestIfInt32(r0, result, scratch);
   1446       DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
   1447 #endif
   1448     } else {
   1449       if (instr->hydrogen()->representation().IsSmi()) {
   1450         __ SmiUntag(result, left);
   1451         __ Mul(result, result, right);
   1452       } else {
   1453         __ Mul(result, left, right);
   1454       }
   1455     }
   1456 
   1457     if (bailout_on_minus_zero) {
   1458       Label done;
   1459 #if V8_TARGET_ARCH_S390X
   1460       if (instr->hydrogen()->representation().IsSmi()) {
   1461 #endif
   1462         __ XorP(r0, left, right);
   1463         __ LoadAndTestRR(r0, r0);
   1464         __ bge(&done, Label::kNear);
   1465 #if V8_TARGET_ARCH_S390X
   1466       } else {
   1467         __ XorP(r0, left, right);
   1468         __ Cmp32(r0, Operand::Zero());
   1469         __ bge(&done, Label::kNear);
   1470       }
   1471 #endif
   1472       // Bail out if the result is minus zero.
   1473       __ CmpP(result, Operand::Zero());
   1474       DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
   1475       __ bind(&done);
   1476     }
   1477   }
   1478 }
   1479 
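// Illustrative only (plain C, not generated code): the strength reduction
// above corresponds to rewrites such as
//   x * 8 == x << 3              (power of two)
//   x * 5 == (x << 2) + x        (power of two plus one)
//   x * 7 == (x << 3) - x        (power of two minus one)
// with a final negation when the constant is negative, e.g.
//   x * -5 == -((x << 2) + x)
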
void LCodeGen::DoBitI(LBitI* instr) {
  LOperand* left_op = instr->left();
  LOperand* right_op = instr->right();
  DCHECK(left_op->IsRegister());
  Register left = ToRegister(left_op);
  Register result = ToRegister(instr->result());

  if (right_op->IsConstantOperand()) {
    switch (instr->op()) {
      case Token::BIT_AND:
        __ AndP(result, left, Operand(ToOperand(right_op)));
        break;
      case Token::BIT_OR:
        __ OrP(result, left, Operand(ToOperand(right_op)));
        break;
      case Token::BIT_XOR:
        __ XorP(result, left, Operand(ToOperand(right_op)));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else if (right_op->IsStackSlot()) {
    // The Reg-Mem form computes result = result OP memory, so copy left
    // into result first.
    if (!left.is(result)) __ LoadRR(result, left);
    switch (instr->op()) {
      case Token::BIT_AND:
        __ AndP(result, ToMemOperand(right_op));
        break;
      case Token::BIT_OR:
        __ OrP(result, ToMemOperand(right_op));
        break;
      case Token::BIT_XOR:
        __ XorP(result, ToMemOperand(right_op));
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    DCHECK(right_op->IsRegister());

    switch (instr->op()) {
      case Token::BIT_AND:
        __ AndP(result, left, ToRegister(right_op));
        break;
      case Token::BIT_OR:
        __ OrP(result, left, ToRegister(right_op));
        break;
      case Token::BIT_XOR:
        __ XorP(result, left, ToRegister(right_op));
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}

void LCodeGen::DoShiftI(LShiftI* instr) {
  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
  // result may alias either of them.
  LOperand* right_op = instr->right();
  Register left = ToRegister(instr->left());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();
  if (right_op->IsRegister()) {
    // Mask the right_op operand.
    __ AndP(scratch, ToRegister(right_op), Operand(0x1F));
    switch (instr->op()) {
      case Token::ROR:
        // rotate_right(a, b) == rotate_left(a, 32 - b)
        __ LoadComplementRR(scratch, scratch);
        __ rll(result, left, scratch, Operand(32));
#if V8_TARGET_ARCH_S390X
        __ lgfr(result, result);
#endif
        break;
      case Token::SAR:
        __ ShiftRightArith(result, left, scratch);
#if V8_TARGET_ARCH_S390X
        __ lgfr(result, result);
#endif
        break;
      case Token::SHR:
        __ ShiftRight(result, left, scratch);
#if V8_TARGET_ARCH_S390X
        __ lgfr(result, result);
#endif
        if (instr->can_deopt()) {
#if V8_TARGET_ARCH_S390X
          __ ltgfr(result, result /*, SetRC*/);
#else
          __ ltr(result, result);  // Set the <,==,> condition
#endif
          DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue, cr0);
        }
        break;
      case Token::SHL:
        __ ShiftLeft(result, left, scratch);
#if V8_TARGET_ARCH_S390X
        __ lgfr(result, result);
#endif
        break;
      default:
        UNREACHABLE();
        break;
    }
  } else {
    // Mask the right_op operand.
    int value = ToInteger32(LConstantOperand::cast(right_op));
    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
    switch (instr->op()) {
      case Token::ROR:
        if (shift_count != 0) {
          __ rll(result, left, Operand(32 - shift_count));
#if V8_TARGET_ARCH_S390X
          __ lgfr(result, result);
#endif
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SAR:
        if (shift_count != 0) {
          __ ShiftRightArith(result, left, Operand(shift_count));
#if V8_TARGET_ARCH_S390X
          __ lgfr(result, result);
#endif
        } else {
          __ Move(result, left);
        }
        break;
      case Token::SHR:
        if (shift_count != 0) {
          __ ShiftRight(result, left, Operand(shift_count));
#if V8_TARGET_ARCH_S390X
          __ lgfr(result, result);
#endif
        } else {
          if (instr->can_deopt()) {
            __ Cmp32(left, Operand::Zero());
            DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue);
          }
          __ Move(result, left);
        }
        break;
      case Token::SHL:
        if (shift_count != 0) {
#if V8_TARGET_ARCH_S390X
          if (instr->hydrogen_value()->representation().IsSmi()) {
            __ ShiftLeftP(result, left, Operand(shift_count));
#else
          if (instr->hydrogen_value()->representation().IsSmi() &&
              instr->can_deopt()) {
            if (shift_count != 1) {
              __ ShiftLeft(result, left, Operand(shift_count - 1));
#if V8_TARGET_ARCH_S390X
              __ lgfr(result, result);
#endif
              __ SmiTagCheckOverflow(result, result, scratch);
            } else {
              __ SmiTagCheckOverflow(result, left, scratch);
            }
            DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
#endif
          } else {
            __ ShiftLeft(result, left, Operand(shift_count));
#if V8_TARGET_ARCH_S390X
            __ lgfr(result, result);
#endif
          }
        } else {
          __ Move(result, left);
        }
        break;
      default:
        UNREACHABLE();
        break;
    }
  }
}

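// Illustrative only: why SHR can deoptimize. JavaScript's >>> produces an
// unsigned 32-bit result, so (-1 >>> 0) is 4294967295, which does not fit
// in a signed int32 representation. When the shift amount may be zero and
// the input may be negative, the code above tests the sign of the result
// (or of the input, for a constant zero shift) and deoptimizes instead of
// materializing an out-of-range value.
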
void LCodeGen::DoSubI(LSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();

  bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
                     instr->hydrogen()->representation().IsExternal());

#if V8_TARGET_ARCH_S390X
  // The overflow detection needs to be tested on the lower 32 bits.
  // As a result, on 64-bit, we need to force 32-bit arithmetic operations
  // to set the CC overflow bit properly.  The result is then sign-extended.
  bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
#else
  bool checkOverflow = true;
#endif

  if (right->IsConstantOperand()) {
    if (!isInteger || !checkOverflow)
      __ SubP(ToRegister(result), ToRegister(left), ToOperand(right));
    else
      __ Sub32(ToRegister(result), ToRegister(left), ToOperand(right));
  } else if (right->IsRegister()) {
    if (!isInteger)
      __ SubP(ToRegister(result), ToRegister(left), ToRegister(right));
    else if (!checkOverflow)
      __ SubP_ExtendSrc(ToRegister(result), ToRegister(left),
                        ToRegister(right));
    else
      __ Sub32(ToRegister(result), ToRegister(left), ToRegister(right));
  } else {
    if (!left->Equals(instr->result()))
      __ LoadRR(ToRegister(result), ToRegister(left));

    MemOperand mem = ToMemOperand(right);
    if (!isInteger) {
      __ SubP(ToRegister(result), mem);
    } else {
#if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
      // We want to read the lower 32 bits directly from memory.
      MemOperand Upper32Mem = MemOperand(mem.rb(), mem.rx(), mem.offset() + 4);
#else
      MemOperand Upper32Mem = ToMemOperand(right);
#endif
      if (checkOverflow) {
        __ Sub32(ToRegister(result), Upper32Mem);
      } else {
        __ SubP_ExtendSrc(ToRegister(result), Upper32Mem);
      }
    }
  }

#if V8_TARGET_ARCH_S390X
  if (isInteger && checkOverflow)
    __ lgfr(ToRegister(result), ToRegister(result));
#endif
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
  }
}

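// Illustrative only: the 32-bit overflow check above. On 64-bit hardware,
// kMinInt - 1 (i.e. -2147483648 - 1) is representable as a 64-bit value,
// so a 64-bit subtract would not set the condition code. Using Sub32
// makes the CPU flag the 32-bit wraparound to 2147483647, the
// DeoptimizeIf(overflow, ...) above bails out, and otherwise lgfr
// sign-extends the valid 32-bit result back to 64 bits.
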
void LCodeGen::DoRSubI(LRSubI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();

  DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) &&
         right->IsConstantOperand());

#if V8_TARGET_ARCH_S390X
  // The overflow detection needs to be tested on the lower 32 bits.
  // As a result, on 64-bit, we need to force 32-bit arithmetic operations
  // to set the CC overflow bit properly.  The result is then sign-extended.
  bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
#else
  bool checkOverflow = true;
#endif

  Operand right_operand = ToOperand(right);
  __ mov(r0, right_operand);

  if (!checkOverflow) {
    __ SubP_ExtendSrc(ToRegister(result), r0, ToRegister(left));
  } else {
    __ Sub32(ToRegister(result), r0, ToRegister(left));
  }
}

void LCodeGen::DoConstantI(LConstantI* instr) {
  __ mov(ToRegister(instr->result()), Operand(instr->value()));
}

void LCodeGen::DoConstantS(LConstantS* instr) {
  __ LoadSmiLiteral(ToRegister(instr->result()), instr->value());
}

void LCodeGen::DoConstantD(LConstantD* instr) {
  DCHECK(instr->result()->IsDoubleRegister());
  DoubleRegister result = ToDoubleRegister(instr->result());
  uint64_t bits = instr->bits();
  __ LoadDoubleLiteral(result, bits, scratch0());
}

void LCodeGen::DoConstantE(LConstantE* instr) {
  __ mov(ToRegister(instr->result()), Operand(instr->value()));
}

void LCodeGen::DoConstantT(LConstantT* instr) {
  Handle<Object> object = instr->value(isolate());
  AllowDeferredHandleDereference smi_check;
  __ Move(ToRegister(instr->result()), object);
}

MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index,
                                           String::Encoding encoding) {
  if (index->IsConstantOperand()) {
    int offset = ToInteger32(LConstantOperand::cast(index));
    if (encoding == String::TWO_BYTE_ENCODING) {
      offset *= kUC16Size;
    }
    STATIC_ASSERT(kCharSize == 1);
    return FieldMemOperand(string, SeqString::kHeaderSize + offset);
  }
  Register scratch = scratch0();
  DCHECK(!scratch.is(string));
  DCHECK(!scratch.is(ToRegister(index)));
  // TODO(joransiu) : Fold Add into FieldMemOperand
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ AddP(scratch, string, ToRegister(index));
  } else {
    STATIC_ASSERT(kUC16Size == 2);
    __ ShiftLeftP(scratch, ToRegister(index), Operand(1));
    __ AddP(scratch, string, scratch);
  }
  return FieldMemOperand(scratch, SeqString::kHeaderSize);
}

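// Illustrative only: the operand computed above, assuming a two-byte
// sequential string and a dynamic index i. The returned operand addresses
//   string + SeqString::kHeaderSize - kHeapObjectTag + 2 * i
// i.e. FieldMemOperand folds away the heap-object tag, while the scaling
// by kUC16Size (2) is done with the explicit shift.
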
void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register result = ToRegister(instr->result());

  if (FLAG_debug_code) {
    Register scratch = scratch0();
    __ LoadP(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
    __ llc(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));

    __ AndP(scratch, scratch,
            Operand(kStringRepresentationMask | kStringEncodingMask));
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    __ CmpP(scratch,
            Operand(encoding == String::ONE_BYTE_ENCODING ? one_byte_seq_type
                                                          : two_byte_seq_type));
    __ Check(eq, kUnexpectedStringType);
  }

  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ llc(result, operand);
  } else {
    __ llh(result, operand);
  }
}

void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
  String::Encoding encoding = instr->hydrogen()->encoding();
  Register string = ToRegister(instr->string());
  Register value = ToRegister(instr->value());

  if (FLAG_debug_code) {
    Register index = ToRegister(instr->index());
    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
    int encoding_mask =
        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
            ? one_byte_seq_type
            : two_byte_seq_type;
    __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
  }

  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
  if (encoding == String::ONE_BYTE_ENCODING) {
    __ stc(value, operand);
  } else {
    __ sth(value, operand);
  }
}

void LCodeGen::DoAddI(LAddI* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  LOperand* result = instr->result();
  bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
                     instr->hydrogen()->representation().IsExternal());
#if V8_TARGET_ARCH_S390X
  // The overflow detection needs to be tested on the lower 32 bits.
  // As a result, on 64-bit, we need to force 32-bit arithmetic operations
  // to set the CC overflow bit properly.  The result is then sign-extended.
  bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
#else
  bool checkOverflow = true;
#endif

  if (right->IsConstantOperand()) {
    if (!isInteger || !checkOverflow)
      __ AddP(ToRegister(result), ToRegister(left), ToOperand(right));
    else
      __ Add32(ToRegister(result), ToRegister(left), ToOperand(right));
  } else if (right->IsRegister()) {
    if (!isInteger)
      __ AddP(ToRegister(result), ToRegister(left), ToRegister(right));
    else if (!checkOverflow)
      __ AddP_ExtendSrc(ToRegister(result), ToRegister(left),
                        ToRegister(right));
    else
      __ Add32(ToRegister(result), ToRegister(left), ToRegister(right));
  } else {
    if (!left->Equals(instr->result()))
      __ LoadRR(ToRegister(result), ToRegister(left));

    MemOperand mem = ToMemOperand(right);
    if (!isInteger) {
      __ AddP(ToRegister(result), mem);
    } else {
#if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
      // We want to read the lower 32 bits directly from memory.
      MemOperand Upper32Mem = MemOperand(mem.rb(), mem.rx(), mem.offset() + 4);
#else
      MemOperand Upper32Mem = ToMemOperand(right);
#endif
      if (checkOverflow) {
        __ Add32(ToRegister(result), Upper32Mem);
      } else {
        __ AddP_ExtendSrc(ToRegister(result), Upper32Mem);
      }
    }
  }

#if V8_TARGET_ARCH_S390X
  if (isInteger && checkOverflow)
    __ lgfr(ToRegister(result), ToRegister(result));
#endif
  // Deoptimize on overflow.
  if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
    DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
  }
}

void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  HMathMinMax::Operation operation = instr->hydrogen()->operation();
  Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge;
  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
    Register left_reg = ToRegister(left);
    Register right_reg = EmitLoadRegister(right, ip);
    Register result_reg = ToRegister(instr->result());
    Label return_left, done;
#if V8_TARGET_ARCH_S390X
    if (instr->hydrogen_value()->representation().IsSmi()) {
#endif
      __ CmpP(left_reg, right_reg);
#if V8_TARGET_ARCH_S390X
    } else {
      __ Cmp32(left_reg, right_reg);
    }
#endif
    __ b(cond, &return_left, Label::kNear);
    __ Move(result_reg, right_reg);
    __ b(&done, Label::kNear);
    __ bind(&return_left);
    __ Move(result_reg, left_reg);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->representation().IsDouble());
    DoubleRegister left_reg = ToDoubleRegister(left);
    DoubleRegister right_reg = ToDoubleRegister(right);
    DoubleRegister result_reg = ToDoubleRegister(instr->result());
    Label check_nan_left, check_zero, return_left, return_right, done;
    __ cdbr(left_reg, right_reg);
    __ bunordered(&check_nan_left, Label::kNear);
    __ beq(&check_zero);
    __ b(cond, &return_left, Label::kNear);
    __ b(&return_right, Label::kNear);

    __ bind(&check_zero);
    __ lzdr(kDoubleRegZero);
    __ cdbr(left_reg, kDoubleRegZero);
    __ bne(&return_left, Label::kNear);  // left == right != 0.

    // At this point, both left and right are either 0 or -0.
    // N.B. The following works because +0 + -0 == +0
    if (operation == HMathMinMax::kMathMin) {
      // For min we want logical-or of sign bit: -(-L + -R)
      __ lcdbr(left_reg, left_reg);
      __ ldr(result_reg, left_reg);
      if (left_reg.is(right_reg)) {
        __ adbr(result_reg, right_reg);
      } else {
        __ sdbr(result_reg, right_reg);
      }
      __ lcdbr(result_reg, result_reg);
    } else {
      // For max we want logical-and of sign bit: (L + R)
      __ ldr(result_reg, left_reg);
      __ adbr(result_reg, right_reg);
    }
    __ b(&done, Label::kNear);

    __ bind(&check_nan_left);
    __ cdbr(left_reg, left_reg);
    __ bunordered(&return_left, Label::kNear);  // left == NaN.

    __ bind(&return_right);
    if (!right_reg.is(result_reg)) {
      __ ldr(result_reg, right_reg);
    }
    __ b(&done, Label::kNear);

    __ bind(&return_left);
    if (!left_reg.is(result_reg)) {
      __ ldr(result_reg, left_reg);
    }
    __ bind(&done);
  }
}

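// Illustrative only: the signed-zero handling above. IEEE comparison
// treats +0 and -0 as equal, so the sign must be recovered arithmetically:
//   max(+0, -0): +0 + -0 == +0                     (sign bits are ANDed)
//   min(+0, -0): -(-(+0) + -(-0)) == -(+0) == -0   (sign bits are ORed)
// which matches Math.max(0, -0) === 0 and Math.min(0, -0) === -0.
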
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
  DoubleRegister left = ToDoubleRegister(instr->left());
  DoubleRegister right = ToDoubleRegister(instr->right());
  DoubleRegister result = ToDoubleRegister(instr->result());
  // All operations except MOD are computed in-place.
  DCHECK(instr->op() == Token::MOD || left.is(result));
  switch (instr->op()) {
    case Token::ADD:
      __ adbr(result, right);
      break;
    case Token::SUB:
      __ sdbr(result, right);
      break;
    case Token::MUL:
      __ mdbr(result, right);
      break;
    case Token::DIV:
      __ ddbr(result, right);
      break;
    case Token::MOD: {
      __ PrepareCallCFunction(0, 2, scratch0());
      __ MovToFloatParameters(left, right);
      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
                       0, 2);
      // Move the result into the double result register.
      __ MovFromFloatResult(result);
      break;
    }
    default:
      UNREACHABLE();
      break;
  }
}

void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(r3));
  DCHECK(ToRegister(instr->right()).is(r2));
  DCHECK(ToRegister(instr->result()).is(r2));

  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
  CallCode(code, RelocInfo::CODE_TARGET, instr);
}

template <class InstrType>
void LCodeGen::EmitBranch(InstrType instr, Condition cond) {
  int left_block = instr->TrueDestination(chunk_);
  int right_block = instr->FalseDestination(chunk_);

  int next_block = GetNextEmittedBlock();

  if (right_block == left_block || cond == al) {
    EmitGoto(left_block);
  } else if (left_block == next_block) {
    __ b(NegateCondition(cond), chunk_->GetAssemblyLabel(right_block));
  } else if (right_block == next_block) {
    __ b(cond, chunk_->GetAssemblyLabel(left_block));
  } else {
    __ b(cond, chunk_->GetAssemblyLabel(left_block));
    __ b(chunk_->GetAssemblyLabel(right_block));
  }
}

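// Illustrative only: the fall-through optimization above. If the true
// block is emitted immediately after this one, a single branch on the
// negated condition to the false block suffices; if the false block is
// next, a single branch on the condition to the true block suffices.
// Only when neither successor is adjacent are both a conditional and an
// unconditional branch emitted.
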
template <class InstrType>
void LCodeGen::EmitTrueBranch(InstrType instr, Condition cond) {
  int true_block = instr->TrueDestination(chunk_);
  __ b(cond, chunk_->GetAssemblyLabel(true_block));
}

template <class InstrType>
void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond) {
  int false_block = instr->FalseDestination(chunk_);
  __ b(cond, chunk_->GetAssemblyLabel(false_block));
}

void LCodeGen::DoDebugBreak(LDebugBreak* instr) { __ stop("LBreak"); }

void LCodeGen::DoBranch(LBranch* instr) {
  Representation r = instr->hydrogen()->value()->representation();
  DoubleRegister dbl_scratch = double_scratch0();

  if (r.IsInteger32()) {
    DCHECK(!info()->IsStub());
    Register reg = ToRegister(instr->value());
    __ Cmp32(reg, Operand::Zero());
    EmitBranch(instr, ne);
  } else if (r.IsSmi()) {
    DCHECK(!info()->IsStub());
    Register reg = ToRegister(instr->value());
    __ CmpP(reg, Operand::Zero());
    EmitBranch(instr, ne);
  } else if (r.IsDouble()) {
    DCHECK(!info()->IsStub());
    DoubleRegister reg = ToDoubleRegister(instr->value());
    __ lzdr(kDoubleRegZero);
    __ cdbr(reg, kDoubleRegZero);
    // Test the double value. Zero and NaN are false.
    Condition lt_gt = static_cast<Condition>(lt | gt);

    EmitBranch(instr, lt_gt);
  } else {
    DCHECK(r.IsTagged());
    Register reg = ToRegister(instr->value());
    HType type = instr->hydrogen()->value()->type();
    if (type.IsBoolean()) {
      DCHECK(!info()->IsStub());
      __ CompareRoot(reg, Heap::kTrueValueRootIndex);
      EmitBranch(instr, eq);
    } else if (type.IsSmi()) {
      DCHECK(!info()->IsStub());
      __ CmpP(reg, Operand::Zero());
      EmitBranch(instr, ne);
    } else if (type.IsJSArray()) {
      DCHECK(!info()->IsStub());
      EmitBranch(instr, al);
    } else if (type.IsHeapNumber()) {
      DCHECK(!info()->IsStub());
      __ LoadDouble(dbl_scratch,
                    FieldMemOperand(reg, HeapNumber::kValueOffset));
      // Test the double value. Zero and NaN are false.
      __ lzdr(kDoubleRegZero);
      __ cdbr(dbl_scratch, kDoubleRegZero);
      Condition lt_gt = static_cast<Condition>(lt | gt);
      EmitBranch(instr, lt_gt);
    } else if (type.IsString()) {
      DCHECK(!info()->IsStub());
      __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
      __ CmpP(ip, Operand::Zero());
      EmitBranch(instr, ne);
    } else {
      ToBooleanHints expected = instr->hydrogen()->expected_input_types();
      // Avoid deopts in the case where we've never executed this path before.
      if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;

      if (expected & ToBooleanHint::kUndefined) {
        // undefined -> false.
        __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
        __ beq(instr->FalseLabel(chunk_));
      }
      if (expected & ToBooleanHint::kBoolean) {
        // Boolean -> its value.
        __ CompareRoot(reg, Heap::kTrueValueRootIndex);
        __ beq(instr->TrueLabel(chunk_));
        __ CompareRoot(reg, Heap::kFalseValueRootIndex);
        __ beq(instr->FalseLabel(chunk_));
      }
      if (expected & ToBooleanHint::kNull) {
        // 'null' -> false.
        __ CompareRoot(reg, Heap::kNullValueRootIndex);
        __ beq(instr->FalseLabel(chunk_));
      }

      if (expected & ToBooleanHint::kSmallInteger) {
        // Smis: 0 -> false, all other -> true.
        __ CmpP(reg, Operand::Zero());
        __ beq(instr->FalseLabel(chunk_));
        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
      } else if (expected & ToBooleanHint::kNeedsMap) {
        // If we need a map later and have a Smi -> deopt.
        __ TestIfSmi(reg);
        DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
      }

      const Register map = scratch0();
      if (expected & ToBooleanHint::kNeedsMap) {
        __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset));

        if (expected & ToBooleanHint::kCanBeUndetectable) {
          // Undetectable -> false.
          __ tm(FieldMemOperand(map, Map::kBitFieldOffset),
                Operand(1 << Map::kIsUndetectable));
          __ bne(instr->FalseLabel(chunk_));
        }
      }

      if (expected & ToBooleanHint::kReceiver) {
        // spec object -> true.
        __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
        __ bge(instr->TrueLabel(chunk_));
      }

      if (expected & ToBooleanHint::kString) {
        // String value -> false iff empty.
        Label not_string;
        __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
        __ bge(&not_string, Label::kNear);
        __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
        __ CmpP(ip, Operand::Zero());
        __ bne(instr->TrueLabel(chunk_));
        __ b(instr->FalseLabel(chunk_));
        __ bind(&not_string);
      }

      if (expected & ToBooleanHint::kSymbol) {
        // Symbol value -> true.
        __ CompareInstanceType(map, ip, SYMBOL_TYPE);
        __ beq(instr->TrueLabel(chunk_));
      }

      if (expected & ToBooleanHint::kSimdValue) {
        // SIMD value -> true.
        __ CompareInstanceType(map, ip, SIMD128_VALUE_TYPE);
        __ beq(instr->TrueLabel(chunk_));
      }

      if (expected & ToBooleanHint::kHeapNumber) {
        // heap number -> false iff +0, -0, or NaN.
        Label not_heap_number;
        __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
        __ bne(&not_heap_number, Label::kNear);
        __ LoadDouble(dbl_scratch,
                      FieldMemOperand(reg, HeapNumber::kValueOffset));
        __ lzdr(kDoubleRegZero);
        __ cdbr(dbl_scratch, kDoubleRegZero);
        __ bunordered(instr->FalseLabel(chunk_));  // NaN -> false.
        __ beq(instr->FalseLabel(chunk_));         // +0, -0 -> false.
        __ b(instr->TrueLabel(chunk_));
        __ bind(&not_heap_number);
      }

      if (expected != ToBooleanHint::kAny) {
        // We've seen something for the first time -> deopt.
        // This can only happen if we are not generic already.
        DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject);
      }
    }
  }
}

void LCodeGen::EmitGoto(int block) {
  if (!IsNextEmittedBlock(block)) {
    __ b(chunk_->GetAssemblyLabel(LookupDestination(block)));
  }
}

void LCodeGen::DoGoto(LGoto* instr) { EmitGoto(instr->block_id()); }

Condition LCodeGen::TokenToCondition(Token::Value op) {
  Condition cond = kNoCondition;
  switch (op) {
    case Token::EQ:
    case Token::EQ_STRICT:
      cond = eq;
      break;
    case Token::NE:
    case Token::NE_STRICT:
      cond = ne;
      break;
    case Token::LT:
      cond = lt;
      break;
    case Token::GT:
      cond = gt;
      break;
    case Token::LTE:
      cond = le;
      break;
    case Token::GTE:
      cond = ge;
      break;
    case Token::IN:
    case Token::INSTANCEOF:
    default:
      UNREACHABLE();
  }
  return cond;
}

void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
  LOperand* left = instr->left();
  LOperand* right = instr->right();
  bool is_unsigned =
      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
  Condition cond = TokenToCondition(instr->op());

  if (left->IsConstantOperand() && right->IsConstantOperand()) {
    // We can statically evaluate the comparison.
    double left_val = ToDouble(LConstantOperand::cast(left));
    double right_val = ToDouble(LConstantOperand::cast(right));
    int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
                         ? instr->TrueDestination(chunk_)
                         : instr->FalseDestination(chunk_);
    EmitGoto(next_block);
  } else {
    if (instr->is_double()) {
      // Compare left and right operands as doubles and load the
      // resulting flags into the normal status register.
      __ cdbr(ToDoubleRegister(left), ToDoubleRegister(right));
      // If a NaN is involved, i.e. the result is unordered,
      // jump to the false block label.
      __ bunordered(instr->FalseLabel(chunk_));
    } else {
      if (right->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(right));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          if (is_unsigned) {
            __ CmpLogicalSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
          } else {
            __ CmpSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
          }
        } else {
          if (is_unsigned) {
            __ CmpLogical32(ToRegister(left), ToOperand(right));
          } else {
            __ Cmp32(ToRegister(left), ToOperand(right));
          }
        }
      } else if (left->IsConstantOperand()) {
        int32_t value = ToInteger32(LConstantOperand::cast(left));
        if (instr->hydrogen_value()->representation().IsSmi()) {
          if (is_unsigned) {
            __ CmpLogicalSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
          } else {
            __ CmpSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
          }
        } else {
          if (is_unsigned) {
            __ CmpLogical32(ToRegister(right), ToOperand(left));
          } else {
            __ Cmp32(ToRegister(right), ToOperand(left));
          }
        }
        // We commuted the operands, so commute the condition.
        cond = CommuteCondition(cond);
      } else if (instr->hydrogen_value()->representation().IsSmi()) {
        if (is_unsigned) {
          __ CmpLogicalP(ToRegister(left), ToRegister(right));
        } else {
          __ CmpP(ToRegister(left), ToRegister(right));
        }
      } else {
        if (is_unsigned) {
          __ CmpLogical32(ToRegister(left), ToRegister(right));
        } else {
          __ Cmp32(ToRegister(left), ToRegister(right));
        }
      }
    }
    EmitBranch(instr, cond);
  }
}

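// Illustrative only: the commuting above. The compare instructions take
// the constant as the second operand, so a test like 5 < x is emitted as
// a compare of x against 5; CommuteCondition then turns lt into gt,
// because 5 < x and x > 5 branch the same way.
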
void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
  Register left = ToRegister(instr->left());
  Register right = ToRegister(instr->right());

  __ CmpP(left, right);
  EmitBranch(instr, eq);
}

void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
  if (instr->hydrogen()->representation().IsTagged()) {
    Register input_reg = ToRegister(instr->object());
    __ CmpP(input_reg, Operand(factory()->the_hole_value()));
    EmitBranch(instr, eq);
    return;
  }

  DoubleRegister input_reg = ToDoubleRegister(instr->object());
  __ cdbr(input_reg, input_reg);
  EmitFalseBranch(instr, ordered);

  Register scratch = scratch0();
  // Convert to GPR and examine the upper 32 bits.
  __ lgdr(scratch, input_reg);
  __ srlg(scratch, scratch, Operand(32));
  __ Cmp32(scratch, Operand(kHoleNanUpper32));
  EmitBranch(instr, eq);
}

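// Illustrative only: the hole detection above. In unboxed double arrays,
// the hole is encoded as a particular NaN bit pattern. A value that
// compares ordered with itself is not a NaN and therefore not the hole
// (false branch); for an actual NaN, only the upper 32 bits of the bit
// pattern need to be compared against kHoleNanUpper32 to distinguish the
// hole from ordinary NaNs produced by arithmetic.
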
Condition LCodeGen::EmitIsString(Register input, Register temp1,
                                 Label* is_not_string,
                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
  if (check_needed == INLINE_SMI_CHECK) {
    __ JumpIfSmi(input, is_not_string);
  }
  __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);

  return lt;
}

void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp1 = ToRegister(instr->temp());

  SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
                              ? OMIT_SMI_CHECK
                              : INLINE_SMI_CHECK;
  Condition true_cond =
      EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);

  EmitBranch(instr, true_cond);
}

void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
  Register input_reg = EmitLoadRegister(instr->value(), ip);
  __ TestIfSmi(input_reg);
  EmitBranch(instr, eq);
}

void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }
  __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
  __ tm(FieldMemOperand(temp, Map::kBitFieldOffset),
        Operand(1 << Map::kIsUndetectable));
  EmitBranch(instr, ne);
}

static Condition ComputeCompareCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}

void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  DCHECK(ToRegister(instr->left()).is(r3));
  DCHECK(ToRegister(instr->right()).is(r2));

  Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
  CallCode(code, RelocInfo::CODE_TARGET, instr);
  __ CompareRoot(r2, Heap::kTrueValueRootIndex);
  EmitBranch(instr, eq);
}

static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == FIRST_TYPE) return to;
  DCHECK(from == to || to == LAST_TYPE);
  return from;
}

static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
  InstanceType from = instr->from();
  InstanceType to = instr->to();
  if (from == to) return eq;
  if (to == LAST_TYPE) return ge;
  if (from == FIRST_TYPE) return le;
  UNREACHABLE();
  return eq;
}

void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
  Register scratch = scratch0();
  Register input = ToRegister(instr->value());

  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
  }

  __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
  EmitBranch(instr, BranchCondition(instr->hydrogen()));
}

// Branches to a label or falls through with the answer in flags.  Trashes
// the temp registers, but not the input.
void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
                               Handle<String> class_name, Register input,
                               Register temp, Register temp2) {
  DCHECK(!input.is(temp));
  DCHECK(!input.is(temp2));
  DCHECK(!temp.is(temp2));

  __ JumpIfSmi(input, is_false);

  __ CompareObjectType(input, temp, temp2, FIRST_FUNCTION_TYPE);
  STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
    __ bge(is_true);
  } else {
    __ bge(is_false);
  }

  // Check if the constructor in the map is a function.
  Register instance_type = ip;
  __ GetMapConstructor(temp, temp, temp2, instance_type);

  // Objects with a non-function constructor have class 'Object'.
  __ CmpP(instance_type, Operand(JS_FUNCTION_TYPE));
  if (String::Equals(isolate()->factory()->Object_string(), class_name)) {
    __ bne(is_true);
  } else {
    __ bne(is_false);
  }

  // temp now contains the constructor function. Grab the
  // instance class name from there.
  __ LoadP(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
  __ LoadP(temp,
           FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset));
  // The class name we are testing against is internalized since it's a literal.
  // The name in the constructor is internalized because of the way the context
  // is booted.  This routine isn't expected to work for random API-created
  // classes and it doesn't have to because you can't access it with natives
  // syntax.  Since both sides are internalized it is sufficient to use an
  // identity comparison.
  __ CmpP(temp, Operand(class_name));
  // End with the answer in flags.
}

void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
  Register input = ToRegister(instr->value());
  Register temp = scratch0();
  Register temp2 = ToRegister(instr->temp());
  Handle<String> class_name = instr->hydrogen()->class_name();

  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
                  class_name, input, temp, temp2);

  EmitBranch(instr, eq);
}

void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
  Register reg = ToRegister(instr->value());
  Register temp = ToRegister(instr->temp());

  __ mov(temp, Operand(instr->map()));
  __ CmpP(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
  EmitBranch(instr, eq);
}

void LCodeGen::DoHasInPrototypeChainAndBranch(
    LHasInPrototypeChainAndBranch* instr) {
  Register const object = ToRegister(instr->object());
  Register const object_map = scratch0();
  Register const object_instance_type = ip;
  Register const object_prototype = object_map;
  Register const prototype = ToRegister(instr->prototype());

  // The {object} must be a spec object.  It's sufficient to know that {object}
  // is not a smi, since all other non-spec objects have {null} prototypes and
  // will be ruled out below.
  if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
    __ TestIfSmi(object);
    EmitFalseBranch(instr, eq);
  }
  // Loop through the {object}'s prototype chain looking for the {prototype}.
  __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
  Label loop;
  __ bind(&loop);

  // Deoptimize if the object needs to be access checked.
  __ LoadlB(object_instance_type,
            FieldMemOperand(object_map, Map::kBitFieldOffset));
  __ TestBit(object_instance_type, Map::kIsAccessCheckNeeded, r0);
  DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck, cr0);
  // Deoptimize for proxies.
  __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
  DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy);
  __ LoadP(object_prototype,
           FieldMemOperand(object_map, Map::kPrototypeOffset));
  __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
  EmitFalseBranch(instr, eq);
  __ CmpP(object_prototype, prototype);
  EmitTrueBranch(instr, eq);
  __ LoadP(object_map,
           FieldMemOperand(object_prototype, HeapObject::kMapOffset));
  __ b(&loop);
}

void LCodeGen::DoCmpT(LCmpT* instr) {
  DCHECK(ToRegister(instr->context()).is(cp));
  Token::Value op = instr->op();

  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
  CallCode(ic, RelocInfo::CODE_TARGET, instr);
  // This instruction also signals that no inlined smi code was used.
  __ CmpP(r2, Operand::Zero());

  Condition condition = ComputeCompareCondition(op);
  Label true_value, done;

  __ b(condition, &true_value, Label::kNear);

  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
  __ b(&done, Label::kNear);

  __ bind(&true_value);
  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);

  __ bind(&done);
}

void LCodeGen::DoReturn(LReturn* instr) {
  if (FLAG_trace && info()->IsOptimizing()) {
    // Push the return value on the stack as the parameter.
    // Runtime::TraceExit returns its parameter in r2.  Since we're leaving
    // the code managed by the register allocator and tearing down the frame,
    // it's safe to write to the context register.
    __ push(r2);
    __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
    __ CallRuntime(Runtime::kTraceExit);
  }
  if (info()->saves_caller_doubles()) {
    RestoreCallerDoubles();
  }
  if (instr->has_constant_parameter_count()) {
    int parameter_count = ToInteger32(instr->constant_parameter_count());
    int32_t sp_delta = (parameter_count + 1) * kPointerSize;
    if (NeedsEagerFrame()) {
      masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
    } else if (sp_delta != 0) {
      // TODO(joransiu): Clean this up into Macro Assembler
      if (sp_delta >= 0 && sp_delta < 4096)
        __ la(sp, MemOperand(sp, sp_delta));
      else
        __ lay(sp, MemOperand(sp, sp_delta));
    }
  } else {
    DCHECK(info()->IsStub());  // Functions would need to drop one more value.
    Register reg = ToRegister(instr->parameter_count());
    // The argument count parameter is a smi.
    if (NeedsEagerFrame()) {
      masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
    }
    __ SmiToPtrArrayOffset(r0, reg);
    __ AddP(sp, sp, r0);
  }

  __ Ret();
}

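// Illustrative only: the la/lay choice above. On s390, 'la' takes a
// 12-bit unsigned displacement (0..4095) while 'lay' takes a 20-bit
// signed one, so small positive stack adjustments can use the shorter
// 'la' encoding and everything else falls back to 'lay'.
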
void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register result = ToRegister(instr->result());
  __ LoadP(result, ContextMemOperand(context, instr->slot_index()));
  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
    } else {
      Label skip;
      __ bne(&skip, Label::kNear);
      __ mov(result, Operand(factory()->undefined_value()));
      __ bind(&skip);
    }
  }
}

void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
  Register context = ToRegister(instr->context());
  Register value = ToRegister(instr->value());
  Register scratch = scratch0();
  MemOperand target = ContextMemOperand(context, instr->slot_index());

  Label skip_assignment;

  if (instr->hydrogen()->RequiresHoleCheck()) {
    __ LoadP(scratch, target);
    __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
    if (instr->hydrogen()->DeoptimizesOnHole()) {
      DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
    } else {
      __ bne(&skip_assignment);
    }
  }

  __ StoreP(value, target);
  if (instr->hydrogen()->NeedsWriteBarrier()) {
    SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
                                ? OMIT_SMI_CHECK
                                : INLINE_SMI_CHECK;
    __ RecordWriteContextSlot(context, target.offset(), value, scratch,
                              GetLinkRegisterState(), kSaveFPRegs,
                              EMIT_REMEMBERED_SET, check_needed);
  }

  __ bind(&skip_assignment);
}

void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
  HObjectAccess access = instr->hydrogen()->access();
  int offset = access.offset();
  Register object = ToRegister(instr->object());

  if (access.IsExternalMemory()) {
    Register result = ToRegister(instr->result());
    MemOperand operand = MemOperand(object, offset);
    __ LoadRepresentation(result, operand, access.representation(), r0);
    return;
  }

  if (instr->hydrogen()->representation().IsDouble()) {
    DCHECK(access.IsInobject());
    DoubleRegister result = ToDoubleRegister(instr->result());
    __ LoadDouble(result, FieldMemOperand(object, offset));
    return;
  }

  Register result = ToRegister(instr->result());
  if (!access.IsInobject()) {
    __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
    object = result;
  }

  Representation representation = access.representation();

#if V8_TARGET_ARCH_S390X
  // 64-bit Smi optimization
  if (representation.IsSmi() &&
      instr->hydrogen()->representation().IsInteger32()) {
    // Read int value directly from upper half of the smi.
    offset = SmiWordOffset(offset);
    representation = Representation::Integer32();
  }
#endif

  __ LoadRepresentation(result, FieldMemOperand(object, offset), representation,
                        r0);
}

   2723 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
   2724   Register scratch = scratch0();
   2725   Register function = ToRegister(instr->function());
   2726   Register result = ToRegister(instr->result());
   2727 
   2728   // Get the prototype or initial map from the function.
   2729   __ LoadP(result,
   2730            FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   2731 
   2732   // Check that the function has a prototype or an initial map.
   2733   __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
   2734   DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
   2735 
   2736   // If the function does not have an initial map, we're done.
   2737   Label done;
   2738   __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
   2739   __ bne(&done, Label::kNear);
   2740 
   2741   // Get the prototype from the initial map.
   2742   __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
   2743 
   2744   // All done.
   2745   __ bind(&done);
   2746 }
   2747 
   2748 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
   2749   Register result = ToRegister(instr->result());
   2750   __ LoadRoot(result, instr->index());
   2751 }
   2752 
   2753 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
   2754   Register arguments = ToRegister(instr->arguments());
   2755   Register result = ToRegister(instr->result());
   2756   // There are two words between the frame pointer and the last argument.
   2757   // Subtracting from length accounts for one of them; add one more.
   2758   if (instr->length()->IsConstantOperand()) {
   2759     int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
   2760     if (instr->index()->IsConstantOperand()) {
   2761       int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   2762       int index = (const_length - const_index) + 1;
   2763       __ LoadP(result, MemOperand(arguments, index * kPointerSize));
   2764     } else {
   2765       Register index = ToRegister(instr->index());
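              // Compute (const_length + 1) - index as the negation of
              // index - (const_length + 1).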
   2766       __ SubP(result, index, Operand(const_length + 1));
   2767       __ LoadComplementRR(result, result);
   2768       __ ShiftLeftP(result, result, Operand(kPointerSizeLog2));
   2769       __ LoadP(result, MemOperand(arguments, result));
   2770     }
   2771   } else if (instr->index()->IsConstantOperand()) {
   2772     Register length = ToRegister(instr->length());
   2773     int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   2774     int loc = const_index - 1;
   2775     if (loc != 0) {
   2776       __ SubP(result, length, Operand(loc));
   2777       __ ShiftLeftP(result, result, Operand(kPointerSizeLog2));
   2778       __ LoadP(result, MemOperand(arguments, result));
   2779     } else {
   2780       __ ShiftLeftP(result, length, Operand(kPointerSizeLog2));
   2781       __ LoadP(result, MemOperand(arguments, result));
   2782     }
   2783   } else {
   2784     Register length = ToRegister(instr->length());
   2785     Register index = ToRegister(instr->index());
   2786     __ SubP(result, length, index);
   2787     __ AddP(result, result, Operand(1));
   2788     __ ShiftLeftP(result, result, Operand(kPointerSizeLog2));
   2789     __ LoadP(result, MemOperand(arguments, result));
   2790   }
   2791 }
   2792 
   2793 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
   2794   Register external_pointer = ToRegister(instr->elements());
   2795   Register key = no_reg;
   2796   ElementsKind elements_kind = instr->elements_kind();
   2797   bool key_is_constant = instr->key()->IsConstantOperand();
   2798   int constant_key = 0;
   2799   if (key_is_constant) {
   2800     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   2801     if (constant_key & 0xF0000000) {
   2802       Abort(kArrayIndexConstantValueTooBig);
   2803     }
   2804   } else {
   2805     key = ToRegister(instr->key());
   2806   }
   2807   int element_size_shift = ElementsKindToShiftSize(elements_kind);
   2808   bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
   2809   bool keyMaybeNegative = instr->hydrogen()->IsDehoisted();
   2810   int base_offset = instr->base_offset();
   2811   bool use_scratch = false;
   2812 
   2813   if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
   2814     DoubleRegister result = ToDoubleRegister(instr->result());
   2815     if (key_is_constant) {
   2816       base_offset += constant_key << element_size_shift;
   2817       if (!is_int20(base_offset)) {
   2818         __ mov(scratch0(), Operand(base_offset));
   2819         base_offset = 0;
   2820         use_scratch = true;
   2821       }
   2822     } else {
   2823       __ IndexToArrayOffset(scratch0(), key, element_size_shift, key_is_smi,
   2824                             keyMaybeNegative);
   2825       use_scratch = true;
   2826     }
   2827     if (elements_kind == FLOAT32_ELEMENTS) {
   2828       if (!use_scratch) {
   2829         __ ldeb(result, MemOperand(external_pointer, base_offset));
   2830       } else {
   2831         __ ldeb(result, MemOperand(scratch0(), external_pointer, base_offset));
   2832       }
   2833     } else {  // i.e. elements_kind == FLOAT64_ELEMENTS
   2834       if (!use_scratch) {
   2835         __ LoadDouble(result, MemOperand(external_pointer, base_offset));
   2836       } else {
   2837         __ LoadDouble(result,
   2838                       MemOperand(scratch0(), external_pointer, base_offset));
   2839       }
   2840     }
   2841   } else {
   2842     Register result = ToRegister(instr->result());
   2843     MemOperand mem_operand =
   2844         PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
   2845                             constant_key, element_size_shift, base_offset,
   2846                             keyMaybeNegative);
   2847     switch (elements_kind) {
   2848       case INT8_ELEMENTS:
   2849         __ LoadB(result, mem_operand);
   2850         break;
   2851       case UINT8_ELEMENTS:
   2852       case UINT8_CLAMPED_ELEMENTS:
   2853         __ LoadlB(result, mem_operand);
   2854         break;
   2855       case INT16_ELEMENTS:
   2856         __ LoadHalfWordP(result, mem_operand);
   2857         break;
   2858       case UINT16_ELEMENTS:
   2859         __ LoadLogicalHalfWordP(result, mem_operand);
   2860         break;
   2861       case INT32_ELEMENTS:
   2862         __ LoadW(result, mem_operand, r0);
   2863         break;
   2864       case UINT32_ELEMENTS:
   2865         __ LoadlW(result, mem_operand, r0);
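                // A value with the top bit set does not fit in a signed
                // 32-bit integer; deoptimize unless the instruction is
                // marked as producing a uint32.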
   2866         if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
   2867           __ CmpLogical32(result, Operand(0x80000000));
   2868           DeoptimizeIf(ge, instr, DeoptimizeReason::kNegativeValue);
   2869         }
   2870         break;
   2871       case FLOAT32_ELEMENTS:
   2872       case FLOAT64_ELEMENTS:
   2873       case FAST_HOLEY_DOUBLE_ELEMENTS:
   2874       case FAST_HOLEY_ELEMENTS:
   2875       case FAST_HOLEY_SMI_ELEMENTS:
   2876       case FAST_DOUBLE_ELEMENTS:
   2877       case FAST_ELEMENTS:
   2878       case FAST_SMI_ELEMENTS:
   2879       case DICTIONARY_ELEMENTS:
   2880       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
   2881       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
   2882       case FAST_STRING_WRAPPER_ELEMENTS:
   2883       case SLOW_STRING_WRAPPER_ELEMENTS:
   2884       case NO_ELEMENTS:
   2885         UNREACHABLE();
   2886         break;
   2887     }
   2888   }
   2889 }
   2890 
   2891 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
   2892   Register elements = ToRegister(instr->elements());
   2893   bool key_is_constant = instr->key()->IsConstantOperand();
   2894   Register key = no_reg;
   2895   DoubleRegister result = ToDoubleRegister(instr->result());
   2896   Register scratch = scratch0();
   2897 
   2898   int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
   2899   bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
   2900   bool keyMaybeNegative = instr->hydrogen()->IsDehoisted();
   2901   int constant_key = 0;
   2902   if (key_is_constant) {
   2903     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   2904     if (constant_key & 0xF0000000) {
   2905       Abort(kArrayIndexConstantValueTooBig);
   2906     }
   2907   } else {
   2908     key = ToRegister(instr->key());
   2909   }
   2910 
   2911   bool use_scratch = false;
   2912   intptr_t base_offset = instr->base_offset() + constant_key * kDoubleSize;
   2913   if (!key_is_constant) {
   2914     use_scratch = true;
   2915     __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi,
   2916                           keyMaybeNegative);
   2917   }
   2918 
   2919   // Memory references support up to a 20-bit signed displacement in RXY form.
   2920   // Include Register::kExponentOffset in the check, so we are guaranteed not
   2921   // to overflow the displacement later.
   2922   if (!is_int20(base_offset + Register::kExponentOffset)) {
   2923     use_scratch = true;
   2924     if (key_is_constant) {
   2925       __ mov(scratch, Operand(base_offset));
   2926     } else {
   2927       __ AddP(scratch, Operand(base_offset));
   2928     }
   2929     base_offset = 0;
   2930   }
   2931 
   2932   if (!use_scratch) {
   2933     __ LoadDouble(result, MemOperand(elements, base_offset));
   2934   } else {
   2935     __ LoadDouble(result, MemOperand(scratch, elements, base_offset));
   2936   }
   2937 
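          // The hole is a NaN whose upper 32 bits equal kHoleNanUpper32, so
          // comparing the exponent word is sufficient.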
   2938   if (instr->hydrogen()->RequiresHoleCheck()) {
   2939     if (!use_scratch) {
   2940       __ LoadlW(r0,
   2941                 MemOperand(elements, base_offset + Register::kExponentOffset));
   2942     } else {
   2943       __ LoadlW(r0, MemOperand(scratch, elements,
   2944                                base_offset + Register::kExponentOffset));
   2945     }
   2946     __ Cmp32(r0, Operand(kHoleNanUpper32));
   2947     DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
   2948   }
   2949 }
   2950 
   2951 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
   2952   HLoadKeyed* hinstr = instr->hydrogen();
   2953   Register elements = ToRegister(instr->elements());
   2954   Register result = ToRegister(instr->result());
   2955   Register scratch = scratch0();
   2956   int offset = instr->base_offset();
   2957 
   2958   if (instr->key()->IsConstantOperand()) {
   2959     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
   2960     offset += ToInteger32(const_operand) * kPointerSize;
   2961   } else {
   2962     Register key = ToRegister(instr->key());
   2963     // Even though the HLoadKeyed instruction forces the input
   2964     // representation for the key to be an integer, the input gets replaced
   2965     // during bounds check elimination with the index argument to the bounds
   2966     // check, which can be tagged, so that case must be handled here, too.
   2967     if (hinstr->key()->representation().IsSmi()) {
   2968       __ SmiToPtrArrayOffset(scratch, key);
   2969     } else {
   2970       __ ShiftLeftP(scratch, key, Operand(kPointerSizeLog2));
   2971     }
   2972   }
   2973 
   2974   bool requires_hole_check = hinstr->RequiresHoleCheck();
   2975   Representation representation = hinstr->representation();
   2976 
   2977 #if V8_TARGET_ARCH_S390X
   2978   // 64-bit Smi optimization
   2979   if (representation.IsInteger32() &&
   2980       hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
   2981     DCHECK(!requires_hole_check);
   2982     // Read int value directly from upper half of the smi.
   2983     offset = SmiWordOffset(offset);
   2984   }
   2985 #endif
   2986 
   2987   if (instr->key()->IsConstantOperand()) {
   2988     __ LoadRepresentation(result, MemOperand(elements, offset), representation,
   2989                           r1);
   2990   } else {
   2991     __ LoadRepresentation(result, MemOperand(scratch, elements, offset),
   2992                           representation, r1);
   2993   }
   2994 
   2995   // Check for the hole value.
   2996   if (requires_hole_check) {
   2997     if (IsFastSmiElementsKind(hinstr->elements_kind())) {
   2998       __ TestIfSmi(result);
   2999       DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
   3000     } else {
   3001       __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
   3002       DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
   3003     }
   3004   } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
   3005     DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
   3006     Label done;
   3007     __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
   3008     __ CmpP(result, scratch);
   3009     __ bne(&done);
   3010     if (info()->IsStub()) {
   3011       // A stub can safely convert the hole to undefined only if the array
   3012       // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
   3013       // it needs to bail out.
   3014       __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
   3015       __ LoadP(result, FieldMemOperand(result, Cell::kValueOffset));
   3016       __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kProtectorValid), r0);
   3017       DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
   3018     }
   3019     __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
   3020     __ bind(&done);
   3021   }
   3022 }
   3023 
   3024 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
   3025   if (instr->is_fixed_typed_array()) {
   3026     DoLoadKeyedExternalArray(instr);
   3027   } else if (instr->hydrogen()->representation().IsDouble()) {
   3028     DoLoadKeyedFixedDoubleArray(instr);
   3029   } else {
   3030     DoLoadKeyedFixedArray(instr);
   3031   }
   3032 }
   3033 
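        // Computes a MemOperand for base + (key << element_size_shift) +
        // base_offset, materializing the offset in scratch0() when it does not
        // fit in the 20-bit signed displacement of the RXY instruction format.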
   3034 MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base,
   3035                                          bool key_is_constant, bool key_is_smi,
   3036                                          int constant_key,
   3037                                          int element_size_shift,
   3038                                          int base_offset,
   3039                                          bool keyMaybeNegative) {
   3040   Register scratch = scratch0();
   3041 
   3042   if (key_is_constant) {
   3043     int offset = (base_offset + (constant_key << element_size_shift));
   3044     if (!is_int20(offset)) {
   3045       __ mov(scratch, Operand(offset));
   3046       return MemOperand(base, scratch);
   3047     } else {
   3048       return MemOperand(base,
   3049                         (constant_key << element_size_shift) + base_offset);
   3050     }
   3051   }
   3052 
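          // A smi-tagged key is already shifted left by kSmiTagSize +
          // kSmiShiftSize, so no extra shift is needed when that matches the
          // element size shift.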
   3053   bool needs_shift =
   3054       (element_size_shift != (key_is_smi ? kSmiTagSize + kSmiShiftSize : 0));
   3055 
   3056   if (needs_shift) {
   3057     __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi,
   3058                           keyMaybeNegative);
   3059   } else {
   3060     scratch = key;
   3061   }
   3062 
   3063   if (!is_int20(base_offset)) {
   3064     __ AddP(scratch, Operand(base_offset));
   3065     base_offset = 0;
   3066   }
   3067   return MemOperand(scratch, base, base_offset);
   3068 }
   3069 
   3070 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
   3071   Register scratch = scratch0();
   3072   Register result = ToRegister(instr->result());
   3073 
   3074   if (instr->hydrogen()->from_inlined()) {
   3075     __ lay(result, MemOperand(sp, -2 * kPointerSize));
   3076   } else if (instr->hydrogen()->arguments_adaptor()) {
   3077     // Check if the calling frame is an arguments adaptor frame.
   3078     Label done, adapted;
   3079     __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   3080     __ LoadP(
   3081         result,
   3082         MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
   3083     __ LoadSmiLiteral(r0, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
   3084     __ CmpP(result, r0);
   3085 
   3086     // Result is the frame pointer for the frame if not adapted and for the real
   3087     // frame below the adaptor frame if adapted.
   3088     __ beq(&adapted, Label::kNear);
   3089     __ LoadRR(result, fp);
   3090     __ b(&done, Label::kNear);
   3091 
   3092     __ bind(&adapted);
   3093     __ LoadRR(result, scratch);
   3094     __ bind(&done);
   3095   } else {
   3096     __ LoadRR(result, fp);
   3097   }
   3098 }
   3099 
   3100 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
   3101   Register elem = ToRegister(instr->elements());
   3102   Register result = ToRegister(instr->result());
   3103 
   3104   Label done;
   3105 
   3106   // If there is no arguments adaptor frame, the number of arguments is fixed.
   3107   __ CmpP(fp, elem);
   3108   __ mov(result, Operand(scope()->num_parameters()));
   3109   __ beq(&done, Label::kNear);
   3110 
   3111   // Arguments adaptor frame present. Get argument length from there.
   3112   __ LoadP(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   3113   __ LoadP(result,
   3114            MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
   3115   __ SmiUntag(result);
   3116 
   3117   // Argument length is in result register.
   3118   __ bind(&done);
   3119 }
   3120 
   3121 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
   3122   Register receiver = ToRegister(instr->receiver());
   3123   Register function = ToRegister(instr->function());
   3124   Register result = ToRegister(instr->result());
   3125   Register scratch = scratch0();
   3126 
   3127   // If the receiver is null or undefined, we have to pass the global
   3128   // object as a receiver to normal functions. Values have to be
   3129   // passed unchanged to builtins and strict-mode functions.
   3130   Label global_object, result_in_receiver;
   3131 
   3132   if (!instr->hydrogen()->known_function()) {
   3133     // Do not transform the receiver to object for strict mode
   3134     // functions or builtins.
   3135     __ LoadP(scratch,
   3136              FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
   3137     __ LoadlW(scratch, FieldMemOperand(
   3138                            scratch, SharedFunctionInfo::kCompilerHintsOffset));
   3139     __ AndP(r0, scratch, Operand((1 << SharedFunctionInfo::kStrictModeBit) |
   3140                                  (1 << SharedFunctionInfo::kNativeBit)));
   3141     __ bne(&result_in_receiver, Label::kNear);
   3142   }
   3143 
   3144   // Normal function. Replace undefined or null with global receiver.
   3145   __ CompareRoot(receiver, Heap::kNullValueRootIndex);
   3146   __ beq(&global_object, Label::kNear);
   3147   __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
   3148   __ beq(&global_object, Label::kNear);
   3149 
   3150   // Deoptimize if the receiver is not a JS object.
   3151   __ TestIfSmi(receiver);
   3152   DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
   3153   __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
   3154   DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject);
   3155 
   3156   __ b(&result_in_receiver, Label::kNear);
   3157   __ bind(&global_object);
   3158   __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset));
   3159   __ LoadP(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
   3160   __ LoadP(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
   3161 
   3162   if (result.is(receiver)) {
   3163     __ bind(&result_in_receiver);
   3164   } else {
   3165     Label result_ok;
   3166     __ b(&result_ok, Label::kNear);
   3167     __ bind(&result_in_receiver);
   3168     __ LoadRR(result, receiver);
   3169     __ bind(&result_ok);
   3170   }
   3171 }
   3172 
   3173 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
   3174   Register receiver = ToRegister(instr->receiver());
   3175   Register function = ToRegister(instr->function());
   3176   Register length = ToRegister(instr->length());
   3177   Register elements = ToRegister(instr->elements());
   3178   Register scratch = scratch0();
   3179   DCHECK(receiver.is(r2));  // Used for parameter count.
   3180   DCHECK(function.is(r3));  // Required by InvokeFunction.
   3181   DCHECK(ToRegister(instr->result()).is(r2));
   3182 
   3183   // Copy the arguments to this function possibly from the
   3184   // adaptor frame below it.
   3185   const uint32_t kArgumentsLimit = 1 * KB;
   3186   __ CmpLogicalP(length, Operand(kArgumentsLimit));
   3187   DeoptimizeIf(gt, instr, DeoptimizeReason::kTooManyArguments);
   3188 
   3189   // Push the receiver and use the register to keep the original
   3190   // number of arguments.
   3191   __ push(receiver);
   3192   __ LoadRR(receiver, length);
   3193   // The arguments are at a one pointer size offset from elements.
   3194   __ AddP(elements, Operand(1 * kPointerSize));
   3195 
   3196   // Loop through the arguments pushing them onto the execution
   3197   // stack.
   3198   Label invoke, loop;
   3199   // length is a small non-negative integer, due to the test above.
   3200   __ CmpP(length, Operand::Zero());
   3201   __ beq(&invoke, Label::kNear);
   3202   __ bind(&loop);
   3203   __ ShiftLeftP(r1, length, Operand(kPointerSizeLog2));
   3204   __ LoadP(scratch, MemOperand(elements, r1));
   3205   __ push(scratch);
   3206   __ BranchOnCount(length, &loop);
   3207 
   3208   __ bind(&invoke);
   3209 
   3210   InvokeFlag flag = CALL_FUNCTION;
   3211   if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
   3212     DCHECK(!info()->saves_caller_doubles());
   3213     // TODO(ishell): drop current frame before pushing arguments to the stack.
   3214     flag = JUMP_FUNCTION;
   3215     ParameterCount actual(r2);
   3216     // It is safe to use r5, r6 and r7 as scratch registers here given that
   3217     // 1) we are not going to return to caller function anyway,
   3218     // 2) r5 (new.target) will be initialized below.
   3219     PrepareForTailCall(actual, r5, r6, r7);
   3220   }
   3221 
   3222   DCHECK(instr->HasPointerMap());
   3223   LPointerMap* pointers = instr->pointer_map();
   3224   SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
   3225   // The number of arguments is stored in receiver, which is r2, as expected
   3226   // by InvokeFunction.
   3227   ParameterCount actual(receiver);
   3228   __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
   3229 }
   3230 
   3231 void LCodeGen::DoPushArgument(LPushArgument* instr) {
   3232   LOperand* argument = instr->value();
   3233   if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
   3234     Abort(kDoPushArgumentNotImplementedForDoubleType);
   3235   } else {
   3236     Register argument_reg = EmitLoadRegister(argument, ip);
   3237     __ push(argument_reg);
   3238   }
   3239 }
   3240 
   3241 void LCodeGen::DoDrop(LDrop* instr) { __ Drop(instr->count()); }
   3242 
   3243 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   3244   Register result = ToRegister(instr->result());
   3245   __ LoadP(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   3246 }
   3247 
   3248 void LCodeGen::DoContext(LContext* instr) {
   3249   // If there is a non-return use, the context must be moved to a register.
   3250   Register result = ToRegister(instr->result());
   3251   if (info()->IsOptimizing()) {
   3252     __ LoadP(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
   3253   } else {
   3254     // If there is no frame, the context must be in cp.
   3255     DCHECK(result.is(cp));
   3256   }
   3257 }
   3258 
   3259 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
   3260   DCHECK(ToRegister(instr->context()).is(cp));
   3261   __ Move(scratch0(), instr->hydrogen()->pairs());
   3262   __ push(scratch0());
   3263   __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
   3264   __ push(scratch0());
   3265   __ Move(scratch0(), instr->hydrogen()->feedback_vector());
   3266   __ push(scratch0());
   3267   CallRuntime(Runtime::kDeclareGlobals, instr);
   3268 }
   3269 
   3270 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
   3271                                  int formal_parameter_count, int arity,
   3272                                  bool is_tail_call, LInstruction* instr) {
   3273   bool dont_adapt_arguments =
   3274       formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
   3275   bool can_invoke_directly =
   3276       dont_adapt_arguments || formal_parameter_count == arity;
   3277 
   3278   Register function_reg = r3;
   3279 
   3280   LPointerMap* pointers = instr->pointer_map();
   3281 
   3282   if (can_invoke_directly) {
   3283     // Change context.
   3284     __ LoadP(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
   3285 
   3286     // Always initialize new target and number of actual arguments.
   3287     __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
   3288     __ mov(r2, Operand(arity));
   3289 
   3290     bool is_self_call = function.is_identical_to(info()->closure());
   3291 
   3292     // Invoke function.
   3293     if (is_self_call) {
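              // CodeObject() is a handle that is resolved to the code object
              // being generated once assembly completes, which enables a
              // direct recursive call.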
   3294       Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
   3295       if (is_tail_call) {
   3296         __ Jump(self, RelocInfo::CODE_TARGET);
   3297       } else {
   3298         __ Call(self, RelocInfo::CODE_TARGET);
   3299       }
   3300     } else {
   3301       __ LoadP(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
   3302       if (is_tail_call) {
   3303         __ JumpToJSEntry(ip);
   3304       } else {
   3305         __ CallJSEntry(ip);
   3306       }
   3307     }
   3308 
   3309     if (!is_tail_call) {
   3310       // Set up deoptimization.
   3311       RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
   3312     }
   3313   } else {
   3314     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   3315     ParameterCount actual(arity);
   3316     ParameterCount expected(formal_parameter_count);
   3317     InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
   3318     __ InvokeFunction(function_reg, expected, actual, flag, generator);
   3319   }
   3320 }
   3321 
   3322 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
   3323   DCHECK(instr->context() != NULL);
   3324   DCHECK(ToRegister(instr->context()).is(cp));
   3325   Register input = ToRegister(instr->value());
   3326   Register result = ToRegister(instr->result());
   3327   Register scratch = scratch0();
   3328 
   3329   // Deoptimize if not a heap number.
   3330   __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
   3331   __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
   3332   DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
   3333 
   3334   Label done;
   3335   Register exponent = scratch0();
   3336   scratch = no_reg;
   3337   __ LoadlW(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
   3338   // Check the sign of the argument. If the argument is positive, just
   3339   // return it.
   3340   __ Cmp32(exponent, Operand::Zero());
   3341   // Move the input to the result if necessary.
   3342   __ Move(result, input);
   3343   __ bge(&done);
   3344 
   3345   // Input is negative. Reverse its sign.
   3346   // Preserve the value of all registers.
   3347   {
   3348     PushSafepointRegistersScope scope(this);
   3349 
   3350     // Registers were saved at the safepoint, so we can use
   3351     // many scratch registers.
   3352     Register tmp1 = input.is(r3) ? r2 : r3;
   3353     Register tmp2 = input.is(r4) ? r2 : r4;
   3354     Register tmp3 = input.is(r5) ? r2 : r5;
   3355     Register tmp4 = input.is(r6) ? r2 : r6;
   3356 
   3357     // exponent: floating point exponent value.
   3358 
   3359     Label allocated, slow;
   3360     __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
   3361     __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
   3362     __ b(&allocated);
   3363 
   3364     // Slow case: Call the runtime system to do the number allocation.
   3365     __ bind(&slow);
   3366 
   3367     CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
   3368                             instr->context());
   3369     // Set the pointer to the new heap number in tmp.
   3370     if (!tmp1.is(r2)) __ LoadRR(tmp1, r2);
   3371     // Restore input_reg after call to runtime.
   3372     __ LoadFromSafepointRegisterSlot(input, input);
   3373     __ LoadlW(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
   3374 
   3375     __ bind(&allocated);
   3376     // exponent: floating point exponent value.
   3377     // tmp1: allocated heap number.
   3378 
   3379     // Clear the sign bit.
   3380     __ nilf(exponent, Operand(~HeapNumber::kSignMask));
   3381     __ StoreW(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
   3382     __ LoadlW(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
   3383     __ StoreW(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
   3384 
   3385     __ StoreToSafepointRegisterSlot(tmp1, result);
   3386   }
   3387 
   3388   __ bind(&done);
   3389 }
   3390 
   3391 void LCodeGen::EmitMathAbs(LMathAbs* instr) {
   3392   Register input = ToRegister(instr->value());
   3393   Register result = ToRegister(instr->result());
   3394   Label done;
   3395   __ CmpP(input, Operand::Zero());
   3396   __ Move(result, input);
   3397   __ bge(&done, Label::kNear);
   3398   __ LoadComplementRR(result, result);
   3399   // Deoptimize on overflow.
   3400   DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
   3401   __ bind(&done);
   3402 }
   3403 
   3404 #if V8_TARGET_ARCH_S390X
   3405 void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
   3406   Register input = ToRegister(instr->value());
   3407   Register result = ToRegister(instr->result());
   3408   Label done;
   3409   __ Cmp32(input, Operand::Zero());
   3410   __ Move(result, input);
   3411   __ bge(&done, Label::kNear);
   3412 
   3413   // Deoptimize on overflow.
   3414   __ Cmp32(input, Operand(0x80000000));
   3415   DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
   3416 
   3417   __ LoadComplementRR(result, result);
   3418   __ bind(&done);
   3419 }
   3420 #endif
   3421 
   3422 void LCodeGen::DoMathAbs(LMathAbs* instr) {
   3423   // Class for deferred case.
   3424   class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
   3425    public:
   3426     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
   3427         : LDeferredCode(codegen), instr_(instr) {}
   3428     void Generate() override {
   3429       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
   3430     }
   3431     LInstruction* instr() override { return instr_; }
   3432 
   3433    private:
   3434     LMathAbs* instr_;
   3435   };
   3436 
   3437   Representation r = instr->hydrogen()->value()->representation();
   3438   if (r.IsDouble()) {
   3439     DoubleRegister input = ToDoubleRegister(instr->value());
   3440     DoubleRegister result = ToDoubleRegister(instr->result());
   3441     __ lpdbr(result, input);
   3442 #if V8_TARGET_ARCH_S390X
   3443   } else if (r.IsInteger32()) {
   3444     EmitInteger32MathAbs(instr);
   3445   } else if (r.IsSmi()) {
   3446 #else
   3447   } else if (r.IsSmiOrInteger32()) {
   3448 #endif
   3449     EmitMathAbs(instr);
   3450   } else {
   3451     // Representation is tagged.
   3452     DeferredMathAbsTaggedHeapNumber* deferred =
   3453         new (zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
   3454     Register input = ToRegister(instr->value());
   3455     // Smi check.
   3456     __ JumpIfNotSmi(input, deferred->entry());
   3457     // If smi, handle it directly.
   3458     EmitMathAbs(instr);
   3459     __ bind(deferred->exit());
   3460   }
   3461 }
   3462 
   3463 void LCodeGen::DoMathFloor(LMathFloor* instr) {
   3464   DoubleRegister input = ToDoubleRegister(instr->value());
   3465   Register result = ToRegister(instr->result());
   3466   Register input_high = scratch0();
   3467   Register scratch = ip;
   3468   Label done, exact;
   3469 
   3470   __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
   3471                    &exact);
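          // TryInt32Floor only falls through on failure (NaN or a value
          // outside the int32 range).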
   3472   DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN);
   3473 
   3474   __ bind(&exact);
   3475   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3476     // Test for -0.
   3477     __ CmpP(result, Operand::Zero());
   3478     __ bne(&done, Label::kNear);
   3479     __ Cmp32(input_high, Operand::Zero());
   3480     DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
   3481   }
   3482   __ bind(&done);
   3483 }
   3484 
   3485 void LCodeGen::DoMathRound(LMathRound* instr) {
   3486   DoubleRegister input = ToDoubleRegister(instr->value());
   3487   Register result = ToRegister(instr->result());
   3488   DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
   3489   DoubleRegister input_plus_dot_five = double_scratch1;
   3490   Register scratch1 = scratch0();
   3491   Register scratch2 = ip;
   3492   DoubleRegister dot_five = double_scratch0();
   3493   Label convert, done;
   3494 
   3495   __ LoadDoubleLiteral(dot_five, 0.5, r0);
   3496   __ lpdbr(double_scratch1, input);
   3497   __ cdbr(double_scratch1, dot_five);
   3498   DeoptimizeIf(unordered, instr, DeoptimizeReason::kLostPrecisionOrNaN);
   3499   // If input is in [-0.5, -0], the result is -0.
   3500   // If input is in [+0, +0.5[, the result is +0.
   3501   // If the input is +0.5, the result is 1.
   3502   __ bgt(&convert, Label::kNear);  // Out of [-0.5, +0.5].
   3503   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3504     // [-0.5, -0] (negative) yields minus zero.
   3505     __ TestDoubleSign(input, scratch1);
   3506     DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
   3507   }
   3508   Label return_zero;
   3509   __ cdbr(input, dot_five);
   3510   __ bne(&return_zero, Label::kNear);
   3511   __ LoadImmP(result, Operand(1));  // Input is +0.5, so the result is 1.
   3512   __ b(&done, Label::kNear);
   3513   // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
   3514   // flag kBailoutOnMinusZero.
   3515   __ bind(&return_zero);
   3516   __ LoadImmP(result, Operand::Zero());
   3517   __ b(&done, Label::kNear);
   3518 
   3519   __ bind(&convert);
   3520   __ ldr(input_plus_dot_five, input);
   3521   __ adbr(input_plus_dot_five, dot_five);
   3522   // Reuse dot_five (double_scratch0) as we no longer need this value.
   3523   __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
   3524                    double_scratch0(), &done, &done);
   3525   DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN);
   3526   __ bind(&done);
   3527 }
   3528 
   3529 void LCodeGen::DoMathFround(LMathFround* instr) {
   3530   DoubleRegister input_reg = ToDoubleRegister(instr->value());
   3531   DoubleRegister output_reg = ToDoubleRegister(instr->result());
   3532 
   3533   // Round double to float
   3534   __ ledbr(output_reg, input_reg);
   3535   // Extend from float to double
   3536   __ ldebr(output_reg, output_reg);
   3537 }
   3538 
   3539 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
   3540   DoubleRegister input = ToDoubleRegister(instr->value());
   3541   DoubleRegister result = ToDoubleRegister(instr->result());
   3542   __ sqdbr(result, input);
   3543 }
   3544 
   3545 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
   3546   DoubleRegister input = ToDoubleRegister(instr->value());
   3547   DoubleRegister result = ToDoubleRegister(instr->result());
   3548   DoubleRegister temp = double_scratch0();
   3549 
   3550   // Note that according to ECMA-262 15.8.2.13:
   3551   // Math.pow(-Infinity, 0.5) == Infinity
   3552   // Math.sqrt(-Infinity) == NaN
   3553   Label skip, done;
   3554 
   3555   __ LoadDoubleLiteral(temp, -V8_INFINITY, scratch0());
   3556   __ cdbr(input, temp);
   3557   __ bne(&skip, Label::kNear);
   3558   __ lcdbr(result, temp);
   3559   __ b(&done, Label::kNear);
   3560 
   3561   // Add +0 to convert -0 to +0.
   3562   __ bind(&skip);
   3563   __ ldr(result, input);
   3564   __ lzdr(kDoubleRegZero);
   3565   __ adbr(result, kDoubleRegZero);
   3566   __ sqdbr(result, result);
   3567   __ bind(&done);
   3568 }
   3569 
   3570 void LCodeGen::DoPower(LPower* instr) {
   3571   Representation exponent_type = instr->hydrogen()->right()->representation();
   3572   // Having marked this as a call, we can use any registers.
   3573   // Just make sure that the input/output registers are the expected ones.
   3574   Register tagged_exponent = MathPowTaggedDescriptor::exponent();
   3575   DCHECK(!instr->right()->IsDoubleRegister() ||
   3576          ToDoubleRegister(instr->right()).is(d2));
   3577   DCHECK(!instr->right()->IsRegister() ||
   3578          ToRegister(instr->right()).is(tagged_exponent));
   3579   DCHECK(ToDoubleRegister(instr->left()).is(d1));
   3580   DCHECK(ToDoubleRegister(instr->result()).is(d3));
   3581 
   3582   if (exponent_type.IsSmi()) {
   3583     MathPowStub stub(isolate(), MathPowStub::TAGGED);
   3584     __ CallStub(&stub);
   3585   } else if (exponent_type.IsTagged()) {
   3586     Label no_deopt;
   3587     __ JumpIfSmi(tagged_exponent, &no_deopt);
   3588     __ LoadP(r9, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
   3589     __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
   3590     DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
   3591     __ bind(&no_deopt);
   3592     MathPowStub stub(isolate(), MathPowStub::TAGGED);
   3593     __ CallStub(&stub);
   3594   } else if (exponent_type.IsInteger32()) {
   3595     MathPowStub stub(isolate(), MathPowStub::INTEGER);
   3596     __ CallStub(&stub);
   3597   } else {
   3598     DCHECK(exponent_type.IsDouble());
   3599     MathPowStub stub(isolate(), MathPowStub::DOUBLE);
   3600     __ CallStub(&stub);
   3601   }
   3602 }
   3603 
   3604 void LCodeGen::DoMathCos(LMathCos* instr) {
   3605   __ PrepareCallCFunction(0, 1, scratch0());
   3606   __ MovToFloatParameter(ToDoubleRegister(instr->value()));
   3607   __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1);
   3608   __ MovFromFloatResult(ToDoubleRegister(instr->result()));
   3609 }
   3610 
   3611 void LCodeGen::DoMathSin(LMathSin* instr) {
   3612   __ PrepareCallCFunction(0, 1, scratch0());
   3613   __ MovToFloatParameter(ToDoubleRegister(instr->value()));
   3614   __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
   3615   __ MovFromFloatResult(ToDoubleRegister(instr->result()));
   3616 }
   3617 
   3618 void LCodeGen::DoMathExp(LMathExp* instr) {
   3619   __ PrepareCallCFunction(0, 1, scratch0());
   3620   __ MovToFloatParameter(ToDoubleRegister(instr->value()));
   3621   __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1);
   3622   __ MovFromFloatResult(ToDoubleRegister(instr->result()));
   3623 }
   3624 
   3625 void LCodeGen::DoMathLog(LMathLog* instr) {
   3626   __ PrepareCallCFunction(0, 1, scratch0());
   3627   __ MovToFloatParameter(ToDoubleRegister(instr->value()));
   3628   __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
   3629   __ MovFromFloatResult(ToDoubleRegister(instr->result()));
   3630 }
   3631 
   3632 void LCodeGen::DoMathClz32(LMathClz32* instr) {
   3633   Register input = ToRegister(instr->value());
   3634   Register result = ToRegister(instr->result());
   3635   Label done;
   3636   __ llgfr(result, input);
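          // flogr places the bit index of the leftmost one (the number of
          // leading zeros of the 64-bit value) in r0. The input was
          // zero-extended, so subtract 32 to get the 32-bit count.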
   3637   __ flogr(r0, result);
   3638   __ LoadRR(result, r0);
   3639   __ CmpP(r0, Operand::Zero());
   3640   __ beq(&done, Label::kNear);
   3641   __ SubP(result, Operand(32));
   3642   __ bind(&done);
   3643 }
   3644 
   3645 void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
   3646                                   Register scratch1, Register scratch2,
   3647                                   Register scratch3) {
   3648 #ifdef DEBUG
   3649   if (actual.is_reg()) {
   3650     DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
   3651   } else {
   3652     DCHECK(!AreAliased(scratch1, scratch2, scratch3));
   3653   }
   3654 #endif
   3655   if (FLAG_code_comments) {
   3656     if (actual.is_reg()) {
   3657       Comment(";;; PrepareForTailCall, actual: %s {",
   3658               RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
   3659                   actual.reg().code()));
   3660     } else {
   3661       Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
   3662     }
   3663   }
   3664 
   3665   // Check if next frame is an arguments adaptor frame.
   3666   Register caller_args_count_reg = scratch1;
   3667   Label no_arguments_adaptor, formal_parameter_count_loaded;
   3668   __ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   3669   __ LoadP(scratch3,
   3670            MemOperand(scratch2, StandardFrameConstants::kContextOffset));
   3671   __ CmpSmiLiteral(scratch3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
   3672   __ bne(&no_arguments_adaptor);
   3673 
   3674   // Drop current frame and load arguments count from arguments adaptor frame.
   3675   __ LoadRR(fp, scratch2);
   3676   __ LoadP(caller_args_count_reg,
   3677            MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
   3678   __ SmiUntag(caller_args_count_reg);
   3679   __ b(&formal_parameter_count_loaded);
   3680 
   3681   __ bind(&no_arguments_adaptor);
   3682   // Load caller's formal parameter count
   3683   __ mov(caller_args_count_reg, Operand(info()->literal()->parameter_count()));
   3684 
   3685   __ bind(&formal_parameter_count_loaded);
   3686   __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
   3687 
   3688   Comment(";;; }");
   3689 }
   3690 
   3691 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
   3692   HInvokeFunction* hinstr = instr->hydrogen();
   3693   DCHECK(ToRegister(instr->context()).is(cp));
   3694   DCHECK(ToRegister(instr->function()).is(r3));
   3695   DCHECK(instr->HasPointerMap());
   3696 
   3697   bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
   3698 
   3699   if (is_tail_call) {
   3700     DCHECK(!info()->saves_caller_doubles());
   3701     ParameterCount actual(instr->arity());
   3702     // It is safe to use r5, r6 and r7 as scratch registers here given that
   3703     // 1) we are not going to return to caller function anyway,
   3704     // 2) r5 (new.target) will be initialized below.
   3705     PrepareForTailCall(actual, r5, r6, r7);
   3706   }
   3707 
   3708   Handle<JSFunction> known_function = hinstr->known_function();
   3709   if (known_function.is_null()) {
   3710     LPointerMap* pointers = instr->pointer_map();
   3711     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   3712     ParameterCount actual(instr->arity());
   3713     InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
   3714     __ InvokeFunction(r3, no_reg, actual, flag, generator);
   3715   } else {
   3716     CallKnownFunction(known_function, hinstr->formal_parameter_count(),
   3717                       instr->arity(), is_tail_call, instr);
   3718   }
   3719 }
   3720 
   3721 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
   3722   DCHECK(ToRegister(instr->result()).is(r2));
   3723 
   3724   if (instr->hydrogen()->IsTailCall()) {
   3725     if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
   3726 
   3727     if (instr->target()->IsConstantOperand()) {
   3728       LConstantOperand* target = LConstantOperand::cast(instr->target());
   3729       Handle<Code> code = Handle<Code>::cast(ToHandle(target));
   3730       __ Jump(code, RelocInfo::CODE_TARGET);
   3731     } else {
   3732       DCHECK(instr->target()->IsRegister());
   3733       Register target = ToRegister(instr->target());
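              // The entry point is past the Code object header; subtracting
              // kHeapObjectTag removes the heap object tag from the pointer.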
   3734       __ AddP(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
   3735       __ JumpToJSEntry(ip);
   3736     }
   3737   } else {
   3738     LPointerMap* pointers = instr->pointer_map();
   3739     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   3740 
   3741     if (instr->target()->IsConstantOperand()) {
   3742       LConstantOperand* target = LConstantOperand::cast(instr->target());
   3743       Handle<Code> code = Handle<Code>::cast(ToHandle(target));
   3744       generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
   3745       __ Call(code, RelocInfo::CODE_TARGET);
   3746     } else {
   3747       DCHECK(instr->target()->IsRegister());
   3748       Register target = ToRegister(instr->target());
   3749       generator.BeforeCall(__ CallSize(target));
   3750       __ AddP(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
   3751       __ CallJSEntry(ip);
   3752     }
   3753     generator.AfterCall();
   3754   }
   3755 }
   3756 
   3757 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
   3758   DCHECK(ToRegister(instr->context()).is(cp));
   3759   DCHECK(ToRegister(instr->constructor()).is(r3));
   3760   DCHECK(ToRegister(instr->result()).is(r2));
   3761 
   3762   __ mov(r2, Operand(instr->arity()));
   3763   __ Move(r4, instr->hydrogen()->site());
   3764 
   3765   ElementsKind kind = instr->hydrogen()->elements_kind();
   3766   AllocationSiteOverrideMode override_mode =
   3767       (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
   3768           ? DISABLE_ALLOCATION_SITES
   3769           : DONT_OVERRIDE;
   3770 
   3771   if (instr->arity() == 0) {
   3772     ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
   3773     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   3774   } else if (instr->arity() == 1) {
   3775     Label done;
   3776     if (IsFastPackedElementsKind(kind)) {
   3777       Label packed_case;
   3778       // We might need a holey elements kind;
   3779       // look at the first argument.
   3780       __ LoadP(r7, MemOperand(sp, 0));
   3781       __ CmpP(r7, Operand::Zero());
   3782       __ beq(&packed_case, Label::kNear);
   3783 
   3784       ElementsKind holey_kind = GetHoleyElementsKind(kind);
   3785       ArraySingleArgumentConstructorStub stub(isolate(), holey_kind,
   3786                                               override_mode);
   3787       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   3788       __ b(&done, Label::kNear);
   3789       __ bind(&packed_case);
   3790     }
   3791 
   3792     ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
   3793     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   3794     __ bind(&done);
   3795   } else {
   3796     ArrayNArgumentsConstructorStub stub(isolate());
   3797     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   3798   }
   3799 }
   3800 
   3801 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
   3802   CallRuntime(instr->function(), instr->arity(), instr);
   3803 }
   3804 
   3805 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
   3806   Register function = ToRegister(instr->function());
   3807   Register code_object = ToRegister(instr->code_object());
   3808   __ lay(code_object,
   3809          MemOperand(code_object, Code::kHeaderSize - kHeapObjectTag));
   3810   __ StoreP(code_object,
   3811             FieldMemOperand(function, JSFunction::kCodeEntryOffset), r0);
   3812 }
   3813 
   3814 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
   3815   Register result = ToRegister(instr->result());
   3816   Register base = ToRegister(instr->base_object());
   3817   if (instr->offset()->IsConstantOperand()) {
   3818     LConstantOperand* offset = LConstantOperand::cast(instr->offset());
   3819     __ lay(result, MemOperand(base, ToInteger32(offset)));
   3820   } else {
   3821     Register offset = ToRegister(instr->offset());
   3822     __ lay(result, MemOperand(base, offset));
   3823   }
   3824 }
   3825 
   3826 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
   3827   HStoreNamedField* hinstr = instr->hydrogen();
   3828   Representation representation = instr->representation();
   3829 
   3830   Register object = ToRegister(instr->object());
   3831   Register scratch = scratch0();
   3832   HObjectAccess access = hinstr->access();
   3833   int offset = access.offset();
   3834 
   3835   if (access.IsExternalMemory()) {
   3836     Register value = ToRegister(instr->value());
   3837     MemOperand operand = MemOperand(object, offset);
   3838     __ StoreRepresentation(value, operand, representation, r0);
   3839     return;
   3840   }
   3841 
   3842   __ AssertNotSmi(object);
   3843 
   3844 #if V8_TARGET_ARCH_S390X
   3845   DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
   3846          IsInteger32(LConstantOperand::cast(instr->value())));
   3847 #else
   3848   DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
   3849          IsSmi(LConstantOperand::cast(instr->value())));
   3850 #endif
   3851   if (!FLAG_unbox_double_fields && representation.IsDouble()) {
   3852     DCHECK(access.IsInobject());
   3853     DCHECK(!hinstr->has_transition());
   3854     DCHECK(!hinstr->NeedsWriteBarrier());
   3855     DoubleRegister value = ToDoubleRegister(instr->value());
   3856     DCHECK(offset >= 0);
   3857     __ StoreDouble(value, FieldMemOperand(object, offset));
   3858     return;
   3859   }
   3860 
   3861   if (hinstr->has_transition()) {
   3862     Handle<Map> transition = hinstr->transition_map();
   3863     AddDeprecationDependency(transition);
   3864     __ mov(scratch, Operand(transition));
   3865     __ StoreP(scratch, FieldMemOperand(object, HeapObject::kMapOffset), r0);
   3866     if (hinstr->NeedsWriteBarrierForMap()) {
   3867       Register temp = ToRegister(instr->temp());
   3868       // Update the write barrier for the map field.
   3869       __ RecordWriteForMap(object, scratch, temp, GetLinkRegisterState(),
   3870                            kSaveFPRegs);
   3871     }
   3872   }
   3873 
   3874   // Do the store.
   3875   Register record_dest = object;
   3876   Register record_value = no_reg;
   3877   Register record_scratch = scratch;
   3878 #if V8_TARGET_ARCH_S390X
   3879   if (FLAG_unbox_double_fields && representation.IsDouble()) {
   3880     DCHECK(access.IsInobject());
   3881     DoubleRegister value = ToDoubleRegister(instr->value());
   3882     __ StoreDouble(value, FieldMemOperand(object, offset));
   3883     if (hinstr->NeedsWriteBarrier()) {
   3884       record_value = ToRegister(instr->value());
   3885     }
   3886   } else {
   3887     if (representation.IsSmi() &&
   3888         hinstr->value()->representation().IsInteger32()) {
   3889       DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
   3890       // 64-bit Smi optimization
   3891       // Store int value directly to upper half of the smi.
   3892       offset = SmiWordOffset(offset);
   3893       representation = Representation::Integer32();
   3894     }
   3895 #endif
   3896     if (access.IsInobject()) {
   3897       Register value = ToRegister(instr->value());
   3898       MemOperand operand = FieldMemOperand(object, offset);
   3899       __ StoreRepresentation(value, operand, representation, r0);
   3900       record_value = value;
   3901     } else {
   3902       Register value = ToRegister(instr->value());
   3903       __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
   3904       MemOperand operand = FieldMemOperand(scratch, offset);
   3905       __ StoreRepresentation(value, operand, representation, r0);
   3906       record_dest = scratch;
   3907       record_value = value;
   3908       record_scratch = object;
   3909     }
   3910 #if V8_TARGET_ARCH_S390X
   3911   }
   3912 #endif
   3913 
   3914   if (hinstr->NeedsWriteBarrier()) {
   3915     __ RecordWriteField(record_dest, offset, record_value, record_scratch,
   3916                         GetLinkRegisterState(), kSaveFPRegs,
   3917                         EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
   3918                         hinstr->PointersToHereCheckForValue());
   3919   }
   3920 }
   3921 
   3922 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
   3923   Representation representation = instr->hydrogen()->length()->representation();
   3924   DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
   3925   DCHECK(representation.IsSmiOrInteger32());
   3926   Register temp = scratch0();
   3927 
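          // cc is the condition under which the check fails and we deoptimize:
          // length < index if equality is allowed, length <= index otherwise.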
   3928   Condition cc = instr->hydrogen()->allow_equality() ? lt : le;
   3929   if (instr->length()->IsConstantOperand()) {
   3930     int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
   3931     Register index = ToRegister(instr->index());
   3932     if (representation.IsSmi()) {
   3933       __ CmpLogicalSmiLiteral(index, Smi::FromInt(length), temp);
   3934     } else {
   3935       __ CmpLogical32(index, Operand(length));
   3936     }
   3937     cc = CommuteCondition(cc);
   3938   } else if (instr->index()->IsConstantOperand()) {
   3939     int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
   3940     Register length = ToRegister(instr->length());
   3941     if (representation.IsSmi()) {
   3942       __ CmpLogicalSmiLiteral(length, Smi::FromInt(index), temp);
   3943     } else {
   3944       __ CmpLogical32(length, Operand(index));
   3945     }
   3946   } else {
   3947     Register index = ToRegister(instr->index());
   3948     Register length = ToRegister(instr->length());
   3949     if (representation.IsSmi()) {
   3950       __ CmpLogicalP(length, index);
   3951     } else {
   3952       __ CmpLogical32(length, index);
   3953     }
   3954   }
   3955   if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
   3956     Label done;
   3957     __ b(NegateCondition(cc), &done, Label::kNear);
   3958     __ stop("eliminated bounds check failed");
   3959     __ bind(&done);
   3960   } else {
   3961     DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds);
   3962   }
   3963 }
   3964 
   3965 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
   3966   Register external_pointer = ToRegister(instr->elements());
   3967   Register key = no_reg;
   3968   ElementsKind elements_kind = instr->elements_kind();
   3969   bool key_is_constant = instr->key()->IsConstantOperand();
   3970   int constant_key = 0;
   3971   if (key_is_constant) {
   3972     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   3973     if (constant_key & 0xF0000000) {
   3974       Abort(kArrayIndexConstantValueTooBig);
   3975     }
   3976   } else {
   3977     key = ToRegister(instr->key());
   3978   }
   3979   int element_size_shift = ElementsKindToShiftSize(elements_kind);
   3980   bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
   3981   bool keyMaybeNegative = instr->hydrogen()->IsDehoisted();
   3982   int base_offset = instr->base_offset();
   3983 
   3984   if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
   3985     Register address = scratch0();
   3986     DoubleRegister value(ToDoubleRegister(instr->value()));
   3987     if (key_is_constant) {
   3988       if (constant_key != 0) {
   3989         base_offset += constant_key << element_size_shift;
   3990         if (!is_int20(base_offset)) {
   3991           __ mov(address, Operand(base_offset));
   3992           __ AddP(address, external_pointer);
   3993         } else {
   3994           __ AddP(address, external_pointer, Operand(base_offset));
   3995         }
   3996         base_offset = 0;
   3997       } else {
   3998         address = external_pointer;
   3999       }
   4000     } else {
   4001       __ IndexToArrayOffset(address, key, element_size_shift, key_is_smi,
   4002                             keyMaybeNegative);
   4003       __ AddP(address, external_pointer);
   4004     }
   4005     if (elements_kind == FLOAT32_ELEMENTS) {
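              // Round the double value to single precision (LEDBR) before the
              // 32-bit store.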
   4006       __ ledbr(double_scratch0(), value);
   4007       __ StoreFloat32(double_scratch0(), MemOperand(address, base_offset));
   4008     } else {  // Storing doubles, not floats.
   4009       __ StoreDouble(value, MemOperand(address, base_offset));
   4010     }
   4011   } else {
   4012     Register value(ToRegister(instr->value()));
   4013     MemOperand mem_operand =
   4014         PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
   4015                             constant_key, element_size_shift, base_offset,
   4016                             keyMaybeNegative);
   4017     switch (elements_kind) {
   4018       case UINT8_ELEMENTS:
   4019       case UINT8_CLAMPED_ELEMENTS:
   4020       case INT8_ELEMENTS:
   4021         if (key_is_constant) {
   4022           __ StoreByte(value, mem_operand, r0);
   4023         } else {
   4024           __ StoreByte(value, mem_operand);
   4025         }
   4026         break;
   4027       case INT16_ELEMENTS:
   4028       case UINT16_ELEMENTS:
   4029         if (key_is_constant) {
   4030           __ StoreHalfWord(value, mem_operand, r0);
   4031         } else {
   4032           __ StoreHalfWord(value, mem_operand);
   4033         }
   4034         break;
   4035       case INT32_ELEMENTS:
   4036       case UINT32_ELEMENTS:
   4037         if (key_is_constant) {
   4038           __ StoreW(value, mem_operand, r0);
   4039         } else {
   4040           __ StoreW(value, mem_operand);
   4041         }
   4042         break;
   4043       case FLOAT32_ELEMENTS:
   4044       case FLOAT64_ELEMENTS:
   4045       case FAST_DOUBLE_ELEMENTS:
   4046       case FAST_ELEMENTS:
   4047       case FAST_SMI_ELEMENTS:
   4048       case FAST_HOLEY_DOUBLE_ELEMENTS:
   4049       case FAST_HOLEY_ELEMENTS:
   4050       case FAST_HOLEY_SMI_ELEMENTS:
   4051       case DICTIONARY_ELEMENTS:
   4052       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
   4053       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
   4054       case FAST_STRING_WRAPPER_ELEMENTS:
   4055       case SLOW_STRING_WRAPPER_ELEMENTS:
   4056       case NO_ELEMENTS:
   4057         UNREACHABLE();
   4058         break;
   4059     }
   4060   }
   4061 }
   4062 
   4063 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
   4064   DoubleRegister value = ToDoubleRegister(instr->value());
   4065   Register elements = ToRegister(instr->elements());
   4066   Register key = no_reg;
   4067   Register scratch = scratch0();
   4068   DoubleRegister double_scratch = double_scratch0();
   4069   bool key_is_constant = instr->key()->IsConstantOperand();
   4070   int constant_key = 0;
   4071 
   4072   // Calculate the effective address of the slot in the array to store the
   4073   // double value.
   4074   if (key_is_constant) {
   4075     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   4076     if (constant_key & 0xF0000000) {
   4077       Abort(kArrayIndexConstantValueTooBig);
   4078     }
   4079   } else {
   4080     key = ToRegister(instr->key());
   4081   }
   4082   int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
   4083   bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
   4084   bool keyMaybeNegative = instr->hydrogen()->IsDehoisted();
   4085   int base_offset = instr->base_offset() + constant_key * kDoubleSize;
   4086   bool use_scratch = false;
   4087   intptr_t address_offset = base_offset;
   4088 
   4089   if (key_is_constant) {
   4090     // Memory references support up to a 20-bit signed displacement in RXY form.
   4091     if (!is_int20(address_offset)) {
   4092       __ mov(scratch, Operand(address_offset));
   4093       address_offset = 0;
   4094       use_scratch = true;
   4095     }
   4096   } else {
   4097     use_scratch = true;
   4098     __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi,
   4099                           keyMaybeNegative);
   4100     // Memory references support up to a 20-bit signed displacement in RXY form.
   4101     if (!is_int20(address_offset)) {
   4102       __ AddP(scratch, Operand(address_offset));
   4103       address_offset = 0;
   4104     }
   4105   }
   4106 
   4107   if (instr->NeedsCanonicalization()) {
   4108     // Turn potential sNaN value into qNaN.
   4109     __ CanonicalizeNaN(double_scratch, value);
   4110     DCHECK(address_offset >= 0);
   4111     if (use_scratch)
   4112       __ StoreDouble(double_scratch,
   4113                      MemOperand(scratch, elements, address_offset));
   4114     else
   4115       __ StoreDouble(double_scratch, MemOperand(elements, address_offset));
   4116   } else {
   4117     if (use_scratch)
   4118       __ StoreDouble(value, MemOperand(scratch, elements, address_offset));
   4119     else
   4120       __ StoreDouble(value, MemOperand(elements, address_offset));
   4121   }
   4122 }
   4123 
   4124 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
   4125   HStoreKeyed* hinstr = instr->hydrogen();
   4126   Register value = ToRegister(instr->value());
   4127   Register elements = ToRegister(instr->elements());
   4128   Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
   4129   Register scratch = scratch0();
   4130   int offset = instr->base_offset();
   4131 
   4132   // Do the store.
   4133   if (instr->key()->IsConstantOperand()) {
   4134     DCHECK(!hinstr->NeedsWriteBarrier());
   4135     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
   4136     offset += ToInteger32(const_operand) * kPointerSize;
   4137   } else {
   4138     // Even though the HStoreKeyed instruction forces the input
   4139     // representation for the key to be an integer, the input gets replaced
   4140     // during bounds check elimination with the index argument to the bounds
   4141     // check, which can be tagged, so that case must be handled here, too.
   4142     if (hinstr->key()->representation().IsSmi()) {
   4143       __ SmiToPtrArrayOffset(scratch, key);
   4144     } else {
   4145       if (instr->hydrogen()->IsDehoisted() ||
   4146           !CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
   4147 #if V8_TARGET_ARCH_S390X
   4148         // If array access is dehoisted, the key, being an int32, can contain
   4149         // a negative value and needs to be sign-extended to 64 bits for
   4150         // memory access.
   4151         __ lgfr(key, key);
   4152 #endif
   4153         __ ShiftLeftP(scratch, key, Operand(kPointerSizeLog2));
   4154       } else {
   4155         // Small optimization to reduce path length.  After the bounds check,
   4156         // the key is guaranteed to be non-negative.  Leverage RISBG,
   4157         // which also performs zero-extension.
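                // E.g. with kPointerSizeLog2 == 3: rotate key left by three,
                // then keep only bits 29..60 (the shifted 32-bit key) and zero
                // the remaining bits.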
   4158         __ risbg(scratch, key, Operand(32 - kPointerSizeLog2),
   4159                  Operand(63 - kPointerSizeLog2), Operand(kPointerSizeLog2),
   4160                  true);
   4161       }
   4162     }
   4163   }
   4164 
   4165   Representation representation = hinstr->value()->representation();
   4166 
   4167 #if V8_TARGET_ARCH_S390X
   4168   // 64-bit Smi optimization
   4169   if (representation.IsInteger32()) {
   4170     DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
   4171     DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
   4172     // Store int value directly to upper half of the smi.
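            // SmiWordOffset adjusts the offset so the 32-bit store hits the
            // word that holds the Smi payload (endianness-dependent).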
   4173     offset = SmiWordOffset(offset);
   4174   }
   4175 #endif
   4176 
   4177   if (instr->key()->IsConstantOperand()) {
   4178     __ StoreRepresentation(value, MemOperand(elements, offset), representation,
   4179                            scratch);
   4180   } else {
   4181     __ StoreRepresentation(value, MemOperand(scratch, elements, offset),
   4182                            representation, r0);
   4183   }
   4184 
   4185   if (hinstr->NeedsWriteBarrier()) {
   4186     SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
   4187                                 ? OMIT_SMI_CHECK
   4188                                 : INLINE_SMI_CHECK;
   4189     // Compute address of modified element and store it into key register.
   4190     if (instr->key()->IsConstantOperand()) {
   4191       __ lay(key, MemOperand(elements, offset));
   4192     } else {
   4193       __ lay(key, MemOperand(scratch, elements, offset));
   4194     }
   4195     __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs,
   4196                    EMIT_REMEMBERED_SET, check_needed,
   4197                    hinstr->PointersToHereCheckForValue());
   4198   }
   4199 }
   4200 
   4201 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
   4202   // Dispatch by case: typed (external) array, fast double array, or fast array.
   4203   if (instr->is_fixed_typed_array()) {
   4204     DoStoreKeyedExternalArray(instr);
   4205   } else if (instr->hydrogen()->value()->representation().IsDouble()) {
   4206     DoStoreKeyedFixedDoubleArray(instr);
   4207   } else {
   4208     DoStoreKeyedFixedArray(instr);
   4209   }
   4210 }
   4211 
   4212 void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
   4213   class DeferredMaybeGrowElements final : public LDeferredCode {
   4214    public:
   4215     DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
   4216         : LDeferredCode(codegen), instr_(instr) {}
   4217     void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
   4218     LInstruction* instr() override { return instr_; }
   4219 
   4220    private:
   4221     LMaybeGrowElements* instr_;
   4222   };
   4223 
   4224   Register result = r2;
   4225   DeferredMaybeGrowElements* deferred =
   4226       new (zone()) DeferredMaybeGrowElements(this, instr);
   4227   LOperand* key = instr->key();
   4228   LOperand* current_capacity = instr->current_capacity();
   4229 
   4230   DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
   4231   DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
   4232   DCHECK(key->IsConstantOperand() || key->IsRegister());
   4233   DCHECK(current_capacity->IsConstantOperand() ||
   4234          current_capacity->IsRegister());
   4235 
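          // Take the deferred (grow) path whenever key >= current capacity.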
   4236   if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
   4237     int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
   4238     int32_t constant_capacity =
   4239         ToInteger32(LConstantOperand::cast(current_capacity));
   4240     if (constant_key >= constant_capacity) {
   4241       // Deferred case.
   4242       __ b(deferred->entry());
   4243     }
   4244   } else if (key->IsConstantOperand()) {
   4245     int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
   4246     __ Cmp32(ToRegister(current_capacity), Operand(constant_key));
   4247     __ ble(deferred->entry());
   4248   } else if (current_capacity->IsConstantOperand()) {
   4249     int32_t constant_capacity =
   4250         ToInteger32(LConstantOperand::cast(current_capacity));
   4251     __ Cmp32(ToRegister(key), Operand(constant_capacity));
   4252     __ bge(deferred->entry());
   4253   } else {
   4254     __ Cmp32(ToRegister(key), ToRegister(current_capacity));
   4255     __ bge(deferred->entry());
   4256   }
   4257 
   4258   if (instr->elements()->IsRegister()) {
   4259     __ Move(result, ToRegister(instr->elements()));
   4260   } else {
   4261     __ LoadP(result, ToMemOperand(instr->elements()));
   4262   }
   4263 
   4264   __ bind(deferred->exit());
   4265 }
   4266 
   4267 void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
   4268   // TODO(3095996): Get rid of this. For now, we need to make the
   4269   // result register contain a valid pointer because it is already
   4270   // contained in the register pointer map.
   4271   Register result = r2;
   4272   __ LoadImmP(result, Operand::Zero());
   4273 
   4274   // We have to call a stub.
   4275   {
   4276     PushSafepointRegistersScope scope(this);
   4277     if (instr->object()->IsRegister()) {
   4278       __ Move(result, ToRegister(instr->object()));
   4279     } else {
   4280       __ LoadP(result, ToMemOperand(instr->object()));
   4281     }
   4282 
   4283     LOperand* key = instr->key();
   4284     if (key->IsConstantOperand()) {
   4285       LConstantOperand* constant_key = LConstantOperand::cast(key);
   4286       int32_t int_key = ToInteger32(constant_key);
   4287       if (Smi::IsValid(int_key)) {
   4288         __ LoadSmiLiteral(r5, Smi::FromInt(int_key));
   4289       } else {
   4290         // We should never get here at runtime because there is a smi check on
   4291         // the key before this point.
   4292         __ stop("expected smi");
   4293       }
   4294     } else {
   4295       __ SmiTag(r5, ToRegister(key));
   4296     }
   4297 
   4298     GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
   4299     __ CallStub(&stub);
   4300     RecordSafepointWithLazyDeopt(
   4301         instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
   4302     __ StoreToSafepointRegisterSlot(result, result);
   4303   }
   4304 
   4305   // Deopt on smi, which means the elements array changed to dictionary mode.
   4306   __ TestIfSmi(result);
   4307   DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
   4308 }
   4309 
   4310 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
   4311   Register object_reg = ToRegister(instr->object());
   4312   Register scratch = scratch0();
   4313 
   4314   Handle<Map> from_map = instr->original_map();
   4315   Handle<Map> to_map = instr->transitioned_map();
   4316   ElementsKind from_kind = instr->from_kind();
   4317   ElementsKind to_kind = instr->to_kind();
   4318 
   4319   Label not_applicable;
   4320   __ LoadP(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
   4321   __ CmpP(scratch, Operand(from_map));
   4322   __ bne(&not_applicable);
   4323 
   4324   if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
   4325     Register new_map_reg = ToRegister(instr->new_map_temp());
   4326     __ mov(new_map_reg, Operand(to_map));
   4327     __ StoreP(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
   4328     // Write barrier.
   4329     __ RecordWriteForMap(object_reg, new_map_reg, scratch,
   4330                          GetLinkRegisterState(), kDontSaveFPRegs);
   4331   } else {
   4332     DCHECK(ToRegister(instr->context()).is(cp));
   4333     DCHECK(object_reg.is(r2));
   4334     PushSafepointRegistersScope scope(this);
   4335     __ Move(r3, to_map);
   4336     TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
   4337     __ CallStub(&stub);
   4338     RecordSafepointWithRegisters(instr->pointer_map(), 0,
   4339                                  Safepoint::kLazyDeopt);
   4340   }
   4341   __ bind(&not_applicable);
   4342 }
   4343 
   4344 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
   4345   Register object = ToRegister(instr->object());
   4346   Register temp1 = ToRegister(instr->temp1());
   4347   Register temp2 = ToRegister(instr->temp2());
   4348   Label no_memento_found;
   4349   __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
   4350   DeoptimizeIf(eq, instr, DeoptimizeReason::kMementoFound);
   4351   __ bind(&no_memento_found);
   4352 }
   4353 
   4354 void LCodeGen::DoStringAdd(LStringAdd* instr) {
   4355   DCHECK(ToRegister(instr->context()).is(cp));
   4356   DCHECK(ToRegister(instr->left()).is(r3));
   4357   DCHECK(ToRegister(instr->right()).is(r2));
   4358   StringAddStub stub(isolate(), instr->hydrogen()->flags(),
   4359                      instr->hydrogen()->pretenure_flag());
   4360   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   4361 }
   4362 
   4363 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
   4364   class DeferredStringCharCodeAt final : public LDeferredCode {
   4365    public:
   4366     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
   4367         : LDeferredCode(codegen), instr_(instr) {}
   4368     void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
   4369     LInstruction* instr() override { return instr_; }
   4370 
   4371    private:
   4372     LStringCharCodeAt* instr_;
   4373   };
   4374 
   4375   DeferredStringCharCodeAt* deferred =
   4376       new (zone()) DeferredStringCharCodeAt(this, instr);
   4377 
   4378   StringCharLoadGenerator::Generate(
   4379       masm(), ToRegister(instr->string()), ToRegister(instr->index()),
   4380       ToRegister(instr->result()), deferred->entry());
   4381   __ bind(deferred->exit());
   4382 }
   4383 
   4384 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
   4385   Register string = ToRegister(instr->string());
   4386   Register result = ToRegister(instr->result());
   4387   Register scratch = scratch0();
   4388 
   4389   // TODO(3095996): Get rid of this. For now, we need to make the
   4390   // result register contain a valid pointer because it is already
   4391   // contained in the register pointer map.
   4392   __ LoadImmP(result, Operand::Zero());
   4393 
   4394   PushSafepointRegistersScope scope(this);
   4395   __ push(string);
   4396   // Push the index as a smi. This is safe because of the checks in
   4397   // DoStringCharCodeAt above.
   4398   if (instr->index()->IsConstantOperand()) {
   4399     int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   4400     __ LoadSmiLiteral(scratch, Smi::FromInt(const_index));
   4401     __ push(scratch);
   4402   } else {
   4403     Register index = ToRegister(instr->index());
   4404     __ SmiTag(index);
   4405     __ push(index);
   4406   }
   4407   CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
   4408                           instr->context());
   4409   __ AssertSmi(r2);
   4410   __ SmiUntag(r2);
   4411   __ StoreToSafepointRegisterSlot(r2, result);
   4412 }
   4413 
   4414 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
   4415   class DeferredStringCharFromCode final : public LDeferredCode {
   4416    public:
   4417     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
   4418         : LDeferredCode(codegen), instr_(instr) {}
   4419     void Generate() override {
   4420       codegen()->DoDeferredStringCharFromCode(instr_);
   4421     }
   4422     LInstruction* instr() override { return instr_; }
   4423 
   4424    private:
   4425     LStringCharFromCode* instr_;
   4426   };
   4427 
   4428   DeferredStringCharFromCode* deferred =
   4429       new (zone()) DeferredStringCharFromCode(this, instr);
   4430 
   4431   DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
   4432   Register char_code = ToRegister(instr->char_code());
   4433   Register result = ToRegister(instr->result());
   4434   DCHECK(!char_code.is(result));
   4435 
   4436   __ CmpLogicalP(char_code, Operand(String::kMaxOneByteCharCode));
   4437   __ bgt(deferred->entry());
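          // Fast path: index the single-character string cache; fall back to
          // the deferred path if the cache entry is undefined.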
   4438   __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
   4439   __ ShiftLeftP(r0, char_code, Operand(kPointerSizeLog2));
   4440   __ AddP(result, r0);
   4441   __ LoadP(result, FieldMemOperand(result, FixedArray::kHeaderSize));
   4442   __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
   4443   __ beq(deferred->entry());
   4444   __ bind(deferred->exit());
   4445 }
   4446 
   4447 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
   4448   Register char_code = ToRegister(instr->char_code());
   4449   Register result = ToRegister(instr->result());
   4450 
   4451   // TODO(3095996): Get rid of this. For now, we need to make the
   4452   // result register contain a valid pointer because it is already
   4453   // contained in the register pointer map.
   4454   __ LoadImmP(result, Operand::Zero());
   4455 
   4456   PushSafepointRegistersScope scope(this);
   4457   __ SmiTag(char_code);
   4458   __ push(char_code);
   4459   CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
   4460                           instr->context());
   4461   __ StoreToSafepointRegisterSlot(r2, result);
   4462 }
   4463 
   4464 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
   4465   LOperand* input = instr->value();
   4466   DCHECK(input->IsRegister() || input->IsStackSlot());
   4467   LOperand* output = instr->result();
   4468   DCHECK(output->IsDoubleRegister());
   4469   if (input->IsStackSlot()) {
   4470     Register scratch = scratch0();
   4471     __ LoadP(scratch, ToMemOperand(input));
   4472     __ ConvertIntToDouble(scratch, ToDoubleRegister(output));
   4473   } else {
   4474     __ ConvertIntToDouble(ToRegister(input), ToDoubleRegister(output));
   4475   }
   4476 }
   4477 
   4478 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
   4479   LOperand* input = instr->value();
   4480   LOperand* output = instr->result();
   4481   __ ConvertUnsignedIntToDouble(ToRegister(input), ToDoubleRegister(output));
   4482 }
   4483 
   4484 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
   4485   class DeferredNumberTagI final : public LDeferredCode {
   4486    public:
   4487     DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
   4488         : LDeferredCode(codegen), instr_(instr) {}
   4489     void Generate() override {
   4490       codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
   4491                                        instr_->temp2(), SIGNED_INT32);
   4492     }
   4493     LInstruction* instr() override { return instr_; }
   4494 
   4495    private:
   4496     LNumberTagI* instr_;
   4497   };
   4498 
   4499   Register src = ToRegister(instr->value());
   4500   Register dst = ToRegister(instr->result());
   4501 
   4502   DeferredNumberTagI* deferred = new (zone()) DeferredNumberTagI(this, instr);
   4503 #if V8_TARGET_ARCH_S390X
   4504   __ SmiTag(dst, src);
   4505 #else
   4506   // Add src to itself to Smi-tag the value and detect overflow.
   4507   __ Add32(dst, src, src);
   4508   __ b(overflow, deferred->entry());
   4509 #endif
   4510   __ bind(deferred->exit());
   4511 }
   4512 
   4513 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
   4514   class DeferredNumberTagU final : public LDeferredCode {
   4515    public:
   4516     DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
   4517         : LDeferredCode(codegen), instr_(instr) {}
   4518     void Generate() override {
   4519       codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
   4520                                        instr_->temp2(), UNSIGNED_INT32);
   4521     }
   4522     LInstruction* instr() override { return instr_; }
   4523 
   4524    private:
   4525     LNumberTagU* instr_;
   4526   };
   4527 
   4528   Register input = ToRegister(instr->value());
   4529   Register result = ToRegister(instr->result());
   4530 
   4531   DeferredNumberTagU* deferred = new (zone()) DeferredNumberTagU(this, instr);
   4532   __ CmpLogicalP(input, Operand(Smi::kMaxValue));
   4533   __ bgt(deferred->entry());
   4534   __ SmiTag(result, input);
   4535   __ bind(deferred->exit());
   4536 }
   4537 
   4538 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
   4539                                      LOperand* temp1, LOperand* temp2,
   4540                                      IntegerSignedness signedness) {
   4541   Label done, slow;
   4542   Register src = ToRegister(value);
   4543   Register dst = ToRegister(instr->result());
   4544   Register tmp1 = scratch0();
   4545   Register tmp2 = ToRegister(temp1);
   4546   Register tmp3 = ToRegister(temp2);
   4547   DoubleRegister dbl_scratch = double_scratch0();
   4548 
   4549   if (signedness == SIGNED_INT32) {
   4550     // There was overflow, so bits 30 and 31 of the original integer
   4551     // disagree. Try to allocate a heap number in new space and store
   4552     // the value in there. If that fails, call the runtime system.
   4553     if (dst.is(src)) {
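              // The tagging add clobbered src. Shifting the sum right by one
              // (SmiUntag) recovers every bit except the sign bit, which the
              // overflow inverted; xilf with kSignMask flips it back.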
   4554       __ SmiUntag(src, dst);
   4555       __ xilf(src, Operand(HeapNumber::kSignMask));
   4556     }
   4557     __ ConvertIntToDouble(src, dbl_scratch);
   4558   } else {
   4559     __ ConvertUnsignedIntToDouble(src, dbl_scratch);
   4560   }
   4561 
   4562   if (FLAG_inline_new) {
   4563     __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
   4564     __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
   4565     __ b(&done);
   4566   }
   4567 
   4568   // Slow case: Call the runtime system to do the number allocation.
   4569   __ bind(&slow);
   4570   {
   4571     // TODO(3095996): Put a valid pointer value in the stack slot where the
   4572     // result register is stored, as this register is in the pointer map, but
   4573     // contains an integer value.
   4574     __ LoadImmP(dst, Operand::Zero());
   4575 
   4576     // Preserve the value of all registers.
   4577     PushSafepointRegistersScope scope(this);
   4578     // Reset the context register.
   4579     if (!dst.is(cp)) {
   4580       __ LoadImmP(cp, Operand::Zero());
   4581     }
   4582     __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   4583     RecordSafepointWithRegisters(instr->pointer_map(), 0,
   4584                                  Safepoint::kNoLazyDeopt);
   4585     __ StoreToSafepointRegisterSlot(r2, dst);
   4586   }
   4587 
   4588   // Done. Put the value in dbl_scratch into the value of the allocated heap
   4589   // number.
   4590   __ bind(&done);
   4591   __ StoreDouble(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
   4592 }
   4593 
   4594 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
   4595   class DeferredNumberTagD final : public LDeferredCode {
   4596    public:
   4597     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
   4598         : LDeferredCode(codegen), instr_(instr) {}
   4599     void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
   4600     LInstruction* instr() override { return instr_; }
   4601 
   4602    private:
   4603     LNumberTagD* instr_;
   4604   };
   4605 
   4606   DoubleRegister input_reg = ToDoubleRegister(instr->value());
   4607   Register scratch = scratch0();
   4608   Register reg = ToRegister(instr->result());
   4609   Register temp1 = ToRegister(instr->temp());
   4610   Register temp2 = ToRegister(instr->temp2());
   4611 
   4612   DeferredNumberTagD* deferred = new (zone()) DeferredNumberTagD(this, instr);
   4613   if (FLAG_inline_new) {
   4614     __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
   4615     __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
   4616   } else {
   4617     __ b(deferred->entry());
   4618   }
   4619   __ bind(deferred->exit());
   4620   __ StoreDouble(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
   4621 }
   4622 
   4623 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
   4624   // TODO(3095996): Get rid of this. For now, we need to make the
   4625   // result register contain a valid pointer because it is already
   4626   // contained in the register pointer map.
   4627   Register reg = ToRegister(instr->result());
   4628   __ LoadImmP(reg, Operand::Zero());
   4629 
   4630   PushSafepointRegistersScope scope(this);
   4631   // Reset the context register.
   4632   if (!reg.is(cp)) {
   4633     __ LoadImmP(cp, Operand::Zero());
   4634   }
   4635   __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   4636   RecordSafepointWithRegisters(instr->pointer_map(), 0,
   4637                                Safepoint::kNoLazyDeopt);
   4638   __ StoreToSafepointRegisterSlot(r2, reg);
   4639 }
   4640 
   4641 void LCodeGen::DoSmiTag(LSmiTag* instr) {
   4642   HChange* hchange = instr->hydrogen();
   4643   Register input = ToRegister(instr->value());
   4644   Register output = ToRegister(instr->result());
   4645   if (hchange->CheckFlag(HValue::kCanOverflow) &&
   4646       hchange->value()->CheckFlag(HValue::kUint32)) {
   4647     __ TestUnsignedSmiCandidate(input, r0);
   4648     DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, cr0);
   4649   }
   4650 #if !V8_TARGET_ARCH_S390X
   4651   if (hchange->CheckFlag(HValue::kCanOverflow) &&
   4652       !hchange->value()->CheckFlag(HValue::kUint32)) {
   4653     __ SmiTagCheckOverflow(output, input, r0);
   4654     DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
   4655   } else {
   4656 #endif
   4657     __ SmiTag(output, input);
   4658 #if !V8_TARGET_ARCH_S390X
   4659   }
   4660 #endif
   4661 }
   4662 
   4663 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
   4664   Register input = ToRegister(instr->value());
   4665   Register result = ToRegister(instr->result());
   4666   if (instr->needs_check()) {
   4667     __ tmll(input, Operand(kHeapObjectTag));
   4668     DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
   4669     __ SmiUntag(result, input);
   4670   } else {
   4671     __ SmiUntag(result, input);
   4672   }
   4673 }
   4674 
   4675 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
   4676                                 DoubleRegister result_reg,
   4677                                 NumberUntagDMode mode) {
   4678   bool can_convert_undefined_to_nan = instr->truncating();
   4679   bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
   4680 
   4681   Register scratch = scratch0();
   4682   DCHECK(!result_reg.is(double_scratch0()));
   4683 
   4684   Label convert, load_smi, done;
   4685 
   4686   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
   4687     // Smi check.
   4688     __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
   4689 
   4690     // Heap number map check.
   4691     __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
   4692     __ CmpP(scratch, RootMemOperand(Heap::kHeapNumberMapRootIndex));
   4693 
   4694     if (can_convert_undefined_to_nan) {
   4695       __ bne(&convert, Label::kNear);
   4696     } else {
   4697       DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
   4698     }
   4699     // Load the heap number value.
   4700     __ LoadDouble(result_reg,
   4701                   FieldMemOperand(input_reg, HeapNumber::kValueOffset));
   4702     if (deoptimize_on_minus_zero) {
   4703       __ TestDoubleIsMinusZero(result_reg, scratch, ip);
   4704       DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
   4705     }
   4706     __ b(&done, Label::kNear);
   4707     if (can_convert_undefined_to_nan) {
   4708       __ bind(&convert);
   4709       // Convert undefined (and hole) to NaN.
   4710       __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
   4711       DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
   4712       __ LoadRoot(scratch, Heap::kNanValueRootIndex);
   4713       __ LoadDouble(result_reg,
   4714                     FieldMemOperand(scratch, HeapNumber::kValueOffset));
   4715       __ b(&done, Label::kNear);
   4716     }
   4717   } else {
   4718     __ SmiUntag(scratch, input_reg);
   4719     DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
   4720   }
   4721   // Smi to double register conversion
   4722   __ bind(&load_smi);
   4723   // scratch: untagged value of input_reg
   4724   __ ConvertIntToDouble(scratch, result_reg);
   4725   __ bind(&done);
   4726 }
   4727 
   4728 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
   4729   Register input_reg = ToRegister(instr->value());
   4730   Register scratch1 = scratch0();
   4731   Register scratch2 = ToRegister(instr->temp());
   4732   DoubleRegister double_scratch = double_scratch0();
   4733   DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
   4734 
   4735   DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
   4736   DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
   4737 
   4738   Label done;
   4739 
   4740   // Heap number map check.
   4741   __ LoadP(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
   4742   __ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex);
   4743 
   4744   if (instr->truncating()) {
   4745     Label truncate;
   4746     __ beq(&truncate);
   4747     __ CompareInstanceType(scratch1, scratch1, ODDBALL_TYPE);
   4748     DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball);
   4749     __ bind(&truncate);
   4750     __ LoadRR(scratch2, input_reg);
   4751     __ TruncateHeapNumberToI(input_reg, scratch2);
   4752   } else {
   4753     // Deoptimize if we don't have a heap number.
   4754     DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
   4755 
   4756     __ LoadDouble(double_scratch2,
   4757                   FieldMemOperand(input_reg, HeapNumber::kValueOffset));
   4758     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   4759       // preserve heap number pointer in scratch2 for minus zero check below
   4760       __ LoadRR(scratch2, input_reg);
   4761     }
   4762     __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
   4763                              double_scratch);
   4764     DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
   4765 
   4766     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   4767       __ CmpP(input_reg, Operand::Zero());
   4768       __ bne(&done, Label::kNear);
   4769       __ TestHeapNumberSign(scratch2, scratch1);
   4770       DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
   4771     }
   4772   }
   4773   __ bind(&done);
   4774 }
   4775 
   4776 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
   4777   class DeferredTaggedToI final : public LDeferredCode {
   4778    public:
   4779     DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
   4780         : LDeferredCode(codegen), instr_(instr) {}
   4781     void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
   4782     LInstruction* instr() override { return instr_; }
   4783 
   4784    private:
   4785     LTaggedToI* instr_;
   4786   };
   4787 
   4788   LOperand* input = instr->value();
   4789   DCHECK(input->IsRegister());
   4790   DCHECK(input->Equals(instr->result()));
   4791 
   4792   Register input_reg = ToRegister(input);
   4793 
   4794   if (instr->hydrogen()->value()->representation().IsSmi()) {
   4795     __ SmiUntag(input_reg);
   4796   } else {
   4797     DeferredTaggedToI* deferred = new (zone()) DeferredTaggedToI(this, instr);
   4798 
   4799     // Branch to deferred code if the input is a HeapObject.
   4800     __ JumpIfNotSmi(input_reg, deferred->entry());
   4801 
   4802     __ SmiUntag(input_reg);
   4803     __ bind(deferred->exit());
   4804   }
   4805 }
   4806 
   4807 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   4808   LOperand* input = instr->value();
   4809   DCHECK(input->IsRegister());
   4810   LOperand* result = instr->result();
   4811   DCHECK(result->IsDoubleRegister());
   4812 
   4813   Register input_reg = ToRegister(input);
   4814   DoubleRegister result_reg = ToDoubleRegister(result);
   4815 
   4816   HValue* value = instr->hydrogen()->value();
   4817   NumberUntagDMode mode = value->representation().IsSmi()
   4818                               ? NUMBER_CANDIDATE_IS_SMI
   4819                               : NUMBER_CANDIDATE_IS_ANY_TAGGED;
   4820 
   4821   EmitNumberUntagD(instr, input_reg, result_reg, mode);
   4822 }
   4823 
   4824 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
   4825   Register result_reg = ToRegister(instr->result());
   4826   Register scratch1 = scratch0();
   4827   DoubleRegister double_input = ToDoubleRegister(instr->value());
   4828   DoubleRegister double_scratch = double_scratch0();
   4829 
   4830   if (instr->truncating()) {
   4831     __ TruncateDoubleToI(result_reg, double_input);
   4832   } else {
   4833     __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
   4834                              double_scratch);
   4835     // Deoptimize if the input wasn't an int32 (inside a double).
   4836     DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
   4837     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   4838       Label done;
   4839       __ CmpP(result_reg, Operand::Zero());
   4840       __ bne(&done, Label::kNear);
   4841       __ TestDoubleSign(double_input, scratch1);
   4842       DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
   4843       __ bind(&done);
   4844     }
   4845   }
   4846 }
   4847 
   4848 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
   4849   Register result_reg = ToRegister(instr->result());
   4850   Register scratch1 = scratch0();
   4851   DoubleRegister double_input = ToDoubleRegister(instr->value());
   4852   DoubleRegister double_scratch = double_scratch0();
   4853 
   4854   if (instr->truncating()) {
   4855     __ TruncateDoubleToI(result_reg, double_input);
   4856   } else {
   4857     __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
   4858                              double_scratch);
   4859     // Deoptimize if the input wasn't an int32 (inside a double).
   4860     DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
   4861     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   4862       Label done;
   4863       __ CmpP(result_reg, Operand::Zero());
   4864       __ bne(&done, Label::kNear);
   4865       __ TestDoubleSign(double_input, scratch1);
   4866       DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
   4867       __ bind(&done);
   4868     }
   4869   }
   4870 #if V8_TARGET_ARCH_S390X
   4871   __ SmiTag(result_reg);
   4872 #else
   4873   __ SmiTagCheckOverflow(result_reg, r0);
   4874   DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
   4875 #endif
   4876 }
   4877 
   4878 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
   4879   LOperand* input = instr->value();
   4880   __ TestIfSmi(ToRegister(input));
   4881   DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
   4882 }
   4883 
   4884 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
   4885   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   4886     LOperand* input = instr->value();
   4887     __ TestIfSmi(ToRegister(input));
   4888     DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
   4889   }
   4890 }
   4891 
   4892 void LCodeGen::DoCheckArrayBufferNotNeutered(
   4893     LCheckArrayBufferNotNeutered* instr) {
   4894   Register view = ToRegister(instr->view());
   4895   Register scratch = scratch0();
   4896 
   4897   __ LoadP(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
   4898   __ LoadlW(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
   4899   __ And(r0, scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
   4900   DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds, cr0);
   4901 }
   4902 
   4903 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
   4904   Register input = ToRegister(instr->value());
   4905   Register scratch = scratch0();
   4906 
   4907   __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
   4908 
   4909   if (instr->hydrogen()->is_interval_check()) {
   4910     InstanceType first;
   4911     InstanceType last;
   4912     instr->hydrogen()->GetCheckInterval(&first, &last);
   4913 
   4914     __ CmpLogicalByte(FieldMemOperand(scratch, Map::kInstanceTypeOffset),
   4915                       Operand(first));
   4916 
   4917     // If there is only one type in the interval, check for equality.
   4918     if (first == last) {
   4919       DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
   4920     } else {
   4921       DeoptimizeIf(lt, instr, DeoptimizeReason::kWrongInstanceType);
   4922       // Omit check for the last type.
   4923       if (last != LAST_TYPE) {
   4924         __ CmpLogicalByte(FieldMemOperand(scratch, Map::kInstanceTypeOffset),
   4925                           Operand(last));
   4926         DeoptimizeIf(gt, instr, DeoptimizeReason::kWrongInstanceType);
   4927       }
   4928     }
   4929   } else {
   4930     uint8_t mask;
   4931     uint8_t tag;
   4932     instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
   4933 
   4934     __ LoadlB(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
   4935 
   4936     if (base::bits::IsPowerOfTwo32(mask)) {
   4937       DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
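              // With a single-bit mask, the tag can only be 0 or the mask
              // itself, so testing that one bit decides the check.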
   4938       __ AndP(scratch, Operand(mask));
   4939       DeoptimizeIf(tag == 0 ? ne : eq, instr,
   4940                    DeoptimizeReason::kWrongInstanceType);
   4941     } else {
   4942       __ AndP(scratch, Operand(mask));
   4943       __ CmpP(scratch, Operand(tag));
   4944       DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
   4945     }
   4946   }
   4947 }
   4948 
   4949 void LCodeGen::DoCheckValue(LCheckValue* instr) {
   4950   Register reg = ToRegister(instr->value());
   4951   Handle<HeapObject> object = instr->hydrogen()->object().handle();
   4952   AllowDeferredHandleDereference smi_check;
   4953   if (isolate()->heap()->InNewSpace(*object)) {
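            // The object may move in new space, so compare through a Cell
            // that the GC keeps up to date.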
   4955     Handle<Cell> cell = isolate()->factory()->NewCell(object);
   4956     __ mov(ip, Operand(cell));
   4957     __ CmpP(reg, FieldMemOperand(ip, Cell::kValueOffset));
   4958   } else {
   4959     __ CmpP(reg, Operand(object));
   4960   }
   4961   DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch);
   4962 }
   4963 
   4964 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
   4965   Register temp = ToRegister(instr->temp());
   4966   {
   4967     PushSafepointRegistersScope scope(this);
   4968     __ push(object);
   4969     __ LoadImmP(cp, Operand::Zero());
   4970     __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
   4971     RecordSafepointWithRegisters(instr->pointer_map(), 1,
   4972                                  Safepoint::kNoLazyDeopt);
   4973     __ StoreToSafepointRegisterSlot(r2, temp);
   4974   }
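          // kTryMigrateInstance returns a Smi on failure and the (possibly
          // migrated) object on success.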
   4975   __ TestIfSmi(temp);
   4976   DeoptimizeIf(eq, instr, DeoptimizeReason::kInstanceMigrationFailed, cr0);
   4977 }
   4978 
   4979 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
   4980   class DeferredCheckMaps final : public LDeferredCode {
   4981    public:
   4982     DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
   4983         : LDeferredCode(codegen), instr_(instr), object_(object) {
   4984       SetExit(check_maps());
   4985     }
   4986     void Generate() override {
   4987       codegen()->DoDeferredInstanceMigration(instr_, object_);
   4988     }
   4989     Label* check_maps() { return &check_maps_; }
   4990     LInstruction* instr() override { return instr_; }
   4991 
   4992    private:
   4993     LCheckMaps* instr_;
   4994     Label check_maps_;
   4995     Register object_;
   4996   };
   4997 
   4998   if (instr->hydrogen()->IsStabilityCheck()) {
   4999     const UniqueSet<Map>* maps = instr->hydrogen()->maps();
   5000     for (int i = 0; i < maps->size(); ++i) {
   5001       AddStabilityDependency(maps->at(i).handle());
   5002     }
   5003     return;
   5004   }
   5005 
   5006   LOperand* input = instr->value();
   5007   DCHECK(input->IsRegister());
   5008   Register reg = ToRegister(input);
   5009 
   5010   DeferredCheckMaps* deferred = NULL;
   5011   if (instr->hydrogen()->HasMigrationTarget()) {
   5012     deferred = new (zone()) DeferredCheckMaps(this, instr, reg);
   5013     __ bind(deferred->check_maps());
   5014   }
   5015 
   5016   const UniqueSet<Map>* maps = instr->hydrogen()->maps();
   5017   Label success;
   5018   for (int i = 0; i < maps->size() - 1; i++) {
   5019     Handle<Map> map = maps->at(i).handle();
   5020     __ CompareMap(reg, map, &success);
   5021     __ beq(&success);
   5022   }
   5023 
   5024   Handle<Map> map = maps->at(maps->size() - 1).handle();
   5025   __ CompareMap(reg, map, &success);
   5026   if (instr->hydrogen()->HasMigrationTarget()) {
   5027     __ bne(deferred->entry());
   5028   } else {
   5029     DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
   5030   }
   5031 
   5032   __ bind(&success);
   5033 }
   5034 
   5035 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
   5036   DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
   5037   Register result_reg = ToRegister(instr->result());
   5038   __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
   5039 }
   5040 
   5041 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
   5042   Register unclamped_reg = ToRegister(instr->unclamped());
   5043   Register result_reg = ToRegister(instr->result());
   5044   __ ClampUint8(result_reg, unclamped_reg);
   5045 }
   5046 
   5047 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
   5048   Register scratch = scratch0();
   5049   Register input_reg = ToRegister(instr->unclamped());
   5050   Register result_reg = ToRegister(instr->result());
   5051   DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
   5052   Label is_smi, done, heap_number;
   5053 
   5054   // Both smi and heap number cases are handled.
   5055   __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
   5056 
   5057   // Check for heap number
   5058   __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
   5059   __ CmpP(scratch, Operand(factory()->heap_number_map()));
   5060   __ beq(&heap_number, Label::kNear);
   5061 
   5062   // Check for undefined. Undefined is converted to zero for clamping
   5063   // conversions.
   5064   __ CmpP(input_reg, Operand(factory()->undefined_value()));
   5065   DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
   5066   __ LoadImmP(result_reg, Operand::Zero());
   5067   __ b(&done, Label::kNear);
   5068 
   5069   // Heap number
   5070   __ bind(&heap_number);
   5071   __ LoadDouble(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
   5072   __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
   5073   __ b(&done, Label::kNear);
   5074 
   5075   // smi
   5076   __ bind(&is_smi);
   5077   __ ClampUint8(result_reg, result_reg);
   5078 
   5079   __ bind(&done);
   5080 }
   5081 
   5082 void LCodeGen::DoAllocate(LAllocate* instr) {
   5083   class DeferredAllocate final : public LDeferredCode {
   5084    public:
   5085     DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
   5086         : LDeferredCode(codegen), instr_(instr) {}
   5087     void Generate() override { codegen()->DoDeferredAllocate(instr_); }
   5088     LInstruction* instr() override { return instr_; }
   5089 
   5090    private:
   5091     LAllocate* instr_;
   5092   };
   5093 
   5094   DeferredAllocate* deferred = new (zone()) DeferredAllocate(this, instr);
   5095 
   5096   Register result = ToRegister(instr->result());
   5097   Register scratch = ToRegister(instr->temp1());
   5098   Register scratch2 = ToRegister(instr->temp2());
   5099 
   5100   // Allocate memory for the object.
   5101   AllocationFlags flags = NO_ALLOCATION_FLAGS;
   5102   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
   5103     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
   5104   }
   5105   if (instr->hydrogen()->IsOldSpaceAllocation()) {
   5106     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
   5107     flags = static_cast<AllocationFlags>(flags | PRETENURE);
   5108   }
   5109 
   5110   if (instr->hydrogen()->IsAllocationFoldingDominator()) {
   5111     flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
   5112   }
   5113 
   5114   DCHECK(!instr->hydrogen()->IsAllocationFolded());
   5115 
   5116   if (instr->size()->IsConstantOperand()) {
   5117     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   5118     CHECK(size <= kMaxRegularHeapObjectSize);
   5119     __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
   5120   } else {
   5121     Register size = ToRegister(instr->size());
   5122     __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
   5123   }
   5124 
   5125   __ bind(deferred->exit());
   5126 
   5127   if (instr->hydrogen()->MustPrefillWithFiller()) {
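            // Fill the allocated space with one-pointer filler maps, walking
            // the store offset down from the last word of the object to zero.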
   5128     if (instr->size()->IsConstantOperand()) {
   5129       int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   5130       __ LoadIntLiteral(scratch, size);
   5131     } else {
   5132       scratch = ToRegister(instr->size());
   5133     }
   5134     __ lay(scratch, MemOperand(scratch, -kPointerSize));
   5135     Label loop;
   5136     __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
   5137     __ bind(&loop);
   5138     __ StoreP(scratch2, MemOperand(scratch, result, -kHeapObjectTag));
   5139 #if V8_TARGET_ARCH_S390X
   5140     __ lay(scratch, MemOperand(scratch, -kPointerSize));
   5141 #else
   5142     // TODO(joransiu): Improve the following sequence.
   5143     // Use AHI instead of LAY: LAY does not set the top nibble, which causes
   5144     // an incorrect result with the signed compare below.
   5145     __ AddP(scratch, Operand(-kPointerSize));
   5146 #endif
   5147     __ CmpP(scratch, Operand::Zero());
   5148     __ bge(&loop);
   5149   }
   5150 }
   5151 
   5152 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
   5153   Register result = ToRegister(instr->result());
   5154 
   5155   // TODO(3095996): Get rid of this. For now, we need to make the
   5156   // result register contain a valid pointer because it is already
   5157   // contained in the register pointer map.
   5158   __ LoadSmiLiteral(result, Smi::kZero);
   5159 
   5160   PushSafepointRegistersScope scope(this);
   5161   if (instr->size()->IsRegister()) {
   5162     Register size = ToRegister(instr->size());
   5163     DCHECK(!size.is(result));
   5164     __ SmiTag(size);
   5165     __ push(size);
   5166   } else {
   5167     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   5168 #if !V8_TARGET_ARCH_S390X
   5169     if (size >= 0 && size <= Smi::kMaxValue) {
   5170 #endif
   5171       __ Push(Smi::FromInt(size));
   5172 #if !V8_TARGET_ARCH_S390X
   5173     } else {
   5174       // We should never get here at runtime => abort
   5175       __ stop("invalid allocation size");
   5176       return;
   5177     }
   5178 #endif
   5179   }
   5180 
   5181   int flags = AllocateDoubleAlignFlag::encode(
   5182       instr->hydrogen()->MustAllocateDoubleAligned());
   5183   if (instr->hydrogen()->IsOldSpaceAllocation()) {
   5184     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
   5185     flags = AllocateTargetSpace::update(flags, OLD_SPACE);
   5186   } else {
   5187     flags = AllocateTargetSpace::update(flags, NEW_SPACE);
   5188   }
   5189   __ Push(Smi::FromInt(flags));
   5190 
   5191   CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr,
   5192                           instr->context());
   5193   __ StoreToSafepointRegisterSlot(r2, result);
   5194 
   5195   if (instr->hydrogen()->IsAllocationFoldingDominator()) {
   5196     AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
   5197     if (instr->hydrogen()->IsOldSpaceAllocation()) {
   5198       DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
   5199       allocation_flags = static_cast<AllocationFlags>(allocation_flags | PRETENURE);
   5200     }
   5201     // If the allocation-folding dominator's allocation triggered a GC, the
   5202     // allocation happened in the runtime. We have to reset the top pointer
   5203     // to virtually undo the allocation.
   5204     ExternalReference allocation_top =
   5205         AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
   5206     Register top_address = scratch0();
   5207     __ SubP(r2, r2, Operand(kHeapObjectTag));
   5208     __ mov(top_address, Operand(allocation_top));
   5209     __ StoreP(r2, MemOperand(top_address));
   5210     __ AddP(r2, r2, Operand(kHeapObjectTag));
   5211   }
   5212 }
   5213 
   5214 void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
   5215   DCHECK(instr->hydrogen()->IsAllocationFolded());
   5216   DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
   5217   Register result = ToRegister(instr->result());
   5218   Register scratch1 = ToRegister(instr->temp1());
   5219   Register scratch2 = ToRegister(instr->temp2());
   5220 
   5221   AllocationFlags flags = ALLOCATION_FOLDED;
   5222   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
   5223     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
   5224   }
   5225   if (instr->hydrogen()->IsOldSpaceAllocation()) {
   5226     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
   5227     flags = static_cast<AllocationFlags>(flags | PRETENURE);
   5228   }
   5229   if (instr->size()->IsConstantOperand()) {
   5230     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   5231     CHECK(size <= kMaxRegularHeapObjectSize);
   5232     __ FastAllocate(size, result, scratch1, scratch2, flags);
   5233   } else {
   5234     Register size = ToRegister(instr->size());
   5235     __ FastAllocate(size, result, scratch1, scratch2, flags);
   5236   }
   5237 }
   5238 
   5239 void LCodeGen::DoTypeof(LTypeof* instr) {
   5240   DCHECK(ToRegister(instr->value()).is(r5));
   5241   DCHECK(ToRegister(instr->result()).is(r2));
   5242   Label end, do_call;
   5243   Register value_register = ToRegister(instr->value());
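          // Smis always report "number"; everything else goes through the
          // Typeof code stub.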
   5244   __ JumpIfNotSmi(value_register, &do_call);
   5245   __ mov(r2, Operand(isolate()->factory()->number_string()));
   5246   __ b(&end);
   5247   __ bind(&do_call);
   5248   Callable callable = CodeFactory::Typeof(isolate());
   5249   CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
   5250   __ bind(&end);
   5251 }
   5252 
   5253 void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
   5254   Register input = ToRegister(instr->value());
   5255 
   5256   Condition final_branch_condition =
   5257       EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), input,
   5258                    instr->type_literal());
   5259   if (final_branch_condition != kNoCondition) {
   5260     EmitBranch(instr, final_branch_condition);
   5261   }
   5262 }
   5263 
Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
                                 Register input, Handle<String> type_name) {
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(input, true_label);
    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->string_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
    final_branch_condition = lt;

  } else if (String::Equals(type_name, factory->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ beq(true_label);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->undefined_string())) {
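    // typeof null is "object", not "undefined", so null goes to false_label.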
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    __ beq(false_label);
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ ExtractBit(r0, scratch, Map::kIsUndetectable);
    __ CmpP(r0, Operand::Zero());
    final_branch_condition = ne;

  } else if (String::Equals(type_name, factory->function_string())) {
    __ JumpIfSmi(input, false_label);
    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ AndP(scratch, scratch,
            Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    __ CmpP(scratch, Operand(1 << Map::kIsCallable));
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->object_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    __ beq(true_label);
    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
    __ CompareObjectType(input, scratch, ip, FIRST_JS_RECEIVER_TYPE);
    __ blt(false_label);
    // Check for callable or undetectable objects => false.
    __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ AndP(r0, scratch,
            Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    __ CmpP(r0, Operand::Zero());
    final_branch_condition = eq;

// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type)        \
  } else if (String::Equals(type_name, factory->type##_string())) {  \
    __ JumpIfSmi(input, false_label);                                \
    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); \
    __ CompareRoot(scratch, Heap::k##Type##MapRootIndex);            \
    final_branch_condition = eq;
  SIMD128_TYPES(SIMD128_TYPE)
#undef SIMD128_TYPE
    // clang-format on

  } else {
    __ b(false_label);
  }

  return final_branch_condition;
}

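// Keeps consecutive lazy-deopt patch sites at least |space_needed| bytes
// apart; s390 instructions are a multiple of two bytes long, hence the
// 2-byte nop padding steps.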
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % 2);
      while (padding_size > 0) {
        __ nop();
        padding_size -= 2;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}

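// Records the current pc as a lazy-deopt site and registers the environment
// so the deoptimizer can reconstruct the unoptimized frame here.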
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}

void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons
  // (they need the return address), even though the implementation of LAZY
  // and EAGER is now identical. When LAZY is eventually completely folded
  // into EAGER, remove the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
}

void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}

void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}

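// Slow path for DoStackCheck below: calls Runtime::kStackGuard with all
// registers saved and records the safepoint for lazy deoptimization.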
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}

void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck final : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
    __ bge(&done, Label::kNear);
    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new (zone()) DeferredStackCheck(this, instr);
    __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
    __ blt(deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting the call and the safepoint
    // in the deferred code.
  }
}

void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}

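// Produces in r2 the object a for-in loop will enumerate over: the object's
// map when the enum cache is usable, otherwise the result of
// Runtime::kForInEnumerate.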
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Label use_cache, call_runtime;
  __ CheckEnumCache(&call_runtime);

  __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
  __ b(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(r2);
  CallRuntime(Runtime::kForInEnumerate, instr);
  __ bind(&use_cache);
}

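// Loads the enum cache of |map| into |result|, or the empty fixed array when
// the enum length is zero. Deoptimizes if no cache is available.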
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ CmpSmiLiteral(result, Smi::kZero, r0);
  __ bne(&load_cache, Label::kNear);
  __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ b(&done, Label::kNear);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  __ CmpP(result, Operand::Zero());
  DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache);

  __ bind(&done);
}

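// Deoptimizes when the object's map differs from the expected |map|.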
void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  __ CmpP(map, scratch0());
  DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
}

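// Slow path for DoLoadFieldByIndex: calls Runtime::kLoadMutableDouble, which
// returns the field's value boxed as a HeapNumber in r2.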
void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result, Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object, index);
  __ LoadImmP(cp, Operand::Zero());
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(instr->pointer_map(), 2,
                               Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(r2, result);
}

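// |index| is a Smi that encodes the field's location: bit 0 set means the
// field is a mutable double and must go through the deferred runtime path;
// after shifting that bit out, a non-negative index addresses an in-object
// field and a negative one addresses the out-of-object properties array.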
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen, LLoadFieldByIndex* instr,
                              Register result, Register object, Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {}
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  DeferredLoadMutableDouble* deferred = new (zone())
      DeferredLoadMutableDouble(this, instr, result, object, index);

  Label out_of_object, done;

  __ TestBitMask(index, reinterpret_cast<uintptr_t>(Smi::FromInt(1)), r0);
  __ bne(deferred->entry());
  __ ShiftRightArithP(index, index, Operand(1));

  __ CmpP(index, Operand::Zero());
  __ blt(&out_of_object, Label::kNear);

  __ SmiToPtrArrayOffset(r0, index);
  __ AddP(scratch, object, r0);
  __ LoadP(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ b(&done, Label::kNear);

  __ bind(&out_of_object);
  __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // The index is equal to the negated out-of-object property index plus 1.
  __ SmiToPtrArrayOffset(r0, index);
  __ SubP(scratch, result, r0);
  __ LoadP(result,
           FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}

#undef __

}  // namespace internal
}  // namespace v8