// Copyright 2014 the V8 project authors. All rights reserved.
//
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/crankshaft/s390/lithium-codegen-s390.h"

#include "src/base/bits.h"
#include "src/builtins/builtins-constructor.h"
#include "src/code-factory.h"
#include "src/code-stubs.h"
#include "src/crankshaft/hydrogen-osr.h"
#include "src/crankshaft/s390/lithium-gap-resolver-s390.h"
#include "src/ic/ic.h"
#include "src/ic/stub-cache.h"

namespace v8 {
namespace internal {

class SafepointGenerator final : public CallWrapper {
 public:
  SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers,
                     Safepoint::DeoptMode mode)
      : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) {}
  virtual ~SafepointGenerator() {}

  void BeforeCall(int call_size) const override {}

  void AfterCall() const override {
    codegen_->RecordSafepoint(pointers_, deopt_mode_);
  }

 private:
  LCodeGen* codegen_;
  LPointerMap* pointers_;
  Safepoint::DeoptMode deopt_mode_;
};

LCodeGen::PushSafepointRegistersScope::PushSafepointRegistersScope(
    LCodeGen* codegen)
    : codegen_(codegen) {
  DCHECK(codegen_->info()->is_calling());
  DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
  codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
  StoreRegistersStateStub stub(codegen_->isolate());
  codegen_->masm_->CallStub(&stub);
}

LCodeGen::PushSafepointRegistersScope::~PushSafepointRegistersScope() {
  DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
  RestoreRegistersStateStub stub(codegen_->isolate());
  codegen_->masm_->CallStub(&stub);
  codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
}

#define __ masm()->

bool LCodeGen::GenerateCode() {
  LPhase phase("Z_Code generation", chunk());
  DCHECK(is_unused());
  status_ = GENERATING;

  // Open a frame scope to indicate that there is a frame on the stack.  The
  // NONE indicates that the scope shouldn't actually generate code to set up
  // the frame (that is done in GeneratePrologue).
  FrameScope frame_scope(masm_, StackFrame::NONE);

  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
         GenerateJumpTable() && GenerateSafepointTable();
}

void LCodeGen::FinishCode(Handle<Code> code) {
  DCHECK(is_done());
  code->set_stack_slots(GetTotalFrameSlotCount());
  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
  PopulateDeoptimizationData(code);
}

void LCodeGen::SaveCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Save clobbered callee double registers");
  int count = 0;
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  while (!save_iterator.Done()) {
    __ StoreDouble(DoubleRegister::from_code(save_iterator.Current()),
                   MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}

void LCodeGen::RestoreCallerDoubles() {
  DCHECK(info()->saves_caller_doubles());
  DCHECK(NeedsEagerFrame());
  Comment(";;; Restore clobbered callee double registers");
  BitVector* doubles = chunk()->allocated_double_registers();
  BitVector::Iterator save_iterator(doubles);
  int count = 0;
  while (!save_iterator.Done()) {
    __ LoadDouble(DoubleRegister::from_code(save_iterator.Current()),
                  MemOperand(sp, count * kDoubleSize));
    save_iterator.Advance();
    count++;
  }
}

bool LCodeGen::GeneratePrologue() {
  DCHECK(is_generating());

  if (info()->IsOptimizing()) {
    ProfileEntryHookStub::MaybeCallEntryHook(masm_);

    // r3: Callee's JS function.
    // cp: Callee's context.
    // fp: Caller's frame pointer.
    // lr: Caller's pc.
    // ip: Our own function entry (required by the prologue)
  }

  int prologue_offset = masm_->pc_offset();

  if (prologue_offset) {
    // Prologue logic requires its starting address in ip and the
    // corresponding offset from the function entry.  Need to add
    // 4 bytes for the size of AHI/AGHI that AddP expands into.
    prologue_offset += sizeof(FourByteInstr);
    __ AddP(ip, ip, Operand(prologue_offset));
  }
  info()->set_prologue_offset(prologue_offset);
  if (NeedsEagerFrame()) {
    if (info()->IsStub()) {
      __ StubPrologue(StackFrame::STUB, ip, prologue_offset);
    } else {
      __ Prologue(info()->GeneratePreagedPrologue(), ip, prologue_offset);
    }
    frame_is_built_ = true;
  }

  // Reserve space for the stack slots needed by the code.
  int slots = GetStackSlotCount();
  if (slots > 0) {
    __ lay(sp, MemOperand(sp, -(slots * kPointerSize)));
    if (FLAG_debug_code) {
      __ Push(r2, r3);
      __ mov(r2, Operand(slots * kPointerSize));
      __ mov(r3, Operand(kSlotsZapValue));
      Label loop;
      __ bind(&loop);
      __ StoreP(r3, MemOperand(sp, r2, kPointerSize));
      __ lay(r2, MemOperand(r2, -kPointerSize));
      __ CmpP(r2, Operand::Zero());
      __ bne(&loop);
      __ Pop(r2, r3);
    }
  }

  if (info()->saves_caller_doubles()) {
    SaveCallerDoubles();
  }
  return !is_aborted();
}

void LCodeGen::DoPrologue(LPrologue* instr) {
  Comment(";;; Prologue begin");

  // Possibly allocate a local context.
  if (info()->scope()->NeedsContext()) {
    Comment(";;; Allocate local context");
    bool need_write_barrier = true;
    // Argument to NewContext is the function, which is in r3.
    int slots = info()->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
    Safepoint::DeoptMode deopt_mode = Safepoint::kNoLazyDeopt;
    if (info()->scope()->is_script_scope()) {
      __ push(r3);
      __ Push(info()->scope()->scope_info());
      __ CallRuntime(Runtime::kNewScriptContext);
      deopt_mode = Safepoint::kLazyDeopt;
    } else {
      if (slots <=
          ConstructorBuiltinsAssembler::MaximumFunctionContextSlots()) {
        Callable callable = CodeFactory::FastNewFunctionContext(
            isolate(), info()->scope()->scope_type());
        __ mov(FastNewFunctionContextDescriptor::SlotsRegister(),
               Operand(slots));
        __ Call(callable.code(), RelocInfo::CODE_TARGET);
        // Result of the FastNewFunctionContext builtin is always in new space.
        need_write_barrier = false;
      } else {
        __ push(r3);
        __ Push(Smi::FromInt(info()->scope()->scope_type()));
        __ CallRuntime(Runtime::kNewFunctionContext);
      }
    }
    RecordSafepoint(deopt_mode);

    // Context is returned in both r2 and cp.  It replaces the context
    // passed to us.  It's saved in the stack and kept live in cp.
    __ LoadRR(cp, r2);
    __ StoreP(r2, MemOperand(fp, StandardFrameConstants::kContextOffset));
    // Copy any necessary parameters into the context.
    int num_parameters = info()->scope()->num_parameters();
    int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
    for (int i = first_parameter; i < num_parameters; i++) {
      Variable* var = (i == -1) ? info()->scope()->receiver()
                                : info()->scope()->parameter(i);
      if (var->IsContextSlot()) {
        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
                               (num_parameters - 1 - i) * kPointerSize;
        // Load parameter from stack.
        __ LoadP(r2, MemOperand(fp, parameter_offset));
        // Store it in the context.
        MemOperand target = ContextMemOperand(cp, var->index());
        __ StoreP(r2, target);
        // Update the write barrier. This clobbers r5 and r2.
        if (need_write_barrier) {
          __ RecordWriteContextSlot(cp, target.offset(), r2, r5,
                                    GetLinkRegisterState(), kSaveFPRegs);
        } else if (FLAG_debug_code) {
          Label done;
          __ JumpIfInNewSpace(cp, r2, &done);
          __ Abort(kExpectedNewSpaceObject);
          __ bind(&done);
        }
      }
    }
    Comment(";;; End allocate local context");
  }

  Comment(";;; Prologue end");
}

void LCodeGen::GenerateOsrPrologue() {
  // Generate the OSR entry prologue at the first unknown OSR value, or if there
  // are none, at the OSR entrypoint instruction.
  if (osr_pc_offset_ >= 0) return;

  osr_pc_offset_ = masm()->pc_offset();

  // Adjust the frame size, subsuming the unoptimized frame into the
  // optimized frame.
  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
  DCHECK(slots >= 0);
  __ lay(sp, MemOperand(sp, -slots * kPointerSize));
}

void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
  if (instr->IsCall()) {
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
  }
  if (!instr->IsLazyBailout() && !instr->IsGap()) {
    safepoints_.BumpLastLazySafepointIndex();
  }
}

bool LCodeGen::GenerateDeferredCode() {
  DCHECK(is_generating());
  if (deferred_.length() > 0) {
    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
      LDeferredCode* code = deferred_[i];

      HValue* value =
          instructions_->at(code->instruction_index())->hydrogen_value();
      RecordAndWritePosition(value->position());

      Comment(
          ";;; <@%d,#%d> "
          "-------------------- Deferred %s --------------------",
          code->instruction_index(), code->instr()->hydrogen_value()->id(),
          code->instr()->Mnemonic());
      __ bind(code->entry());
      if (NeedsDeferredFrame()) {
        Comment(";;; Build frame");
        DCHECK(!frame_is_built_);
        DCHECK(info()->IsStub());
        frame_is_built_ = true;
        __ Load(scratch0(),
                Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
        __ PushCommonFrame(scratch0());
        Comment(";;; Deferred code");
      }
      code->Generate();
      if (NeedsDeferredFrame()) {
        Comment(";;; Destroy frame");
        DCHECK(frame_is_built_);
        __ PopCommonFrame(scratch0());
        frame_is_built_ = false;
      }
      __ b(code->exit());
    }
  }

  return !is_aborted();
}

bool LCodeGen::GenerateJumpTable() {
  // Check that the jump table is accessible from everywhere in the function
  // code, i.e. that offsets in halfwords to the table can be encoded in the
  // 32-bit signed immediate of a branch instruction.
  // To simplify, we consider the code size from the first instruction to the
  // end of the jump table. We also don't consider the pc load delta.
  // Each entry in the jump table generates one instruction and inlines
  // 32-bit data after it.
  // TODO(joransiu): The Int24 condition can likely be relaxed for S390
  if (!is_int24(masm()->pc_offset() + jump_table_.length() * 7)) {
    Abort(kGeneratedCodeIsTooLarge);
  }

  if (jump_table_.length() > 0) {
    Label needs_frame, call_deopt_entry;

    Comment(";;; -------------------- Jump table --------------------");
    Address base = jump_table_[0].address;

    Register entry_offset = scratch0();

    int length = jump_table_.length();
    for (int i = 0; i < length; i++) {
      Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
      __ bind(&table_entry->label);

      DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
      Address entry = table_entry->address;
      DeoptComment(table_entry->deopt_info);

      // Second-level deopt table entries are contiguous and small, so instead
      // of loading the full, absolute address of each one, load an immediate
      // offset which will be added to the base address later.
      __ mov(entry_offset, Operand(entry - base));

      if (table_entry->needs_frame) {
        DCHECK(!info()->saves_caller_doubles());
        Comment(";;; call deopt with frame");
        __ PushCommonFrame();
        __ b(r14, &needs_frame);
      } else {
        __ b(r14, &call_deopt_entry);
      }
    }

    if (needs_frame.is_linked()) {
      __ bind(&needs_frame);
      // This variant of deopt can only be used with stubs. Since we don't
      // have a function pointer to install in the stack frame that we're
      // building, install a special marker there instead.
      DCHECK(info()->IsStub());
      __ Load(ip, Operand(StackFrame::TypeToMarker(StackFrame::STUB)));
      __ push(ip);
      DCHECK(info()->IsStub());
    }

    Comment(";;; call deopt");
    __ bind(&call_deopt_entry);

    if (info()->saves_caller_doubles()) {
      DCHECK(info()->IsStub());
      RestoreCallerDoubles();
    }

    // Add the base address to the offset previously loaded in entry_offset.
    __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
    __ AddP(ip, entry_offset, ip);
    __ Jump(ip);
  }

  // The deoptimization jump table is the last part of the instruction
  // sequence. Mark the generated code as done unless we bailed out.
  if (!is_aborted()) status_ = DONE;
  return !is_aborted();
}

bool LCodeGen::GenerateSafepointTable() {
  DCHECK(is_done());
  safepoints_.Emit(masm(), GetTotalFrameSlotCount());
  return !is_aborted();
}

Register LCodeGen::ToRegister(int code) const {
  return Register::from_code(code);
}

DoubleRegister LCodeGen::ToDoubleRegister(int code) const {
  return DoubleRegister::from_code(code);
}

Register LCodeGen::ToRegister(LOperand* op) const {
  DCHECK(op->IsRegister());
  return ToRegister(op->index());
}

Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
  if (op->IsRegister()) {
    return ToRegister(op->index());
  } else if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk_->LookupConstant(const_op);
    Handle<Object> literal = constant->handle(isolate());
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsInteger32()) {
      AllowDeferredHandleDereference get_number;
      DCHECK(literal->IsNumber());
      __ LoadIntLiteral(scratch, static_cast<int32_t>(literal->Number()));
    } else if (r.IsDouble()) {
      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
    } else {
      DCHECK(r.IsSmiOrTagged());
      __ Move(scratch, literal);
    }
    return scratch;
  } else if (op->IsStackSlot()) {
    __ LoadP(scratch, ToMemOperand(op));
    return scratch;
  }
  UNREACHABLE();
  return scratch;
}

void LCodeGen::EmitLoadIntegerConstant(LConstantOperand* const_op,
                                       Register dst) {
  DCHECK(IsInteger32(const_op));
  HConstant* constant = chunk_->LookupConstant(const_op);
  int32_t value = constant->Integer32Value();
  if (IsSmi(const_op)) {
    __ LoadSmiLiteral(dst, Smi::FromInt(value));
  } else {
    __ LoadIntLiteral(dst, value);
  }
}

DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
  DCHECK(op->IsDoubleRegister());
  return ToDoubleRegister(op->index());
}

Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
  return constant->handle(isolate());
}

bool LCodeGen::IsInteger32(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
}

bool LCodeGen::IsSmi(LConstantOperand* op) const {
  return chunk_->LookupLiteralRepresentation(op).IsSmi();
}

int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
  return ToRepresentation(op, Representation::Integer32());
}

intptr_t LCodeGen::ToRepresentation(LConstantOperand* op,
                                    const Representation& r) const {
  HConstant* constant = chunk_->LookupConstant(op);
  int32_t value = constant->Integer32Value();
  if (r.IsInteger32()) return value;
  DCHECK(r.IsSmiOrTagged());
  return reinterpret_cast<intptr_t>(Smi::FromInt(value));
}

Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  return Smi::FromInt(constant->Integer32Value());
}

double LCodeGen::ToDouble(LConstantOperand* op) const {
  HConstant* constant = chunk_->LookupConstant(op);
  DCHECK(constant->HasDoubleValue());
  return constant->DoubleValue();
}

Operand LCodeGen::ToOperand(LOperand* op) {
  if (op->IsConstantOperand()) {
    LConstantOperand* const_op = LConstantOperand::cast(op);
    HConstant* constant = chunk()->LookupConstant(const_op);
    Representation r = chunk_->LookupLiteralRepresentation(const_op);
    if (r.IsSmi()) {
      DCHECK(constant->HasSmiValue());
      return Operand(Smi::FromInt(constant->Integer32Value()));
    } else if (r.IsInteger32()) {
      DCHECK(constant->HasInteger32Value());
      return Operand(constant->Integer32Value());
    } else if (r.IsDouble()) {
      Abort(kToOperandUnsupportedDoubleImmediate);
    }
    DCHECK(r.IsTagged());
    return Operand(constant->handle(isolate()));
  } else if (op->IsRegister()) {
    return Operand(ToRegister(op));
  } else if (op->IsDoubleRegister()) {
    Abort(kToOperandIsDoubleRegisterUnimplemented);
    return Operand::Zero();
  }
  // Stack slots not implemented, use ToMemOperand instead.
  UNREACHABLE();
  return Operand::Zero();
}
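// Without an eager frame, parameter slots (negative indices) are addressed
// directly off sp: for example, index -1 maps to offset 0 and index -2 to
// offset kPointerSize.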
static int ArgumentsOffsetWithoutFrame(int index) {
  DCHECK(index < 0);
  return -(index + 1) * kPointerSize;
}

MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
  DCHECK(!op->IsRegister());
  DCHECK(!op->IsDoubleRegister());
  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, FrameSlotToFPOffset(op->index()));
  } else {
    // There is no eager frame, so address the parameter relative to the
    // stack pointer.
    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
  }
}

MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
  DCHECK(op->IsDoubleStackSlot());
  if (NeedsEagerFrame()) {
    return MemOperand(fp, FrameSlotToFPOffset(op->index()) + kPointerSize);
  } else {
    // There is no eager frame, so address the parameter relative to the
    // stack pointer.
    return MemOperand(sp,
                      ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
  }
}

void LCodeGen::WriteTranslation(LEnvironment* environment,
                                Translation* translation) {
  if (environment == NULL) return;

  // The translation includes one command per value in the environment.
  int translation_size = environment->translation_size();

  WriteTranslation(environment->outer(), translation);
  WriteTranslationFrame(environment, translation);

  int object_index = 0;
  int dematerialized_index = 0;
  for (int i = 0; i < translation_size; ++i) {
    LOperand* value = environment->values()->at(i);
    AddToTranslation(
        environment, translation, value, environment->HasTaggedValueAt(i),
        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
  }
}

void LCodeGen::AddToTranslation(LEnvironment* environment,
                                Translation* translation, LOperand* op,
                                bool is_tagged, bool is_uint32,
                                int* object_index_pointer,
                                int* dematerialized_index_pointer) {
  if (op == LEnvironment::materialization_marker()) {
    int object_index = (*object_index_pointer)++;
    if (environment->ObjectIsDuplicateAt(object_index)) {
      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
      translation->DuplicateObject(dupe_of);
      return;
    }
    int object_length = environment->ObjectLengthAt(object_index);
    if (environment->ObjectIsArgumentsAt(object_index)) {
      translation->BeginArgumentsObject(object_length);
    } else {
      translation->BeginCapturedObject(object_length);
    }
    int dematerialized_index = *dematerialized_index_pointer;
    int env_offset = environment->translation_size() + dematerialized_index;
    *dematerialized_index_pointer += object_length;
    for (int i = 0; i < object_length; ++i) {
      LOperand* value = environment->values()->at(env_offset + i);
      AddToTranslation(environment, translation, value,
                       environment->HasTaggedValueAt(env_offset + i),
                       environment->HasUint32ValueAt(env_offset + i),
                       object_index_pointer, dematerialized_index_pointer);
    }
    return;
  }

  if (op->IsStackSlot()) {
    int index = op->index();
    if (is_tagged) {
      translation->StoreStackSlot(index);
    } else if (is_uint32) {
      translation->StoreUint32StackSlot(index);
    } else {
      translation->StoreInt32StackSlot(index);
    }
  } else if (op->IsDoubleStackSlot()) {
    int index = op->index();
    translation->StoreDoubleStackSlot(index);
  } else if (op->IsRegister()) {
    Register reg = ToRegister(op);
    if (is_tagged) {
      translation->StoreRegister(reg);
    } else if (is_uint32) {
      translation->StoreUint32Register(reg);
    } else {
      translation->StoreInt32Register(reg);
    }
  } else if (op->IsDoubleRegister()) {
    DoubleRegister reg = ToDoubleRegister(op);
    translation->StoreDoubleRegister(reg);
  } else if (op->IsConstantOperand()) {
    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
    translation->StoreLiteral(src_index);
  } else {
    UNREACHABLE();
  }
}

void LCodeGen::CallCode(Handle<Code> code, RelocInfo::Mode mode,
                        LInstruction* instr) {
  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
}

void LCodeGen::CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
                               LInstruction* instr,
                               SafepointMode safepoint_mode) {
  DCHECK(instr != NULL);
  __ Call(code, mode);
  RecordSafepointWithLazyDeopt(instr, safepoint_mode);

  // Signal that we don't inline smi code before these stubs in the
  // optimizing code generator.
  if (code->kind() == Code::BINARY_OP_IC || code->kind() == Code::COMPARE_IC) {
    __ nop();
  }
}

void LCodeGen::CallRuntime(const Runtime::Function* function, int num_arguments,
                           LInstruction* instr, SaveFPRegsMode save_doubles) {
  DCHECK(instr != NULL);

  __ CallRuntime(function, num_arguments, save_doubles);

  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
}

void LCodeGen::LoadContextFromDeferred(LOperand* context) {
  if (context->IsRegister()) {
    __ Move(cp, ToRegister(context));
  } else if (context->IsStackSlot()) {
    __ LoadP(cp, ToMemOperand(context));
  } else if (context->IsConstantOperand()) {
    HConstant* constant =
        chunk_->LookupConstant(LConstantOperand::cast(context));
    __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
  } else {
    UNREACHABLE();
  }
}

void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
                                       LInstruction* instr, LOperand* context) {
  LoadContextFromDeferred(context);
  __ CallRuntimeSaveDoubles(id);
  RecordSafepointWithRegisters(instr->pointer_map(), argc,
                               Safepoint::kNoLazyDeopt);
}

void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                                    Safepoint::DeoptMode mode) {
  environment->set_has_been_used();
  if (!environment->HasBeenRegistered()) {
    // Physical stack frame layout:
    // -x ............. -4  0 ..................................... y
    // [incoming arguments] [spill slots] [pushed outgoing arguments]

    // Layout of the environment:
    // 0 ..................................................... size-1
    // [parameters] [locals] [expression stack including arguments]

    // Layout of the translation:
    // 0 ........................................................ size - 1 + 4
    // [expression stack including arguments] [locals] [4 words] [parameters]
    // |>------------  translation_size ------------<|

    int frame_count = 0;
    int jsframe_count = 0;
    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
      ++frame_count;
      if (e->frame_type() == JS_FUNCTION) {
        ++jsframe_count;
      }
    }
    Translation translation(&translations_, frame_count, jsframe_count, zone());
    WriteTranslation(environment, &translation);
    int deoptimization_index = deoptimizations_.length();
    int pc_offset = masm()->pc_offset();
    environment->Register(deoptimization_index, translation.index(),
                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
    deoptimizations_.Add(environment, zone());
  }
}

void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
                            DeoptimizeReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type,
                            CRegister cr) {
  LEnvironment* environment = instr->environment();
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
  DCHECK(environment->HasBeenRegistered());
  int id = environment->deoptimization_index();
  Address entry =
      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
  if (entry == NULL) {
    Abort(kBailoutWasNotPrepared);
    return;
  }

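  // Stress mode: FLAG_deopt_every_n_times forces a deoptimization once the
  // per-isolate countdown reaches zero and then resets the counter. Any
  // condition is materialized and preserved on the stack around the update.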
  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
    Register scratch = scratch0();
    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
    Label no_deopt;

    // Store the condition on the stack if necessary
    if (cond != al) {
      Label done;
      __ LoadImmP(scratch, Operand::Zero());
      __ b(NegateCondition(cond), &done, Label::kNear);
      __ LoadImmP(scratch, Operand(1));
      __ bind(&done);
      __ push(scratch);
    }

    Label done;
    __ Push(r3);
    __ mov(scratch, Operand(count));
    __ LoadW(r3, MemOperand(scratch));
    __ Sub32(r3, r3, Operand(1));
    __ Cmp32(r3, Operand::Zero());
    __ bne(&no_deopt, Label::kNear);

    __ LoadImmP(r3, Operand(FLAG_deopt_every_n_times));
    __ StoreW(r3, MemOperand(scratch));
    __ Pop(r3);

    if (cond != al) {
      // Clean up the stack before the deoptimizer call
      __ pop(scratch);
    }

    __ Call(entry, RelocInfo::RUNTIME_ENTRY);

    __ b(&done);

    __ bind(&no_deopt);
    __ StoreW(r3, MemOperand(scratch));
    __ Pop(r3);

    if (cond != al) {
      // Clean up the stack before the deoptimizer call
      __ pop(scratch);
    }

    __ bind(&done);

    if (cond != al) {
      cond = ne;
      __ CmpP(scratch, Operand::Zero());
    }
  }

  if (info()->ShouldTrapOnDeopt()) {
    __ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
  }

  Deoptimizer::DeoptInfo deopt_info = MakeDeoptInfo(instr, deopt_reason, id);

  DCHECK(info()->IsStub() || frame_is_built_);
  // Go through jump table if we need to handle condition, build frame, or
  // restore caller doubles.
  if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
  } else {
    Deoptimizer::JumpTableEntry table_entry(entry, deopt_info, bailout_type,
                                            !frame_is_built_);
    // We often have several deopts to the same entry, reuse the last
    // jump entry if this is the case.
    if (FLAG_trace_deopt || isolate()->is_profiling() ||
        jump_table_.is_empty() ||
        !table_entry.IsEquivalentTo(jump_table_.last())) {
      jump_table_.Add(table_entry, zone());
    }
    __ b(cond, &jump_table_.last().label /*, cr*/);
  }
}

void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
                            DeoptimizeReason deopt_reason, CRegister cr) {
  Deoptimizer::BailoutType bailout_type =
      info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
  DeoptimizeIf(cond, instr, deopt_reason, bailout_type, cr);
}

void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
                                            SafepointMode safepoint_mode) {
  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
  } else {
    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
    RecordSafepointWithRegisters(instr->pointer_map(), 0,
                                 Safepoint::kLazyDeopt);
  }
}

void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
                               int arguments, Safepoint::DeoptMode deopt_mode) {
  DCHECK(expected_safepoint_kind_ == kind);

  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
  Safepoint safepoint =
      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
  for (int i = 0; i < operands->length(); i++) {
    LOperand* pointer = operands->at(i);
    if (pointer->IsStackSlot()) {
      safepoint.DefinePointerSlot(pointer->index(), zone());
    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
    }
  }
}

void LCodeGen::RecordSafepoint(LPointerMap* pointers,
                               Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
}

void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
  LPointerMap empty_pointers(zone());
  RecordSafepoint(&empty_pointers, deopt_mode);
}

void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
                                            int arguments,
                                            Safepoint::DeoptMode deopt_mode) {
  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
}

static const char* LabelType(LLabel* label) {
  if (label->is_loop_header()) return " (loop header)";
  if (label->is_osr_entry()) return " (OSR entry)";
  return "";
}

void LCodeGen::DoLabel(LLabel* label) {
  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
          current_instruction_, label->hydrogen_value()->id(),
          label->block_id(), LabelType(label));
  __ bind(label->label());
  current_block_ = label->block_id();
  DoGap(label);
}

void LCodeGen::DoParallelMove(LParallelMove* move) { resolver_.Resolve(move); }

void LCodeGen::DoGap(LGap* gap) {
  for (int i = LGap::FIRST_INNER_POSITION; i <= LGap::LAST_INNER_POSITION;
       i++) {
    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
    LParallelMove* move = gap->GetParallelMove(inner_pos);
    if (move != NULL) DoParallelMove(move);
  }
}

void LCodeGen::DoInstructionGap(LInstructionGap* instr) { DoGap(instr); }

void LCodeGen::DoParameter(LParameter* instr) {
  // Nothing to do.
}

void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
  GenerateOsrPrologue();
}

void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  DCHECK(dividend.is(ToRegister(instr->result())));

  // Theoretically, a variation of the branch-free code for integer division by
  // a power of 2 (calculating the remainder via an additional multiplication
  // (which gets simplified to an 'and') and subtraction) should be faster, and
  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
  // indicate that positive dividends are heavily favored, so the branching
  // version performs better.
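  // For example, with divisor +/-8 (shift == 3): -13 % 8 negates to 13,
  // masks the low three bits (13 & 7 == 5), and negates back to -5.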
  HMod* hmod = instr->hydrogen();
  int32_t shift = WhichPowerOf2Abs(divisor);
  Label dividend_is_not_negative, done;
  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
    __ CmpP(dividend, Operand::Zero());
    __ bge(&dividend_is_not_negative, Label::kNear);
    if (shift) {
      // Note that this is correct even for kMinInt operands.
      __ LoadComplementRR(dividend, dividend);
      __ ExtractBitRange(dividend, dividend, shift - 1, 0);
      __ LoadComplementRR(dividend, dividend);
      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
        DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
      }
    } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ mov(dividend, Operand::Zero());
    } else {
      DeoptimizeIf(al, instr, DeoptimizeReason::kMinusZero);
    }
    __ b(&done, Label::kNear);
  }

  __ bind(&dividend_is_not_negative);
  if (shift) {
    __ ExtractBitRange(dividend, dividend, shift - 1, 0);
  } else {
    __ mov(dividend, Operand::Zero());
  }
  __ bind(&done);
}

void LCodeGen::DoModByConstI(LModByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
    return;
  }

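  // Compute dividend - (dividend / |divisor|) * |divisor|. A truncating
  // remainder takes the sign of the dividend, so the divisor's sign is
  // irrelevant here.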
  __ TruncatingDiv(result, dividend, Abs(divisor));
  __ mov(ip, Operand(Abs(divisor)));
  __ Mul(result, result, ip);
  __ SubP(result, dividend, result /*, LeaveOE, SetRC*/);

  // Check for negative zero.
  HMod* hmod = instr->hydrogen();
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label remainder_not_zero;
    __ bne(&remainder_not_zero, Label::kNear /*, cr0*/);
    __ Cmp32(dividend, Operand::Zero());
    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
    __ bind(&remainder_not_zero);
  }
}

void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  Register left_reg = ToRegister(instr->left());
  Register right_reg = ToRegister(instr->right());
  Register result_reg = ToRegister(instr->result());
  Label done;

  // Check for x % 0.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    __ Cmp32(right_reg, Operand::Zero());
    DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
  }

  // Check for kMinInt % -1: dr cannot handle the overflowing quotient, which
  // is not what we want. We have to deopt if we care about -0, because we
  // can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ Cmp32(left_reg, Operand(kMinInt));
    __ bne(&no_overflow_possible, Label::kNear);
    __ Cmp32(right_reg, Operand(-1));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
    } else {
      __ b(ne, &no_overflow_possible, Label::kNear);
      __ mov(result_reg, Operand::Zero());
      __ b(&done, Label::kNear);
    }
    __ bind(&no_overflow_possible);
  }

  // The divide instruction dr below implicitly uses the register pair
  // r0 & r1.
  DCHECK(!left_reg.is(r1));
  DCHECK(!right_reg.is(r1));
  DCHECK(!result_reg.is(r1));
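  // Sign-extend the 32-bit dividend into the even/odd pair r0:r1 so that
  // dr can divide the 64-bit value in r0:r1 by the 32-bit divisor.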
  __ LoadRR(r0, left_reg);
  __ srda(r0, Operand(32));
  __ dr(r0, right_reg);  // R0:R1 = R1 / divisor - R0 remainder

  __ LoadAndTestP_ExtendSrc(result_reg, r0);  // Copy remainder to result reg

  // If we care about -0, test if the dividend is <0 and the result is 0.
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    __ bne(&done, Label::kNear);
    __ Cmp32(left_reg, Operand::Zero());
    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
  }

  __ bind(&done);
}

void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
  DCHECK(!result.is(dividend));

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ Cmp32(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
  }
  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
    __ Cmp32(dividend, Operand(0x80000000));
    DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
  }

  int32_t shift = WhichPowerOf2Abs(divisor);

  // Deoptimize if remainder will not be 0.
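  // For |divisor| == 2^shift the remainder is exactly the low 'shift' bits
  // of the dividend, which TestBitRange inspects below.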
  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
    __ TestBitRange(dividend, shift - 1, 0, r0);
    DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision, cr0);
  }

  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
    __ LoadComplementRR(result, dividend);
    return;
  }
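  // For negative dividends, bias by (2^shift - 1) before the arithmetic
  // shift so the division rounds toward zero, e.g. -7 / 4 yields -1, not -2.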
  if (shift == 0) {
    __ LoadRR(result, dividend);
  } else {
    if (shift == 1) {
      __ ShiftRight(result, dividend, Operand(31));
    } else {
      __ ShiftRightArith(result, dividend, Operand(31));
      __ ShiftRight(result, result, Operand(32 - shift));
    }
    __ AddP(result, dividend, result);
    __ ShiftRightArith(result, result, Operand(shift));
#if V8_TARGET_ARCH_S390X
    __ lgfr(result, result);
#endif
  }
  if (divisor < 0) __ LoadComplementRR(result, result);
}

void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ Cmp32(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
  }

  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ LoadComplementRR(result, result);

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    Register scratch = scratch0();
    __ mov(ip, Operand(divisor));
    __ Mul(scratch, result, ip);
    __ Cmp32(scratch, dividend);
    DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
  }
}

// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  const Register dividend = ToRegister(instr->dividend());
  const Register divisor = ToRegister(instr->divisor());
  Register result = ToRegister(instr->result());

  DCHECK(!dividend.is(result));
  DCHECK(!divisor.is(result));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ Cmp32(divisor, Operand::Zero());
    DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ Cmp32(dividend, Operand::Zero());
    __ bne(&dividend_not_zero, Label::kNear);
    __ Cmp32(divisor, Operand::Zero());
    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label dividend_not_min_int;
    __ Cmp32(dividend, Operand(kMinInt));
    __ bne(&dividend_not_min_int, Label::kNear);
    __ Cmp32(divisor, Operand(-1));
    DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
    __ bind(&dividend_not_min_int);
  }

  __ LoadRR(r0, dividend);
  __ srda(r0, Operand(32));
  __ dr(r0, divisor);  // R0:R1 = R1 / divisor - R0 remainder - R1 quotient

  __ LoadAndTestP_ExtendSrc(result, r1);  // Move quotient to result register

  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
    // Deoptimize if remainder is not 0.
    __ Cmp32(r0, Operand::Zero());
    DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
  }
}

void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register result = ToRegister(instr->result());
  int32_t divisor = instr->divisor();
  bool can_overflow = hdiv->CheckFlag(HValue::kLeftCanBeMinInt);

  // If the divisor is positive, things are easy: There can be no deopts and we
  // can simply do an arithmetic right shift.
  int32_t shift = WhichPowerOf2Abs(divisor);
  if (divisor > 0) {
    if (shift || !result.is(dividend)) {
      __ ShiftRightArith(result, dividend, Operand(shift));
#if V8_TARGET_ARCH_S390X
      __ lgfr(result, result);
#endif
    }
    return;
  }

// If the divisor is negative, we have to negate and handle edge cases.
#if V8_TARGET_ARCH_S390X
  if (divisor == -1 && can_overflow) {
    __ Cmp32(dividend, Operand(0x80000000));
    DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
  }
#endif

  __ LoadComplementRR(result, dividend);
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero, cr0);
  }

// If the negation could not overflow, simply shifting is OK.
#if !V8_TARGET_ARCH_S390X
  if (!can_overflow) {
#endif
    if (shift) {
      __ ShiftRightArithP(result, result, Operand(shift));
    }
    return;
#if !V8_TARGET_ARCH_S390X
  }

  // Dividing by -1 is basically negation, unless we overflow.
  if (divisor == -1) {
    DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
    return;
  }

  Label overflow_label, done;
  __ b(overflow, &overflow_label, Label::kNear);
  __ ShiftRightArith(result, result, Operand(shift));
#if V8_TARGET_ARCH_S390X
  __ lgfr(result, result);
#endif
  __ b(&done, Label::kNear);
  __ bind(&overflow_label);
  __ mov(result, Operand(kMinInt / divisor));
  __ bind(&done);
#endif
}

void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
  Register dividend = ToRegister(instr->dividend());
  int32_t divisor = instr->divisor();
  Register result = ToRegister(instr->result());
  DCHECK(!dividend.is(result));

  if (divisor == 0) {
    DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
    return;
  }

  // Check for (0 / -x) that will produce negative zero.
  HMathFloorOfDiv* hdiv = instr->hydrogen();
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
    __ Cmp32(dividend, Operand::Zero());
    DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
  }

  // Easy case: We need no dynamic check for the dividend and the flooring
  // division is the same as the truncating division.
  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
    __ TruncatingDiv(result, dividend, Abs(divisor));
    if (divisor < 0) __ LoadComplementRR(result, result);
    return;
  }

  // In the general case we may need to adjust before and after the truncating
  // division to get a flooring division.
  Register temp = ToRegister(instr->temp());
  DCHECK(!temp.is(dividend) && !temp.is(result));
  Label needs_adjustment, done;
  __ Cmp32(dividend, Operand::Zero());
  __ b(divisor > 0 ? lt : gt, &needs_adjustment);
  __ TruncatingDiv(result, dividend, Abs(divisor));
  if (divisor < 0) __ LoadComplementRR(result, result);
  __ b(&done, Label::kNear);
  __ bind(&needs_adjustment);
  __ AddP(temp, dividend, Operand(divisor > 0 ? 1 : -1));
  __ TruncatingDiv(result, temp, Abs(divisor));
  if (divisor < 0) __ LoadComplementRR(result, result);
  __ SubP(result, result, Operand(1));
  __ bind(&done);
}

// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  const Register dividend = ToRegister(instr->dividend());
  const Register divisor = ToRegister(instr->divisor());
  Register result = ToRegister(instr->result());

  DCHECK(!dividend.is(result));
  DCHECK(!divisor.is(result));

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    __ Cmp32(divisor, Operand::Zero());
    DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label dividend_not_zero;
    __ Cmp32(dividend, Operand::Zero());
    __ bne(&dividend_not_zero, Label::kNear);
    __ Cmp32(divisor, Operand::Zero());
    DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
    __ bind(&dividend_not_zero);
  }

  // Check for (kMinInt / -1).
  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ Cmp32(dividend, Operand(kMinInt));
    __ bne(&no_overflow_possible, Label::kNear);
    __ Cmp32(divisor, Operand(-1));
    if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
      DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
    } else {
      __ bne(&no_overflow_possible, Label::kNear);
      __ LoadRR(result, dividend);
    }
    __ bind(&no_overflow_possible);
  }

  __ LoadRR(r0, dividend);
  __ srda(r0, Operand(32));
  __ dr(r0, divisor);  // R0:R1 = R1 / divisor - R0 remainder - R1 quotient

  __ lr(result, r1);  // Move quotient to result register

  Label done;
  Register scratch = scratch0();
  // If both operands have the same sign then we are done.
  __ Xor(scratch, dividend, divisor);
  __ ltr(scratch, scratch);  // Use the 32-bit LoadAndTestRR even on 64-bit.
  __ bge(&done, Label::kNear);

  // If there is no remainder then we are done.
  if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
    __ msrkc(scratch, result, divisor);
  } else {
    __ lr(scratch, result);
    __ msr(scratch, divisor);
  }
  __ Cmp32(dividend, scratch);
  __ beq(&done, Label::kNear);

  // We performed a truncating division. Correct the result.
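  // e.g. -7 flooring-divided by 2 is -4, while the truncating dr above
  // yields -3; subtract one when the signs differ and a remainder exists.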
  __ Sub32(result, result, Operand(1));
  __ bind(&done);
}

void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
  DoubleRegister addend = ToDoubleRegister(instr->addend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
  DoubleRegister result = ToDoubleRegister(instr->result());

  // Unable to use madbr as the intermediate value is not rounded
  // to proper precision
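  // (madbr fuses the multiply and add with a single rounding step, while the
  //  semantics here require the intermediate product rounded to double.)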
  __ ldr(result, multiplier);
  __ mdbr(result, multiplicand);
  __ adbr(result, addend);
}

void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
  DoubleRegister minuend = ToDoubleRegister(instr->minuend());
  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
  DoubleRegister result = ToDoubleRegister(instr->result());

  // Unable to use msdbr as the intermediate value is not rounded
  // to proper precision
  __ ldr(result, multiplier);
  __ mdbr(result, multiplicand);
  __ sdbr(result, minuend);
}

void LCodeGen::DoMulI(LMulI* instr) {
  Register scratch = scratch0();
  Register result = ToRegister(instr->result());
  // Note that result may alias left.
  Register left = ToRegister(instr->left());
  LOperand* right_op = instr->right();

  bool bailout_on_minus_zero =
      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);

  if (right_op->IsConstantOperand()) {
    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));

    if (bailout_on_minus_zero && (constant < 0)) {
   1346       // The case of a zero constant is handled separately below.
   1347       // If the constant is negative and left is zero, the result should be -0.
   1348       __ CmpP(left, Operand::Zero());
   1349       DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
   1350     }
   1351 
   1352     switch (constant) {
   1353       case -1:
   1354         if (can_overflow) {
   1355 #if V8_TARGET_ARCH_S390X
   1356           if (instr->hydrogen()->representation().IsSmi()) {
   1357 #endif
   1358             __ LoadComplementRR(result, left);
   1359             DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
   1360 #if V8_TARGET_ARCH_S390X
   1361           } else {
   1362             __ LoadComplementRR(result, left);
   1363             __ TestIfInt32(result, r0);
   1364             DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
   1365           }
   1366 #endif
   1367         } else {
   1368           __ LoadComplementRR(result, left);
   1369         }
   1370         break;
   1371       case 0:
   1372         if (bailout_on_minus_zero) {
   1373 // If left is strictly negative and the constant is zero, the
   1374 // result is -0. Deoptimize if required, otherwise return 0.
   1375 #if V8_TARGET_ARCH_S390X
   1376           if (instr->hydrogen()->representation().IsSmi()) {
   1377 #endif
   1378             __ Cmp32(left, Operand::Zero());
   1379 #if V8_TARGET_ARCH_S390X
   1380           } else {
   1381             __ Cmp32(left, Operand::Zero());
   1382           }
   1383 #endif
   1384           DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
   1385         }
   1386         __ LoadImmP(result, Operand::Zero());
   1387         break;
   1388       case 1:
   1389         __ Move(result, left);
   1390         break;
   1391       default:
   1392         // Multiplying by powers of two and powers of two plus or minus
   1393         // one can be done faster with shifted operands.
   1394         // For other constants we emit standard code.
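                // mask is 0 for non-negative constants and -1 for negative
                // ones, so (constant + mask) ^ mask computes the absolute
                // value without a branch.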
   1395         int32_t mask = constant >> 31;
   1396         uint32_t constant_abs = (constant + mask) ^ mask;
   1397 
   1398         if (base::bits::IsPowerOfTwo32(constant_abs)) {
   1399           int32_t shift = WhichPowerOf2(constant_abs);
   1400           __ ShiftLeftP(result, left, Operand(shift));
   1401           // Correct the sign of the result if the constant is negative.
   1402           if (constant < 0) __ LoadComplementRR(result, result);
   1403         } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
   1404           int32_t shift = WhichPowerOf2(constant_abs - 1);
   1405           __ ShiftLeftP(scratch, left, Operand(shift));
   1406           __ AddP(result, scratch, left);
   1407           // Correct the sign of the result if the constant is negative.
   1408           if (constant < 0) __ LoadComplementRR(result, result);
   1409         } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
   1410           int32_t shift = WhichPowerOf2(constant_abs + 1);
   1411           __ ShiftLeftP(scratch, left, Operand(shift));
   1412           __ SubP(result, scratch, left);
   1413           // Correct the sign of the result if the constant is negative.
   1414           if (constant < 0) __ LoadComplementRR(result, result);
   1415         } else {
   1416           // Generate standard code.
   1417           __ Move(result, left);
   1418           __ MulP(result, Operand(constant));
   1419         }
   1420     }
   1421 
   1422   } else {
   1423     DCHECK(right_op->IsRegister());
   1424     Register right = ToRegister(right_op);
   1425 
   1426     if (can_overflow) {
   1427       if (CpuFeatures::IsSupported(MISC_INSTR_EXT2)) {
   1428         // result = left * right.
   1429         if (instr->hydrogen()->representation().IsSmi()) {
   1430           __ SmiUntag(scratch, right);
   1431           __ MulPWithCondition(result, left, scratch);
   1432         } else {
   1433           __ msrkc(result, left, right);
   1434           __ LoadW(result, result);
   1435         }
   1436         DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
   1437       } else {
   1438 #if V8_TARGET_ARCH_S390X
   1439         // result = left * right.
   1440         if (instr->hydrogen()->representation().IsSmi()) {
   1441           __ SmiUntag(result, left);
   1442           __ SmiUntag(scratch, right);
   1443           __ msgr(result, scratch);
   1444         } else {
   1445           __ LoadRR(result, left);
   1446           __ msgr(result, right);
   1447         }
   1448         __ TestIfInt32(result, r0);
   1449         DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
   1450         if (instr->hydrogen()->representation().IsSmi()) {
   1451           __ SmiTag(result);
   1452         }
   1453 #else
   1454         // r0:scratch = scratch * right
   1455         if (instr->hydrogen()->representation().IsSmi()) {
   1456           __ SmiUntag(scratch, left);
   1457           __ mr_z(r0, right);
   1458           __ LoadRR(result, scratch);
   1459         } else {
   1460           // r0:scratch = scratch * right
   1461           __ LoadRR(scratch, left);
   1462           __ mr_z(r0, right);
   1463           __ LoadRR(result, scratch);
   1464         }
   1465         __ TestIfInt32(r0, result, scratch);
   1466         DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
   1467 #endif
   1468       }
   1469     } else {
   1470       if (instr->hydrogen()->representation().IsSmi()) {
   1471         __ SmiUntag(result, left);
   1472         __ Mul(result, result, right);
   1473       } else {
   1474         __ Mul(result, left, right);
   1475       }
   1476     }
   1477 
   1478     if (bailout_on_minus_zero) {
   1479       Label done;
   1480 #if V8_TARGET_ARCH_S390X
   1481       if (instr->hydrogen()->representation().IsSmi()) {
   1482 #endif
   1483         __ XorP(r0, left, right);
   1484         __ LoadAndTestRR(r0, r0);
   1485         __ bge(&done, Label::kNear);
   1486 #if V8_TARGET_ARCH_S390X
   1487       } else {
   1488         __ XorP(r0, left, right);
   1489         __ Cmp32(r0, Operand::Zero());
   1490         __ bge(&done, Label::kNear);
   1491       }
   1492 #endif
   1493       // Bail out if the result is minus zero.
   1494       __ CmpP(result, Operand::Zero());
   1495       DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
   1496       __ bind(&done);
   1497     }
   1498   }
   1499 }
   1500 
   1501 void LCodeGen::DoBitI(LBitI* instr) {
   1502   LOperand* left_op = instr->left();
   1503   LOperand* right_op = instr->right();
   1504   DCHECK(left_op->IsRegister());
   1505   Register left = ToRegister(left_op);
   1506   Register result = ToRegister(instr->result());
   1507 
   1508   if (right_op->IsConstantOperand()) {
   1509     switch (instr->op()) {
   1510       case Token::BIT_AND:
   1511         __ AndP(result, left, Operand(ToOperand(right_op)));
   1512         break;
   1513       case Token::BIT_OR:
   1514         __ OrP(result, left, Operand(ToOperand(right_op)));
   1515         break;
   1516       case Token::BIT_XOR:
   1517         __ XorP(result, left, Operand(ToOperand(right_op)));
   1518         break;
   1519       default:
   1520         UNREACHABLE();
   1521         break;
   1522     }
   1523   } else if (right_op->IsStackSlot()) {
   1524     // Reg-mem instructions modify the register operand in place, so copy src to dst first.
   1525     if (!left.is(result)) __ LoadRR(result, left);
   1526     switch (instr->op()) {
   1527       case Token::BIT_AND:
   1528         __ AndP(result, ToMemOperand(right_op));
   1529         break;
   1530       case Token::BIT_OR:
   1531         __ OrP(result, ToMemOperand(right_op));
   1532         break;
   1533       case Token::BIT_XOR:
   1534         __ XorP(result, ToMemOperand(right_op));
   1535         break;
   1536       default:
   1537         UNREACHABLE();
   1538         break;
   1539     }
   1540   } else {
   1541     DCHECK(right_op->IsRegister());
   1542 
   1543     switch (instr->op()) {
   1544       case Token::BIT_AND:
   1545         __ AndP(result, left, ToRegister(right_op));
   1546         break;
   1547       case Token::BIT_OR:
   1548         __ OrP(result, left, ToRegister(right_op));
   1549         break;
   1550       case Token::BIT_XOR:
   1551         __ XorP(result, left, ToRegister(right_op));
   1552         break;
   1553       default:
   1554         UNREACHABLE();
   1555         break;
   1556     }
   1557   }
   1558 }
   1559 
   1560 void LCodeGen::DoShiftI(LShiftI* instr) {
   1561   // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
   1562   // result may alias either of them.
   1563   LOperand* right_op = instr->right();
   1564   Register left = ToRegister(instr->left());
   1565   Register result = ToRegister(instr->result());
   1566   Register scratch = scratch0();
   1567   if (right_op->IsRegister()) {
   1568     // Mask the right_op operand.
   1569     __ AndP(scratch, ToRegister(right_op), Operand(0x1F));
   1570     switch (instr->op()) {
   1571       case Token::ROR:
   1572         // rotate_right(a, b) == rotate_left(a, 32 - b)
   1573         __ LoadComplementRR(scratch, scratch);
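                // rll rotates left by (scratch + 32); with scratch = -b this
                // is a rotate left by 32 - b.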
   1574         __ rll(result, left, scratch, Operand(32));
   1575 #if V8_TARGET_ARCH_S390X
   1576         __ lgfr(result, result);
   1577 #endif
   1578         break;
   1579       case Token::SAR:
   1580         __ ShiftRightArith(result, left, scratch);
   1581 #if V8_TARGET_ARCH_S390X
   1582         __ lgfr(result, result);
   1583 #endif
   1584         break;
   1585       case Token::SHR:
   1586         __ ShiftRight(result, left, scratch);
   1587 #if V8_TARGET_ARCH_S390X
   1588         __ lgfr(result, result);
   1589 #endif
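                // A logical shift may leave the sign bit set; such a value is
                // not representable as an int32, so deoptimize when the result
                // is negative.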
   1590         if (instr->can_deopt()) {
   1591 #if V8_TARGET_ARCH_S390X
   1592           __ ltgfr(result, result /*, SetRC*/);
   1593 #else
   1594           __ ltr(result, result);  // Set the <,==,> condition
   1595 #endif
   1596           DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue, cr0);
   1597         }
   1598         break;
   1599       case Token::SHL:
   1600         __ ShiftLeft(result, left, scratch);
   1601 #if V8_TARGET_ARCH_S390X
   1602         __ lgfr(result, result);
   1603 #endif
   1604         break;
   1605       default:
   1606         UNREACHABLE();
   1607         break;
   1608     }
   1609   } else {
   1610     // Mask the right_op operand.
   1611     int value = ToInteger32(LConstantOperand::cast(right_op));
   1612     uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
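            // JavaScript shift counts are taken modulo 32, hence the 0x1F mask.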
   1613     switch (instr->op()) {
   1614       case Token::ROR:
   1615         if (shift_count != 0) {
   1616           __ rll(result, left, Operand(32 - shift_count));
   1617 #if V8_TARGET_ARCH_S390X
   1618           __ lgfr(result, result);
   1619 #endif
   1620         } else {
   1621           __ Move(result, left);
   1622         }
   1623         break;
   1624       case Token::SAR:
   1625         if (shift_count != 0) {
   1626           __ ShiftRightArith(result, left, Operand(shift_count));
   1627 #if V8_TARGET_ARCH_S390X
   1628           __ lgfr(result, result);
   1629 #endif
   1630         } else {
   1631           __ Move(result, left);
   1632         }
   1633         break;
   1634       case Token::SHR:
   1635         if (shift_count != 0) {
   1636           __ ShiftRight(result, left, Operand(shift_count));
   1637 #if V8_TARGET_ARCH_S390X
   1638           __ lgfr(result, result);
   1639 #endif
   1640         } else {
   1641           if (instr->can_deopt()) {
   1642             __ Cmp32(left, Operand::Zero());
   1643             DeoptimizeIf(lt, instr, DeoptimizeReason::kNegativeValue);
   1644           }
   1645           __ Move(result, left);
   1646         }
   1647         break;
   1648       case Token::SHL:
   1649         if (shift_count != 0) {
   1650 #if V8_TARGET_ARCH_S390X
   1651           if (instr->hydrogen_value()->representation().IsSmi()) {
   1652             __ ShiftLeftP(result, left, Operand(shift_count));
   1653 #else
   1654           if (instr->hydrogen_value()->representation().IsSmi() &&
   1655               instr->can_deopt()) {
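                    // A smi shift left can overflow the 31-bit payload: shift
                    // by (count - 1) and let SmiTagCheckOverflow perform the
                    // final doubling (the smi tag shift) with an overflow
                    // check.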
   1656             if (shift_count != 1) {
   1657               __ ShiftLeft(result, left, Operand(shift_count - 1));
   1658 #if V8_TARGET_ARCH_S390X
   1659               __ lgfr(result, result);
   1660 #endif
   1661               __ SmiTagCheckOverflow(result, result, scratch);
   1662             } else {
   1663               __ SmiTagCheckOverflow(result, left, scratch);
   1664             }
   1665             DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
   1666 #endif
   1667           } else {
   1668             __ ShiftLeft(result, left, Operand(shift_count));
   1669 #if V8_TARGET_ARCH_S390X
   1670             __ lgfr(result, result);
   1671 #endif
   1672           }
   1673         } else {
   1674           __ Move(result, left);
   1675         }
   1676         break;
   1677       default:
   1678         UNREACHABLE();
   1679         break;
   1680     }
   1681   }
   1682 }
   1683 
   1684 void LCodeGen::DoSubI(LSubI* instr) {
   1685   LOperand* left = instr->left();
   1686   LOperand* right = instr->right();
   1687   LOperand* result = instr->result();
   1688 
   1689   bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
   1690                      instr->hydrogen()->representation().IsExternal());
   1691 
   1692 #if V8_TARGET_ARCH_S390X
   1693   // The overflow detection needs to be tested on the lower 32-bits.
   1694   // As a result, on 64-bit, we need to force 32-bit arithmetic operations
   1695   // to set the CC overflow bit properly.  The result is then sign-extended.
   1696   bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   1697 #else
   1698   bool checkOverflow = true;
   1699 #endif
   1700 
   1701   if (right->IsConstantOperand()) {
   1702     if (!isInteger || !checkOverflow) {
   1703       __ SubP(ToRegister(result), ToRegister(left), ToOperand(right));
   1704     } else {
   1705       // -(kMinInt) overflows, so the immediate cannot simply be negated; go through a register instead.
   1706       if (ToInteger32(LConstantOperand::cast(right)) == kMinInt) {
   1707         __ Load(scratch0(), ToOperand(right));
   1708         __ Sub32(ToRegister(result), ToRegister(left), scratch0());
   1709       } else {
   1710         __ Sub32(ToRegister(result), ToRegister(left), ToOperand(right));
   1711       }
   1712     }
   1713   } else if (right->IsRegister()) {
   1714     if (!isInteger)
   1715       __ SubP(ToRegister(result), ToRegister(left), ToRegister(right));
   1716     else if (!checkOverflow)
   1717       __ SubP_ExtendSrc(ToRegister(result), ToRegister(left),
   1718                         ToRegister(right));
   1719     else
   1720       __ Sub32(ToRegister(result), ToRegister(left), ToRegister(right));
   1721   } else {
   1722     if (!left->Equals(instr->result()))
   1723       __ LoadRR(ToRegister(result), ToRegister(left));
   1724 
   1725     MemOperand mem = ToMemOperand(right);
   1726     if (!isInteger) {
   1727       __ SubP(ToRegister(result), mem);
   1728     } else {
   1729 #if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
   1730       // Read only the low 32 bits of the slot (on big-endian they live at offset + 4).
   1731       MemOperand Upper32Mem = MemOperand(mem.rb(), mem.rx(), mem.offset() + 4);
   1732 #else
   1733       MemOperand Upper32Mem = ToMemOperand(right);
   1734 #endif
   1735       if (checkOverflow) {
   1736         __ Sub32(ToRegister(result), Upper32Mem);
   1737       } else {
   1738         __ SubP_ExtendSrc(ToRegister(result), Upper32Mem);
   1739       }
   1740     }
   1741   }
   1742 
   1743 #if V8_TARGET_ARCH_S390X
   1744   if (isInteger && checkOverflow)
   1745     __ lgfr(ToRegister(result), ToRegister(result));
   1746 #endif
   1747   if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
   1748     DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
   1749   }
   1750 }
   1751 
   1752 void LCodeGen::DoConstantI(LConstantI* instr) {
   1753   Register dst = ToRegister(instr->result());
   1754   if (instr->value() == 0)
   1755     __ XorP(dst, dst);
   1756   else
   1757     __ Load(dst, Operand(instr->value()));
   1758 }
   1759 
   1760 void LCodeGen::DoConstantS(LConstantS* instr) {
   1761   __ LoadSmiLiteral(ToRegister(instr->result()), instr->value());
   1762 }
   1763 
   1764 void LCodeGen::DoConstantD(LConstantD* instr) {
   1765   DCHECK(instr->result()->IsDoubleRegister());
   1766   DoubleRegister result = ToDoubleRegister(instr->result());
   1767   uint64_t bits = instr->bits();
   1768   __ LoadDoubleLiteral(result, bits, scratch0());
   1769 }
   1770 
   1771 void LCodeGen::DoConstantE(LConstantE* instr) {
   1772   __ mov(ToRegister(instr->result()), Operand(instr->value()));
   1773 }
   1774 
   1775 void LCodeGen::DoConstantT(LConstantT* instr) {
   1776   Handle<Object> object = instr->value(isolate());
   1777   AllowDeferredHandleDereference smi_check;
   1778   __ Move(ToRegister(instr->result()), object);
   1779 }
   1780 
   1781 MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index,
   1782                                            String::Encoding encoding) {
   1783   if (index->IsConstantOperand()) {
   1784     int offset = ToInteger32(LConstantOperand::cast(index));
   1785     if (encoding == String::TWO_BYTE_ENCODING) {
   1786       offset *= kUC16Size;
   1787     }
   1788     STATIC_ASSERT(kCharSize == 1);
   1789     return FieldMemOperand(string, SeqString::kHeaderSize + offset);
   1790   }
   1791   Register scratch = scratch0();
   1792   DCHECK(!scratch.is(string));
   1793   DCHECK(!scratch.is(ToRegister(index)));
   1794   // TODO(joransiu): Fold Add into FieldMemOperand
   1795   if (encoding == String::ONE_BYTE_ENCODING) {
   1796     __ AddP(scratch, string, ToRegister(index));
   1797   } else {
   1798     STATIC_ASSERT(kUC16Size == 2);
   1799     __ ShiftLeftP(scratch, ToRegister(index), Operand(1));
   1800     __ AddP(scratch, string, scratch);
   1801   }
   1802   return FieldMemOperand(scratch, SeqString::kHeaderSize);
   1803 }
   1804 
   1805 void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
   1806   String::Encoding encoding = instr->hydrogen()->encoding();
   1807   Register string = ToRegister(instr->string());
   1808   Register result = ToRegister(instr->result());
   1809 
   1810   if (FLAG_debug_code) {
   1811     Register scratch = scratch0();
   1812     __ LoadP(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
   1813     __ llc(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
   1814 
   1815     __ AndP(scratch, scratch,
   1816             Operand(kStringRepresentationMask | kStringEncodingMask));
   1817     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
   1818     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
   1819     __ CmpP(scratch,
   1820             Operand(encoding == String::ONE_BYTE_ENCODING ? one_byte_seq_type
   1821                                                           : two_byte_seq_type));
   1822     __ Check(eq, kUnexpectedStringType);
   1823   }
   1824 
   1825   MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
   1826   if (encoding == String::ONE_BYTE_ENCODING) {
   1827     __ llc(result, operand);
   1828   } else {
   1829     __ llh(result, operand);
   1830   }
   1831 }
   1832 
   1833 void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
   1834   String::Encoding encoding = instr->hydrogen()->encoding();
   1835   Register string = ToRegister(instr->string());
   1836   Register value = ToRegister(instr->value());
   1837 
   1838   if (FLAG_debug_code) {
   1839     Register index = ToRegister(instr->index());
   1840     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
   1841     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
   1842     int encoding_mask =
   1843         instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
   1844             ? one_byte_seq_type
   1845             : two_byte_seq_type;
   1846     __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
   1847   }
   1848 
   1849   MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
   1850   if (encoding == String::ONE_BYTE_ENCODING) {
   1851     __ stc(value, operand);
   1852   } else {
   1853     __ sth(value, operand);
   1854   }
   1855 }
   1856 
   1857 void LCodeGen::DoAddI(LAddI* instr) {
   1858   LOperand* left = instr->left();
   1859   LOperand* right = instr->right();
   1860   LOperand* result = instr->result();
   1861   bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
   1862                      instr->hydrogen()->representation().IsExternal());
   1863 #if V8_TARGET_ARCH_S390X
   1864   // The overflow detection needs to be tested on the lower 32-bits.
   1865   // As a result, on 64-bit, we need to force 32-bit arithmetic operations
   1866   // to set the CC overflow bit properly.  The result is then sign-extended.
   1867   bool checkOverflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   1868 #else
   1869   bool checkOverflow = true;
   1870 #endif
   1871 
   1872   if (right->IsConstantOperand()) {
   1873     if (!isInteger || !checkOverflow)
   1874       __ AddP(ToRegister(result), ToRegister(left), ToOperand(right));
   1875     else
   1876       __ Add32(ToRegister(result), ToRegister(left), ToOperand(right));
   1877   } else if (right->IsRegister()) {
   1878     if (!isInteger)
   1879       __ AddP(ToRegister(result), ToRegister(left), ToRegister(right));
   1880     else if (!checkOverflow)
   1881       __ AddP_ExtendSrc(ToRegister(result), ToRegister(left),
   1882                         ToRegister(right));
   1883     else
   1884       __ Add32(ToRegister(result), ToRegister(left), ToRegister(right));
   1885   } else {
   1886     if (!left->Equals(instr->result()))
   1887       __ LoadRR(ToRegister(result), ToRegister(left));
   1888 
   1889     MemOperand mem = ToMemOperand(right);
   1890     if (!isInteger) {
   1891       __ AddP(ToRegister(result), mem);
   1892     } else {
   1893 #if V8_TARGET_ARCH_S390X && !V8_TARGET_LITTLE_ENDIAN
   1894       // Read only the low 32 bits of the slot (on big-endian they live at offset + 4).
   1895       MemOperand Upper32Mem = MemOperand(mem.rb(), mem.rx(), mem.offset() + 4);
   1896 #else
   1897       MemOperand Upper32Mem = ToMemOperand(right);
   1898 #endif
   1899       if (checkOverflow) {
   1900         __ Add32(ToRegister(result), Upper32Mem);
   1901       } else {
   1902         __ AddP_ExtendSrc(ToRegister(result), Upper32Mem);
   1903       }
   1904     }
   1905   }
   1906 
   1907 #if V8_TARGET_ARCH_S390X
   1908   if (isInteger && checkOverflow)
   1909     __ lgfr(ToRegister(result), ToRegister(result));
   1910 #endif
   1911   // Deoptimize on overflow.
   1912   if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
   1913     DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
   1914   }
   1915 }
   1916 
   1917 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
   1918   LOperand* left = instr->left();
   1919   LOperand* right = instr->right();
   1920   HMathMinMax::Operation operation = instr->hydrogen()->operation();
   1921   Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge;
   1922   if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
   1923     Register left_reg = ToRegister(left);
   1924     Register right_reg = EmitLoadRegister(right, ip);
   1925     Register result_reg = ToRegister(instr->result());
   1926     Label return_left, done;
   1927 #if V8_TARGET_ARCH_S390X
   1928     if (instr->hydrogen_value()->representation().IsSmi()) {
   1929 #endif
   1930       __ CmpP(left_reg, right_reg);
   1931 #if V8_TARGET_ARCH_S390X
   1932     } else {
   1933       __ Cmp32(left_reg, right_reg);
   1934     }
   1935 #endif
   1936     __ b(cond, &return_left, Label::kNear);
   1937     __ Move(result_reg, right_reg);
   1938     __ b(&done, Label::kNear);
   1939     __ bind(&return_left);
   1940     __ Move(result_reg, left_reg);
   1941     __ bind(&done);
   1942   } else {
   1943     DCHECK(instr->hydrogen()->representation().IsDouble());
   1944     DoubleRegister left_reg = ToDoubleRegister(left);
   1945     DoubleRegister right_reg = ToDoubleRegister(right);
   1946     DoubleRegister result_reg = ToDoubleRegister(instr->result());
   1947     Label check_nan_left, check_zero, return_left, return_right, done;
   1948     __ cdbr(left_reg, right_reg);
   1949     __ bunordered(&check_nan_left, Label::kNear);
   1950     __ beq(&check_zero);
   1951     __ b(cond, &return_left, Label::kNear);
   1952     __ b(&return_right, Label::kNear);
   1953 
   1954     __ bind(&check_zero);
   1955     __ lzdr(kDoubleRegZero);
   1956     __ cdbr(left_reg, kDoubleRegZero);
   1957     __ bne(&return_left, Label::kNear);  // left == right != 0.
   1958 
   1959     // At this point, both left and right are either 0 or -0.
   1960     // N.B. The following works because +0 + -0 == +0
   1961     if (operation == HMathMinMax::kMathMin) {
   1962       // For min we want logical-or of sign bit: -(-L + -R)
   1963       __ lcdbr(left_reg, left_reg);
   1964       __ ldr(result_reg, left_reg);
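              // left_reg now holds -L. If right aliases left it was negated
              // too, so add to form -L + -R; otherwise subtract R.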
   1965       if (left_reg.is(right_reg)) {
   1966         __ adbr(result_reg, right_reg);
   1967       } else {
   1968         __ sdbr(result_reg, right_reg);
   1969       }
   1970       __ lcdbr(result_reg, result_reg);
   1971     } else {
   1972       // For max we want logical-and of sign bit: (L + R)
   1973       __ ldr(result_reg, left_reg);
   1974       __ adbr(result_reg, right_reg);
   1975     }
   1976     __ b(&done, Label::kNear);
   1977 
   1978     __ bind(&check_nan_left);
   1979     __ cdbr(left_reg, left_reg);
   1980     __ bunordered(&return_left, Label::kNear);  // left == NaN.
   1981 
   1982     __ bind(&return_right);
   1983     if (!right_reg.is(result_reg)) {
   1984       __ ldr(result_reg, right_reg);
   1985     }
   1986     __ b(&done, Label::kNear);
   1987 
   1988     __ bind(&return_left);
   1989     if (!left_reg.is(result_reg)) {
   1990       __ ldr(result_reg, left_reg);
   1991     }
   1992     __ bind(&done);
   1993   }
   1994 }
   1995 
   1996 void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
   1997   DoubleRegister left = ToDoubleRegister(instr->left());
   1998   DoubleRegister right = ToDoubleRegister(instr->right());
   1999   DoubleRegister result = ToDoubleRegister(instr->result());
   2000   switch (instr->op()) {
   2001     case Token::ADD:
   2002       if (CpuFeatures::IsSupported(VECTOR_FACILITY)) {
   2003         __ vfa(result, left, right);
   2004       } else {
   2005         DCHECK(result.is(left));
   2006         __ adbr(result, right);
   2007       }
   2008       break;
   2009     case Token::SUB:
   2010       if (CpuFeatures::IsSupported(VECTOR_FACILITY)) {
   2011         __ vfs(result, left, right);
   2012       } else {
   2013         DCHECK(result.is(left));
   2014         __ sdbr(result, right);
   2015       }
   2016       break;
   2017     case Token::MUL:
   2018       if (CpuFeatures::IsSupported(VECTOR_FACILITY)) {
   2019         __ vfm(result, left, right);
   2020       } else {
   2021         DCHECK(result.is(left));
   2022         __ mdbr(result, right);
   2023       }
   2024       break;
   2025     case Token::DIV:
   2026       if (CpuFeatures::IsSupported(VECTOR_FACILITY)) {
   2027         __ vfd(result, left, right);
   2028       } else {
   2029         DCHECK(result.is(left));
   2030         __ ddbr(result, right);
   2031       }
   2032       break;
   2033     case Token::MOD: {
   2034       __ PrepareCallCFunction(0, 2, scratch0());
   2035       __ MovToFloatParameters(left, right);
   2036       __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
   2037                        0, 2);
   2038       // Move the result into the double result register.
   2039       __ MovFromFloatResult(result);
   2040       break;
   2041     }
   2042     default:
   2043       UNREACHABLE();
   2044       break;
   2045   }
   2046 }
   2047 
   2048 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
   2049   DCHECK(ToRegister(instr->context()).is(cp));
   2050   DCHECK(ToRegister(instr->left()).is(r3));
   2051   DCHECK(ToRegister(instr->right()).is(r2));
   2052   DCHECK(ToRegister(instr->result()).is(r2));
   2053 
   2054   Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), instr->op()).code();
   2055   CallCode(code, RelocInfo::CODE_TARGET, instr);
   2056 }
   2057 
   2058 template <class InstrType>
   2059 void LCodeGen::EmitBranch(InstrType instr, Condition cond) {
   2060   int left_block = instr->TrueDestination(chunk_);
   2061   int right_block = instr->FalseDestination(chunk_);
   2062 
   2063   int next_block = GetNextEmittedBlock();
   2064 
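          // Emit at most one branch: when either target is the next emitted
          // block, fall through to it rather than branching.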
   2065   if (right_block == left_block || cond == al) {
   2066     EmitGoto(left_block);
   2067   } else if (left_block == next_block) {
   2068     __ b(NegateCondition(cond), chunk_->GetAssemblyLabel(right_block));
   2069   } else if (right_block == next_block) {
   2070     __ b(cond, chunk_->GetAssemblyLabel(left_block));
   2071   } else {
   2072     __ b(cond, chunk_->GetAssemblyLabel(left_block));
   2073     __ b(chunk_->GetAssemblyLabel(right_block));
   2074   }
   2075 }
   2076 
   2077 template <class InstrType>
   2078 void LCodeGen::EmitTrueBranch(InstrType instr, Condition cond) {
   2079   int true_block = instr->TrueDestination(chunk_);
   2080   __ b(cond, chunk_->GetAssemblyLabel(true_block));
   2081 }
   2082 
   2083 template <class InstrType>
   2084 void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond) {
   2085   int false_block = instr->FalseDestination(chunk_);
   2086   __ b(cond, chunk_->GetAssemblyLabel(false_block));
   2087 }
   2088 
   2089 void LCodeGen::DoDebugBreak(LDebugBreak* instr) { __ stop("LBreak"); }
   2090 
   2091 void LCodeGen::DoBranch(LBranch* instr) {
   2092   Representation r = instr->hydrogen()->value()->representation();
   2093   DoubleRegister dbl_scratch = double_scratch0();
   2094 
   2095   if (r.IsInteger32()) {
   2096     DCHECK(!info()->IsStub());
   2097     Register reg = ToRegister(instr->value());
   2098     __ Cmp32(reg, Operand::Zero());
   2099     EmitBranch(instr, ne);
   2100   } else if (r.IsSmi()) {
   2101     DCHECK(!info()->IsStub());
   2102     Register reg = ToRegister(instr->value());
   2103     __ CmpP(reg, Operand::Zero());
   2104     EmitBranch(instr, ne);
   2105   } else if (r.IsDouble()) {
   2106     DCHECK(!info()->IsStub());
   2107     DoubleRegister reg = ToDoubleRegister(instr->value());
   2108     __ lzdr(kDoubleRegZero);
   2109     __ cdbr(reg, kDoubleRegZero);
   2110     // Test the double value. Zero and NaN are false.
   2111     Condition lt_gt = static_cast<Condition>(lt | gt);
   2112 
   2113     EmitBranch(instr, lt_gt);
   2114   } else {
   2115     DCHECK(r.IsTagged());
   2116     Register reg = ToRegister(instr->value());
   2117     HType type = instr->hydrogen()->value()->type();
   2118     if (type.IsBoolean()) {
   2119       DCHECK(!info()->IsStub());
   2120       __ CompareRoot(reg, Heap::kTrueValueRootIndex);
   2121       EmitBranch(instr, eq);
   2122     } else if (type.IsSmi()) {
   2123       DCHECK(!info()->IsStub());
   2124       __ CmpP(reg, Operand::Zero());
   2125       EmitBranch(instr, ne);
   2126     } else if (type.IsJSArray()) {
   2127       DCHECK(!info()->IsStub());
   2128       EmitBranch(instr, al);
   2129     } else if (type.IsHeapNumber()) {
   2130       DCHECK(!info()->IsStub());
   2131       __ LoadDouble(dbl_scratch,
   2132                     FieldMemOperand(reg, HeapNumber::kValueOffset));
   2133       // Test the double value. Zero and NaN are false.
   2134       __ lzdr(kDoubleRegZero);
   2135       __ cdbr(dbl_scratch, kDoubleRegZero);
   2136       Condition lt_gt = static_cast<Condition>(lt | gt);
   2137       EmitBranch(instr, lt_gt);
   2138     } else if (type.IsString()) {
   2139       DCHECK(!info()->IsStub());
   2140       __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
   2141       __ CmpP(ip, Operand::Zero());
   2142       EmitBranch(instr, ne);
   2143     } else {
   2144       ToBooleanHints expected = instr->hydrogen()->expected_input_types();
   2145       // Avoid deopts in the case where we've never executed this path before.
   2146       if (expected == ToBooleanHint::kNone) expected = ToBooleanHint::kAny;
   2147 
   2148       if (expected & ToBooleanHint::kUndefined) {
   2149         // undefined -> false.
   2150         __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
   2151         __ beq(instr->FalseLabel(chunk_));
   2152       }
   2153       if (expected & ToBooleanHint::kBoolean) {
   2154         // Boolean -> its value.
   2155         __ CompareRoot(reg, Heap::kTrueValueRootIndex);
   2156         __ beq(instr->TrueLabel(chunk_));
   2157         __ CompareRoot(reg, Heap::kFalseValueRootIndex);
   2158         __ beq(instr->FalseLabel(chunk_));
   2159       }
   2160       if (expected & ToBooleanHint::kNull) {
   2161         // 'null' -> false.
   2162         __ CompareRoot(reg, Heap::kNullValueRootIndex);
   2163         __ beq(instr->FalseLabel(chunk_));
   2164       }
   2165 
   2166       if (expected & ToBooleanHint::kSmallInteger) {
   2167         // Smis: 0 -> false, all other -> true.
   2168         __ CmpP(reg, Operand::Zero());
   2169         __ beq(instr->FalseLabel(chunk_));
   2170         __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
   2171       } else if (expected & ToBooleanHint::kNeedsMap) {
   2172         // If we need a map later and have a Smi -> deopt.
   2173         __ TestIfSmi(reg);
   2174         DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
   2175       }
   2176 
   2177       const Register map = scratch0();
   2178       if (expected & ToBooleanHint::kNeedsMap) {
   2179         __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset));
   2180 
   2181         if (expected & ToBooleanHint::kCanBeUndetectable) {
   2182           // Undetectable -> false.
   2183           __ tm(FieldMemOperand(map, Map::kBitFieldOffset),
   2184                 Operand(1 << Map::kIsUndetectable));
   2185           __ bne(instr->FalseLabel(chunk_));
   2186         }
   2187       }
   2188 
   2189       if (expected & ToBooleanHint::kReceiver) {
   2190         // spec object -> true.
   2191         __ CompareInstanceType(map, ip, FIRST_JS_RECEIVER_TYPE);
   2192         __ bge(instr->TrueLabel(chunk_));
   2193       }
   2194 
   2195       if (expected & ToBooleanHint::kString) {
   2196         // String value -> false iff empty.
   2197         Label not_string;
   2198         __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
   2199         __ bge(&not_string, Label::kNear);
   2200         __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
   2201         __ CmpP(ip, Operand::Zero());
   2202         __ bne(instr->TrueLabel(chunk_));
   2203         __ b(instr->FalseLabel(chunk_));
   2204         __ bind(&not_string);
   2205       }
   2206 
   2207       if (expected & ToBooleanHint::kSymbol) {
   2208         // Symbol value -> true.
   2209         __ CompareInstanceType(map, ip, SYMBOL_TYPE);
   2210         __ beq(instr->TrueLabel(chunk_));
   2211       }
   2212 
   2213       if (expected & ToBooleanHint::kHeapNumber) {
   2214         // heap number -> false iff +0, -0, or NaN.
   2215         Label not_heap_number;
   2216         __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
   2217         __ bne(&not_heap_number, Label::kNear);
   2218         __ LoadDouble(dbl_scratch,
   2219                       FieldMemOperand(reg, HeapNumber::kValueOffset));
   2220         __ lzdr(kDoubleRegZero);
   2221         __ cdbr(dbl_scratch, kDoubleRegZero);
   2222         __ bunordered(instr->FalseLabel(chunk_));  // NaN -> false.
   2223         __ beq(instr->FalseLabel(chunk_));         // +0, -0 -> false.
   2224         __ b(instr->TrueLabel(chunk_));
   2225         __ bind(&not_heap_number);
   2226       }
   2227 
   2228       if (expected != ToBooleanHint::kAny) {
   2229         // We've seen something for the first time -> deopt.
   2230         // This can only happen if we are not generic already.
   2231         DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject);
   2232       }
   2233     }
   2234   }
   2235 }
   2236 
   2237 void LCodeGen::EmitGoto(int block) {
   2238   if (!IsNextEmittedBlock(block)) {
   2239     __ b(chunk_->GetAssemblyLabel(LookupDestination(block)));
   2240   }
   2241 }
   2242 
   2243 void LCodeGen::DoGoto(LGoto* instr) { EmitGoto(instr->block_id()); }
   2244 
   2245 Condition LCodeGen::TokenToCondition(Token::Value op) {
   2246   Condition cond = kNoCondition;
   2247   switch (op) {
   2248     case Token::EQ:
   2249     case Token::EQ_STRICT:
   2250       cond = eq;
   2251       break;
   2252     case Token::NE:
   2253     case Token::NE_STRICT:
   2254       cond = ne;
   2255       break;
   2256     case Token::LT:
   2257       cond = lt;
   2258       break;
   2259     case Token::GT:
   2260       cond = gt;
   2261       break;
   2262     case Token::LTE:
   2263       cond = le;
   2264       break;
   2265     case Token::GTE:
   2266       cond = ge;
   2267       break;
   2268     case Token::IN:
   2269     case Token::INSTANCEOF:
   2270     default:
   2271       UNREACHABLE();
   2272   }
   2273   return cond;
   2274 }
   2275 
   2276 void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
   2277   LOperand* left = instr->left();
   2278   LOperand* right = instr->right();
   2279   bool is_unsigned =
   2280       instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
   2281       instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
   2282   Condition cond = TokenToCondition(instr->op());
   2283 
   2284   if (left->IsConstantOperand() && right->IsConstantOperand()) {
   2285     // We can statically evaluate the comparison.
   2286     double left_val = ToDouble(LConstantOperand::cast(left));
   2287     double right_val = ToDouble(LConstantOperand::cast(right));
   2288     int next_block = Token::EvalComparison(instr->op(), left_val, right_val)
   2289                          ? instr->TrueDestination(chunk_)
   2290                          : instr->FalseDestination(chunk_);
   2291     EmitGoto(next_block);
   2292   } else {
   2293     if (instr->is_double()) {
   2294       // Compare left and right operands as doubles and load the
   2295       // resulting flags into the normal status register.
   2296       __ cdbr(ToDoubleRegister(left), ToDoubleRegister(right));
   2297       // If a NaN is involved, i.e. the result is unordered,
   2298       // jump to false block label.
   2299       // jump to the false block label.
   2300     } else {
   2301       if (right->IsConstantOperand()) {
   2302         int32_t value = ToInteger32(LConstantOperand::cast(right));
   2303         if (instr->hydrogen_value()->representation().IsSmi()) {
   2304           if (is_unsigned) {
   2305             __ CmpLogicalSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
   2306           } else {
   2307             __ CmpSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
   2308           }
   2309         } else {
   2310           if (is_unsigned) {
   2311             __ CmpLogical32(ToRegister(left), ToOperand(right));
   2312           } else {
   2313             __ Cmp32(ToRegister(left), ToOperand(right));
   2314           }
   2315         }
   2316       } else if (left->IsConstantOperand()) {
   2317         int32_t value = ToInteger32(LConstantOperand::cast(left));
   2318         if (instr->hydrogen_value()->representation().IsSmi()) {
   2319           if (is_unsigned) {
   2320             __ CmpLogicalSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
   2321           } else {
   2322             __ CmpSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
   2323           }
   2324         } else {
   2325           if (is_unsigned) {
   2326             __ CmpLogical32(ToRegister(right), ToOperand(left));
   2327           } else {
   2328             __ Cmp32(ToRegister(right), ToOperand(left));
   2329           }
   2330         }
   2331         // We commuted the operands, so commute the condition.
   2332         cond = CommuteCondition(cond);
   2333       } else if (instr->hydrogen_value()->representation().IsSmi()) {
   2334         if (is_unsigned) {
   2335           __ CmpLogicalP(ToRegister(left), ToRegister(right));
   2336         } else {
   2337           __ CmpP(ToRegister(left), ToRegister(right));
   2338         }
   2339       } else {
   2340         if (is_unsigned) {
   2341           __ CmpLogical32(ToRegister(left), ToRegister(right));
   2342         } else {
   2343           __ Cmp32(ToRegister(left), ToRegister(right));
   2344         }
   2345       }
   2346     }
   2347     EmitBranch(instr, cond);
   2348   }
   2349 }
   2350 
   2351 void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
   2352   Register left = ToRegister(instr->left());
   2353   Register right = ToRegister(instr->right());
   2354 
   2355   __ CmpP(left, right);
   2356   EmitBranch(instr, eq);
   2357 }
   2358 
   2359 void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
   2360   if (instr->hydrogen()->representation().IsTagged()) {
   2361     Register input_reg = ToRegister(instr->object());
   2362     __ CmpP(input_reg, Operand(factory()->the_hole_value()));
   2363     EmitBranch(instr, eq);
   2364     return;
   2365   }
   2366 
   2367   DoubleRegister input_reg = ToDoubleRegister(instr->object());
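          // The hole is encoded as a NaN, and only NaNs compare unordered with
          // themselves, so an ordered value here cannot be the hole.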
   2368   __ cdbr(input_reg, input_reg);
   2369   EmitFalseBranch(instr, ordered);
   2370 
   2371   Register scratch = scratch0();
   2372   // Convert to GPR and examine the upper 32 bits
   2373   __ lgdr(scratch, input_reg);
   2374   __ srlg(scratch, scratch, Operand(32));
   2375   __ Cmp32(scratch, Operand(kHoleNanUpper32));
   2376   EmitBranch(instr, eq);
   2377 }
   2378 
   2379 Condition LCodeGen::EmitIsString(Register input, Register temp1,
   2380                                  Label* is_not_string,
   2381                                  SmiCheck check_needed = INLINE_SMI_CHECK) {
   2382   if (check_needed == INLINE_SMI_CHECK) {
   2383     __ JumpIfSmi(input, is_not_string);
   2384   }
   2385   __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
   2386 
   2387   return lt;
   2388 }
   2389 
   2390 void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
   2391   Register reg = ToRegister(instr->value());
   2392   Register temp1 = ToRegister(instr->temp());
   2393 
   2394   SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
   2395                               ? OMIT_SMI_CHECK
   2396                               : INLINE_SMI_CHECK;
   2397   Condition true_cond =
   2398       EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
   2399 
   2400   EmitBranch(instr, true_cond);
   2401 }
   2402 
   2403 void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
   2404   Register input_reg = EmitLoadRegister(instr->value(), ip);
   2405   __ TestIfSmi(input_reg);
   2406   EmitBranch(instr, eq);
   2407 }
   2408 
   2409 void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
   2410   Register input = ToRegister(instr->value());
   2411   Register temp = ToRegister(instr->temp());
   2412 
   2413   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   2414     __ JumpIfSmi(input, instr->FalseLabel(chunk_));
   2415   }
   2416   __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
   2417   __ tm(FieldMemOperand(temp, Map::kBitFieldOffset),
   2418         Operand(1 << Map::kIsUndetectable));
   2419   EmitBranch(instr, ne);
   2420 }
   2421 
   2422 static Condition ComputeCompareCondition(Token::Value op) {
   2423   switch (op) {
   2424     case Token::EQ_STRICT:
   2425     case Token::EQ:
   2426       return eq;
   2427     case Token::LT:
   2428       return lt;
   2429     case Token::GT:
   2430       return gt;
   2431     case Token::LTE:
   2432       return le;
   2433     case Token::GTE:
   2434       return ge;
   2435     default:
   2436       UNREACHABLE();
   2437       return kNoCondition;
   2438   }
   2439 }
   2440 
   2441 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
   2442   DCHECK(ToRegister(instr->context()).is(cp));
   2443   DCHECK(ToRegister(instr->left()).is(r3));
   2444   DCHECK(ToRegister(instr->right()).is(r2));
   2445 
   2446   Handle<Code> code = CodeFactory::StringCompare(isolate(), instr->op()).code();
   2447   CallCode(code, RelocInfo::CODE_TARGET, instr);
   2448   __ CompareRoot(r2, Heap::kTrueValueRootIndex);
   2449   EmitBranch(instr, eq);
   2450 }
   2451 
   2452 static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
   2453   InstanceType from = instr->from();
   2454   InstanceType to = instr->to();
   2455   if (from == FIRST_TYPE) return to;
   2456   DCHECK(from == to || to == LAST_TYPE);
   2457   return from;
   2458 }
   2459 
   2460 static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
   2461   InstanceType from = instr->from();
   2462   InstanceType to = instr->to();
   2463   if (from == to) return eq;
   2464   if (to == LAST_TYPE) return ge;
   2465   if (from == FIRST_TYPE) return le;
   2466   UNREACHABLE();
   2467   return eq;
   2468 }
   2469 
   2470 void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
   2471   Register scratch = scratch0();
   2472   Register input = ToRegister(instr->value());
   2473 
   2474   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   2475     __ JumpIfSmi(input, instr->FalseLabel(chunk_));
   2476   }
   2477 
   2478   __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
   2479   EmitBranch(instr, BranchCondition(instr->hydrogen()));
   2480 }
   2481 
   2482 // Branches to a label or falls through with the answer in flags.  Trashes
   2483 // the temp registers, but not the input.
   2484 void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
   2485                                Handle<String> class_name, Register input,
   2486                                Register temp, Register temp2) {
   2487   DCHECK(!input.is(temp));
   2488   DCHECK(!input.is(temp2));
   2489   DCHECK(!temp.is(temp2));
   2490 
   2491   __ JumpIfSmi(input, is_false);
   2492 
   2493   __ CompareObjectType(input, temp, temp2, FIRST_FUNCTION_TYPE);
   2494   STATIC_ASSERT(LAST_FUNCTION_TYPE == LAST_TYPE);
   2495   if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
   2496     __ bge(is_true);
   2497   } else {
   2498     __ bge(is_false);
   2499   }
   2500 
   2501   // Check if the constructor in the map is a function.
   2502   Register instance_type = ip;
   2503   __ GetMapConstructor(temp, temp, temp2, instance_type);
   2504 
   2505   // Objects with a non-function constructor have class 'Object'.
   2506   __ CmpP(instance_type, Operand(JS_FUNCTION_TYPE));
   2507   if (String::Equals(isolate()->factory()->Object_string(), class_name)) {
   2508     __ bne(is_true);
   2509   } else {
   2510     __ bne(is_false);
   2511   }
   2512 
   2513   // temp now contains the constructor function. Grab the
   2514   // instance class name from there.
   2515   __ LoadP(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
   2516   __ LoadP(temp,
   2517            FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset));
   2518   // The class name we are testing against is internalized since it's a literal.
   2519   // The name in the constructor is internalized because of the way the context
   2520   // is booted.  This routine isn't expected to work for random API-created
   2521   // classes and it doesn't have to because you can't access it with natives
   2522   // syntax.  Since both sides are internalized it is sufficient to use an
   2523   // identity comparison.
   2524   __ CmpP(temp, Operand(class_name));
   2525   // End with the answer in flags.
   2526 }
   2527 
   2528 void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
   2529   Register input = ToRegister(instr->value());
   2530   Register temp = scratch0();
   2531   Register temp2 = ToRegister(instr->temp());
   2532   Handle<String> class_name = instr->hydrogen()->class_name();
   2533 
   2534   EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
   2535                   class_name, input, temp, temp2);
   2536 
   2537   EmitBranch(instr, eq);
   2538 }
   2539 
   2540 void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
   2541   Register reg = ToRegister(instr->value());
   2542   Register temp = ToRegister(instr->temp());
   2543 
   2544   __ mov(temp, Operand(instr->map()));
   2545   __ CmpP(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
   2546   EmitBranch(instr, eq);
   2547 }
   2548 
   2549 void LCodeGen::DoHasInPrototypeChainAndBranch(
   2550     LHasInPrototypeChainAndBranch* instr) {
   2551   Register const object = ToRegister(instr->object());
   2552   Register const object_map = scratch0();
   2553   Register const object_instance_type = ip;
   2554   Register const object_prototype = object_map;
   2555   Register const prototype = ToRegister(instr->prototype());
   2556 
   2557   // The {object} must be a spec object.  It's sufficient to know that {object}
   2558   // is not a smi, since all other non-spec objects have {null} prototypes and
   2559   // will be ruled out below.
   2560   if (instr->hydrogen()->ObjectNeedsSmiCheck()) {
   2561     __ TestIfSmi(object);
   2562     EmitFalseBranch(instr, eq);
   2563   }
   2564   // Loop through the {object}'s prototype chain looking for the {prototype}.
   2565   __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
   2566   Label loop;
   2567   __ bind(&loop);
   2568 
   2569   // Deoptimize if the object needs to be access checked.
   2570   __ LoadlB(object_instance_type,
   2571             FieldMemOperand(object_map, Map::kBitFieldOffset));
   2572   __ TestBit(object_instance_type, Map::kIsAccessCheckNeeded, r0);
   2573   DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck, cr0);
   2574   // Deoptimize for proxies.
   2575   __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
   2576   DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy);
   2577   __ LoadP(object_prototype,
   2578            FieldMemOperand(object_map, Map::kPrototypeOffset));
   2579   __ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
   2580   EmitFalseBranch(instr, eq);
   2581   __ CmpP(object_prototype, prototype);
   2582   EmitTrueBranch(instr, eq);
   2583   __ LoadP(object_map,
   2584            FieldMemOperand(object_prototype, HeapObject::kMapOffset));
   2585   __ b(&loop);
   2586 }
   2587 
   2588 void LCodeGen::DoCmpT(LCmpT* instr) {
   2589   DCHECK(ToRegister(instr->context()).is(cp));
   2590   Token::Value op = instr->op();
   2591 
   2592   Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
   2593   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   2594   // This compare also signals that no smi code was inlined.
   2595   __ CmpP(r2, Operand::Zero());
   2596 
   2597   Condition condition = ComputeCompareCondition(op);
   2598   Label true_value, done;
   2599 
   2600   __ b(condition, &true_value, Label::kNear);
   2601 
   2602   __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
   2603   __ b(&done, Label::kNear);
   2604 
   2605   __ bind(&true_value);
   2606   __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
   2607 
   2608   __ bind(&done);
   2609 }
   2610 
   2611 void LCodeGen::DoReturn(LReturn* instr) {
   2612   if (FLAG_trace && info()->IsOptimizing()) {
   2613     // Push the return value on the stack as the parameter.
   2614     // Runtime::TraceExit returns its parameter in r2.  Since we are
   2615     // leaving the code managed by the register allocator and tearing down
   2616     // the frame, it is safe to write to the context register.
   2617     __ push(r2);
   2618     __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
   2619     __ CallRuntime(Runtime::kTraceExit);
   2620   }
   2621   if (info()->saves_caller_doubles()) {
   2622     RestoreCallerDoubles();
   2623   }
   2624   if (instr->has_constant_parameter_count()) {
   2625     int parameter_count = ToInteger32(instr->constant_parameter_count());
   2626     int32_t sp_delta = (parameter_count + 1) * kPointerSize;
   2627     if (NeedsEagerFrame()) {
   2628       masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
   2629     } else if (sp_delta != 0) {
   2630       // TODO(joransiu): Clean this up into Macro Assembler
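              // la takes only a 12-bit unsigned displacement; use lay (20-bit
              // signed displacement) for larger deltas.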
   2631       if (sp_delta >= 0 && sp_delta < 4096)
   2632         __ la(sp, MemOperand(sp, sp_delta));
   2633       else
   2634         __ lay(sp, MemOperand(sp, sp_delta));
   2635     }
   2636   } else {
   2637     DCHECK(info()->IsStub());  // Functions would need to drop one more value.
   2638     Register reg = ToRegister(instr->parameter_count());
   2639     // The argument count parameter is a smi
   2640     if (NeedsEagerFrame()) {
   2641       masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
   2642     }
   2643     __ SmiToPtrArrayOffset(r0, reg);
   2644     __ AddP(sp, sp, r0);
   2645   }
   2646 
   2647   __ Ret();
   2648 }
   2649 
   2650 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
   2651   Register context = ToRegister(instr->context());
   2652   Register result = ToRegister(instr->result());
   2653   __ LoadP(result, ContextMemOperand(context, instr->slot_index()));
   2654   if (instr->hydrogen()->RequiresHoleCheck()) {
   2655     __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
   2656     if (instr->hydrogen()->DeoptimizesOnHole()) {
   2657       DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
   2658     } else {
   2659       Label skip;
   2660       __ bne(&skip, Label::kNear);
   2661       __ mov(result, Operand(factory()->undefined_value()));
   2662       __ bind(&skip);
   2663     }
   2664   }
   2665 }
   2666 
   2667 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
   2668   Register context = ToRegister(instr->context());
   2669   Register value = ToRegister(instr->value());
   2670   Register scratch = scratch0();
   2671   MemOperand target = ContextMemOperand(context, instr->slot_index());
   2672 
   2673   Label skip_assignment;
   2674 
   2675   if (instr->hydrogen()->RequiresHoleCheck()) {
   2676     __ LoadP(scratch, target);
   2677     __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
   2678     if (instr->hydrogen()->DeoptimizesOnHole()) {
   2679       DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
   2680     } else {
   2681       __ bne(&skip_assignment);
   2682     }
   2683   }
   2684 
   2685   __ StoreP(value, target);
   2686   if (instr->hydrogen()->NeedsWriteBarrier()) {
   2687     SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
   2688                                 ? OMIT_SMI_CHECK
   2689                                 : INLINE_SMI_CHECK;
   2690     __ RecordWriteContextSlot(context, target.offset(), value, scratch,
   2691                               GetLinkRegisterState(), kSaveFPRegs,
   2692                               EMIT_REMEMBERED_SET, check_needed);
   2693   }
   2694 
   2695   __ bind(&skip_assignment);
   2696 }
   2697 
   2698 void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
   2699   HObjectAccess access = instr->hydrogen()->access();
   2700   int offset = access.offset();
   2701   Register object = ToRegister(instr->object());
   2702 
   2703   if (access.IsExternalMemory()) {
   2704     Register result = ToRegister(instr->result());
   2705     MemOperand operand = MemOperand(object, offset);
   2706     __ LoadRepresentation(result, operand, access.representation(), r0);
   2707     return;
   2708   }
   2709 
   2710   if (instr->hydrogen()->representation().IsDouble()) {
   2711     DCHECK(access.IsInobject());
   2712     DoubleRegister result = ToDoubleRegister(instr->result());
   2713     __ LoadDouble(result, FieldMemOperand(object, offset));
   2714     return;
   2715   }
   2716 
   2717   Register result = ToRegister(instr->result());
   2718   if (!access.IsInobject()) {
   2719     __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
   2720     object = result;
   2721   }
   2722 
   2723   Representation representation = access.representation();
   2724 
   2725 #if V8_TARGET_ARCH_S390X
   2726   // 64-bit Smi optimization
   2727   if (representation.IsSmi() &&
   2728       instr->hydrogen()->representation().IsInteger32()) {
   2729     // Read int value directly from upper half of the smi.
   2730     offset = SmiWordOffset(offset);
   2731     representation = Representation::Integer32();
   2732   }
   2733 #endif
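          // A worked example of the optimization above (a sketch, assuming the
          // big-endian s390x Smi layout): a 64-bit Smi keeps its 32-bit payload
          // in the upper word, e.g. Smi(7) is 0x0000000700000000, so a 32-bit
          // load from the word picked by SmiWordOffset() yields the untagged
          // integer with no shift.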
   2734 
   2735   __ LoadRepresentation(result, FieldMemOperand(object, offset), representation,
   2736                         r0);
   2737 }
   2738 
   2739 void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
   2740   Register scratch = scratch0();
   2741   Register function = ToRegister(instr->function());
   2742   Register result = ToRegister(instr->result());
   2743 
   2744   // Get the prototype or initial map from the function.
   2745   __ LoadP(result,
   2746            FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
   2747 
   2748   // Check that the function has a prototype or an initial map.
   2749   __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
   2750   DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
   2751 
   2752   // If the function does not have an initial map, we're done.
   2753   Label done;
   2754   __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
   2755   __ bne(&done, Label::kNear);
   2756 
   2757   // Get the prototype from the initial map.
   2758   __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
   2759 
   2760   // All done.
   2761   __ bind(&done);
   2762 }
   2763 
   2764 void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
   2765   Register result = ToRegister(instr->result());
   2766   __ LoadRoot(result, instr->index());
   2767 }
   2768 
   2769 void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
   2770   Register arguments = ToRegister(instr->arguments());
   2771   Register result = ToRegister(instr->result());
   2772   // There are two words between the frame pointer and the last argument.
   2773   // Subtracting from length accounts for one of them; add one more.
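          // E.g. with length == 5 and index == 2, the element lives at
          // MemOperand(arguments, (5 - 2 + 1) * kPointerSize).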
   2774   if (instr->length()->IsConstantOperand()) {
   2775     int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
   2776     if (instr->index()->IsConstantOperand()) {
   2777       int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   2778       int index = (const_length - const_index) + 1;
   2779       __ LoadP(result, MemOperand(arguments, index * kPointerSize));
   2780     } else {
   2781       Register index = ToRegister(instr->index());
   2782       __ SubP(result, index, Operand(const_length + 1));
   2783       __ LoadComplementRR(result, result);
   2784       __ ShiftLeftP(result, result, Operand(kPointerSizeLog2));
   2785       __ LoadP(result, MemOperand(arguments, result));
   2786     }
   2787   } else if (instr->index()->IsConstantOperand()) {
   2788     Register length = ToRegister(instr->length());
   2789     int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   2790     int loc = const_index - 1;
   2791     if (loc != 0) {
   2792       __ SubP(result, length, Operand(loc));
   2793       __ ShiftLeftP(result, result, Operand(kPointerSizeLog2));
   2794       __ LoadP(result, MemOperand(arguments, result));
   2795     } else {
   2796       __ ShiftLeftP(result, length, Operand(kPointerSizeLog2));
   2797       __ LoadP(result, MemOperand(arguments, result));
   2798     }
   2799   } else {
   2800     Register length = ToRegister(instr->length());
   2801     Register index = ToRegister(instr->index());
   2802     __ SubP(result, length, index);
   2803     __ AddP(result, result, Operand(1));
   2804     __ ShiftLeftP(result, result, Operand(kPointerSizeLog2));
   2805     __ LoadP(result, MemOperand(arguments, result));
   2806   }
   2807 }
   2808 
   2809 void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
   2810   Register external_pointer = ToRegister(instr->elements());
   2811   Register key = no_reg;
   2812   ElementsKind elements_kind = instr->elements_kind();
   2813   bool key_is_constant = instr->key()->IsConstantOperand();
   2814   int constant_key = 0;
   2815   if (key_is_constant) {
   2816     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   2817     if (constant_key & 0xF0000000) {
   2818       Abort(kArrayIndexConstantValueTooBig);
   2819     }
   2820   } else {
   2821     key = ToRegister(instr->key());
   2822   }
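          // (Rejecting keys with any of the top four bits set guarantees that
          // constant_key << element_size_shift cannot overflow an int32, since
          // element_size_shift is at most 3.)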
   2823   int element_size_shift = ElementsKindToShiftSize(elements_kind);
   2824   bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
   2825   bool keyMaybeNegative = instr->hydrogen()->IsDehoisted();
   2826   int base_offset = instr->base_offset();
   2827   bool use_scratch = false;
   2828 
   2829   if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
   2830     DoubleRegister result = ToDoubleRegister(instr->result());
   2831     if (key_is_constant) {
   2832       base_offset += constant_key << element_size_shift;
   2833       if (!is_int20(base_offset)) {
   2834         __ mov(scratch0(), Operand(base_offset));
   2835         base_offset = 0;
   2836         use_scratch = true;
   2837       }
   2838     } else {
   2839       __ IndexToArrayOffset(scratch0(), key, element_size_shift, key_is_smi,
   2840                             keyMaybeNegative);
   2841       use_scratch = true;
   2842     }
   2843     if (elements_kind == FLOAT32_ELEMENTS) {
   2844       if (!use_scratch) {
   2845         __ ldeb(result, MemOperand(external_pointer, base_offset));
   2846       } else {
   2847         __ ldeb(result, MemOperand(scratch0(), external_pointer, base_offset));
   2848       }
   2849     } else {  // i.e. elements_kind == FLOAT64_ELEMENTS
   2850       if (!use_scratch) {
   2851         __ LoadDouble(result, MemOperand(external_pointer, base_offset));
   2852       } else {
   2853         __ LoadDouble(result,
   2854                       MemOperand(scratch0(), external_pointer, base_offset));
   2855       }
   2856     }
   2857   } else {
   2858     Register result = ToRegister(instr->result());
   2859     MemOperand mem_operand =
   2860         PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
   2861                             constant_key, element_size_shift, base_offset,
   2862                             keyMaybeNegative);
   2863     switch (elements_kind) {
   2864       case INT8_ELEMENTS:
   2865         __ LoadB(result, mem_operand);
   2866         break;
   2867       case UINT8_ELEMENTS:
   2868       case UINT8_CLAMPED_ELEMENTS:
   2869         __ LoadlB(result, mem_operand);
   2870         break;
   2871       case INT16_ELEMENTS:
   2872         __ LoadHalfWordP(result, mem_operand);
   2873         break;
   2874       case UINT16_ELEMENTS:
   2875         __ LoadLogicalHalfWordP(result, mem_operand);
   2876         break;
   2877       case INT32_ELEMENTS:
   2878         __ LoadW(result, mem_operand, r0);
   2879         break;
   2880       case UINT32_ELEMENTS:
   2881         __ LoadlW(result, mem_operand, r0);
   2882         if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
   2883           __ CmpLogical32(result, Operand(0x80000000));
   2884           DeoptimizeIf(ge, instr, DeoptimizeReason::kNegativeValue);
   2885         }
   2886         break;
   2887       case FLOAT32_ELEMENTS:
   2888       case FLOAT64_ELEMENTS:
   2889       case FAST_HOLEY_DOUBLE_ELEMENTS:
   2890       case FAST_HOLEY_ELEMENTS:
   2891       case FAST_HOLEY_SMI_ELEMENTS:
   2892       case FAST_DOUBLE_ELEMENTS:
   2893       case FAST_ELEMENTS:
   2894       case FAST_SMI_ELEMENTS:
   2895       case DICTIONARY_ELEMENTS:
   2896       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
   2897       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
   2898       case FAST_STRING_WRAPPER_ELEMENTS:
   2899       case SLOW_STRING_WRAPPER_ELEMENTS:
   2900       case NO_ELEMENTS:
   2901         UNREACHABLE();
   2902         break;
   2903     }
   2904   }
   2905 }
   2906 
   2907 void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
   2908   Register elements = ToRegister(instr->elements());
   2909   bool key_is_constant = instr->key()->IsConstantOperand();
   2910   Register key = no_reg;
   2911   DoubleRegister result = ToDoubleRegister(instr->result());
   2912   Register scratch = scratch0();
   2913 
   2914   int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
   2915   bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
   2916   bool keyMaybeNegative = instr->hydrogen()->IsDehoisted();
   2917   int constant_key = 0;
   2918   if (key_is_constant) {
   2919     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   2920     if (constant_key & 0xF0000000) {
   2921       Abort(kArrayIndexConstantValueTooBig);
   2922     }
   2923   } else {
   2924     key = ToRegister(instr->key());
   2925   }
   2926 
   2927   bool use_scratch = false;
   2928   intptr_t base_offset = instr->base_offset() + constant_key * kDoubleSize;
   2929   if (!key_is_constant) {
   2930     use_scratch = true;
   2931     __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi,
   2932                           keyMaybeNegative);
   2933   }
   2934 
   2935   // Memory references support up to a 20-bit signed displacement in RXY form.
   2936   // Include Register::kExponentOffset in the check, so we are guaranteed not
   2937   // to overflow the displacement later.
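          // E.g. RXY-form loads such as LDY take displacements in
          // [-524288, 524287]; adding Register::kExponentOffset for the hole
          // check below must stay within that range.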
   2938   if (!is_int20(base_offset + Register::kExponentOffset)) {
   2939     use_scratch = true;
   2940     if (key_is_constant) {
   2941       __ mov(scratch, Operand(base_offset));
   2942     } else {
   2943       __ AddP(scratch, Operand(base_offset));
   2944     }
   2945     base_offset = 0;
   2946   }
   2947 
   2948   if (!use_scratch) {
   2949     __ LoadDouble(result, MemOperand(elements, base_offset));
   2950   } else {
   2951     __ LoadDouble(result, MemOperand(scratch, elements, base_offset));
   2952   }
   2953 
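          // The hole is encoded as a NaN with a dedicated bit pattern, so
          // comparing only the upper 32 bits of the double against
          // kHoleNanUpper32 suffices to identify it.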
   2954   if (instr->hydrogen()->RequiresHoleCheck()) {
   2955     if (!use_scratch) {
   2956       __ LoadlW(r0,
   2957                 MemOperand(elements, base_offset + Register::kExponentOffset));
   2958     } else {
   2959       __ LoadlW(r0, MemOperand(scratch, elements,
   2960                                base_offset + Register::kExponentOffset));
   2961     }
   2962     __ Cmp32(r0, Operand(kHoleNanUpper32));
   2963     DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
   2964   }
   2965 }
   2966 
   2967 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
   2968   HLoadKeyed* hinstr = instr->hydrogen();
   2969   Register elements = ToRegister(instr->elements());
   2970   Register result = ToRegister(instr->result());
   2971   Register scratch = scratch0();
   2972   int offset = instr->base_offset();
   2973 
   2974   if (instr->key()->IsConstantOperand()) {
   2975     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
   2976     offset += ToInteger32(const_operand) * kPointerSize;
   2977   } else {
   2978     Register key = ToRegister(instr->key());
   2979     // Even though the HLoadKeyed instruction forces the input
   2980     // representation for the key to be an integer, the input gets replaced
   2981     // during bounds check elimination with the index argument to the bounds
   2982     // check, which can be tagged, so that case must be handled here, too.
   2983     if (hinstr->key()->representation().IsSmi()) {
   2984       __ SmiToPtrArrayOffset(scratch, key);
   2985     } else {
   2986       __ ShiftLeftP(scratch, key, Operand(kPointerSizeLog2));
   2987     }
   2988   }
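          // In both cases scratch now holds key * kPointerSize, the byte offset
          // of the element (offset is added separately below).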
   2989 
   2990   bool requires_hole_check = hinstr->RequiresHoleCheck();
   2991   Representation representation = hinstr->representation();
   2992 
   2993 #if V8_TARGET_ARCH_S390X
   2994   // 64-bit Smi optimization
   2995   if (representation.IsInteger32() &&
   2996       hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
   2997     DCHECK(!requires_hole_check);
   2998     // Read int value directly from upper half of the smi.
   2999     offset = SmiWordOffset(offset);
   3000   }
   3001 #endif
   3002 
   3003   if (instr->key()->IsConstantOperand()) {
   3004     __ LoadRepresentation(result, MemOperand(elements, offset), representation,
   3005                           r1);
   3006   } else {
   3007     __ LoadRepresentation(result, MemOperand(scratch, elements, offset),
   3008                           representation, r1);
   3009   }
   3010 
   3011   // Check for the hole value.
   3012   if (requires_hole_check) {
   3013     if (IsFastSmiElementsKind(hinstr->elements_kind())) {
   3014       __ TestIfSmi(result);
   3015       DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
   3016     } else {
   3017       __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
   3018       DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
   3019     }
   3020   } else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
   3021     DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
   3022     Label done;
   3023     __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
   3024     __ CmpP(result, scratch);
   3025     __ bne(&done);
   3026     if (info()->IsStub()) {
   3027       // A stub can safely convert the hole to undefined only if the array
   3028       // protector cell contains (Smi) Isolate::kProtectorValid. Otherwise
   3029       // it needs to bail out.
   3030       __ LoadRoot(result, Heap::kArrayProtectorRootIndex);
   3031       __ LoadP(result, FieldMemOperand(result, PropertyCell::kValueOffset));
   3032       __ CmpSmiLiteral(result, Smi::FromInt(Isolate::kProtectorValid), r0);
   3033       DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
   3034     }
   3035     __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
   3036     __ bind(&done);
   3037   }
   3038 }
   3039 
   3040 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
   3041   if (instr->is_fixed_typed_array()) {
   3042     DoLoadKeyedExternalArray(instr);
   3043   } else if (instr->hydrogen()->representation().IsDouble()) {
   3044     DoLoadKeyedFixedDoubleArray(instr);
   3045   } else {
   3046     DoLoadKeyedFixedArray(instr);
   3047   }
   3048 }
   3049 
   3050 MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base,
   3051                                          bool key_is_constant, bool key_is_smi,
   3052                                          int constant_key,
   3053                                          int element_size_shift,
   3054                                          int base_offset,
   3055                                          bool keyMaybeNegative) {
   3056   Register scratch = scratch0();
   3057 
   3058   if (key_is_constant) {
   3059     int offset = (base_offset + (constant_key << element_size_shift));
   3060     if (!is_int20(offset)) {
   3061       __ mov(scratch, Operand(offset));
   3062       return MemOperand(base, scratch);
   3063     } else {
   3064       return MemOperand(base,
   3065                         (constant_key << element_size_shift) + base_offset);
   3066     }
   3067   }
   3068 
   3069   bool needs_shift =
   3070       (element_size_shift != (key_is_smi ? kSmiTagSize + kSmiShiftSize : 0));
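          // A tagged Smi is already its value scaled by
          // 2^(kSmiTagSize + kSmiShiftSize); when that scale equals the element
          // size, the tagged key can serve directly as the byte offset.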
   3071 
   3072   if (needs_shift) {
   3073     __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi,
   3074                           keyMaybeNegative);
   3075   } else {
   3076     scratch = key;
   3077   }
   3078 
   3079   if (!is_int20(base_offset)) {
   3080     __ AddP(scratch, Operand(base_offset));
   3081     base_offset = 0;
   3082   }
   3083   return MemOperand(scratch, base, base_offset);
   3084 }
   3085 
   3086 void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
   3087   Register scratch = scratch0();
   3088   Register result = ToRegister(instr->result());
   3089 
   3090   if (instr->hydrogen()->from_inlined()) {
   3091     __ lay(result, MemOperand(sp, -2 * kPointerSize));
   3092   } else if (instr->hydrogen()->arguments_adaptor()) {
   3093     // Check if the calling frame is an arguments adaptor frame.
   3094     Label done, adapted;
   3095     __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   3096     __ LoadP(
   3097         result,
   3098         MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
   3099     __ CmpP(result,
   3100             Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   3101 
   3102     // Result is the frame pointer for the frame if not adapted and for the real
   3103     // frame below the adaptor frame if adapted.
   3104     __ beq(&adapted, Label::kNear);
   3105     __ LoadRR(result, fp);
   3106     __ b(&done, Label::kNear);
   3107 
   3108     __ bind(&adapted);
   3109     __ LoadRR(result, scratch);
   3110     __ bind(&done);
   3111   } else {
   3112     __ LoadRR(result, fp);
   3113   }
   3114 }
   3115 
   3116 void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
   3117   Register elem = ToRegister(instr->elements());
   3118   Register result = ToRegister(instr->result());
   3119 
   3120   Label done;
   3121 
   3122   // If there is no arguments adaptor frame, the number of arguments is fixed.
   3123   __ CmpP(fp, elem);
   3124   __ mov(result, Operand(scope()->num_parameters()));
   3125   __ beq(&done, Label::kNear);
   3126 
   3127   // Arguments adaptor frame present. Get argument length from there.
   3128   __ LoadP(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   3129   __ LoadP(result,
   3130            MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
   3131   __ SmiUntag(result);
   3132 
   3133   // Argument length is in result register.
   3134   __ bind(&done);
   3135 }
   3136 
   3137 void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
   3138   Register receiver = ToRegister(instr->receiver());
   3139   Register function = ToRegister(instr->function());
   3140   Register result = ToRegister(instr->result());
   3141   Register scratch = scratch0();
   3142 
   3143   // If the receiver is null or undefined, we have to pass the global
   3144   // object as a receiver to normal functions. Values have to be
   3145   // passed unchanged to builtins and strict-mode functions.
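          // E.g. in sloppy mode, (function() { return this; }).call(null)
          // observes the global proxy rather than null.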
   3146   Label global_object, result_in_receiver;
   3147 
   3148   if (!instr->hydrogen()->known_function()) {
   3149     // Do not transform the receiver to object for strict mode
   3150     // functions or builtins.
   3151     __ LoadP(scratch,
   3152              FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
   3153     __ LoadlW(scratch, FieldMemOperand(
   3154                            scratch, SharedFunctionInfo::kCompilerHintsOffset));
   3155     __ AndP(r0, scratch, Operand((1 << SharedFunctionInfo::kStrictModeBit) |
   3156                                  (1 << SharedFunctionInfo::kNativeBit)));
   3157     __ bne(&result_in_receiver, Label::kNear);
   3158   }
   3159 
   3160   // Normal function. Replace undefined or null with global receiver.
   3161   __ CompareRoot(receiver, Heap::kNullValueRootIndex);
   3162   __ beq(&global_object, Label::kNear);
   3163   __ CompareRoot(receiver, Heap::kUndefinedValueRootIndex);
   3164   __ beq(&global_object, Label::kNear);
   3165 
   3166   // Deoptimize if the receiver is not a JS object.
   3167   __ TestIfSmi(receiver);
   3168   DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
   3169   __ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
   3170   DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject);
   3171 
   3172   __ b(&result_in_receiver, Label::kNear);
   3173   __ bind(&global_object);
   3174   __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset));
   3175   __ LoadP(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
   3176   __ LoadP(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
   3177 
   3178   if (result.is(receiver)) {
   3179     __ bind(&result_in_receiver);
   3180   } else {
   3181     Label result_ok;
   3182     __ b(&result_ok, Label::kNear);
   3183     __ bind(&result_in_receiver);
   3184     __ LoadRR(result, receiver);
   3185     __ bind(&result_ok);
   3186   }
   3187 }
   3188 
   3189 void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
   3190   Register receiver = ToRegister(instr->receiver());
   3191   Register function = ToRegister(instr->function());
   3192   Register length = ToRegister(instr->length());
   3193   Register elements = ToRegister(instr->elements());
   3194   Register scratch = scratch0();
   3195   DCHECK(receiver.is(r2));  // Used for parameter count.
   3196   DCHECK(function.is(r3));  // Required by InvokeFunction.
   3197   DCHECK(ToRegister(instr->result()).is(r2));
   3198 
   3199   // Copy the arguments to this function possibly from the
   3200   // adaptor frame below it.
   3201   const uint32_t kArgumentsLimit = 1 * KB;
   3202   __ CmpLogicalP(length, Operand(kArgumentsLimit));
   3203   DeoptimizeIf(gt, instr, DeoptimizeReason::kTooManyArguments);
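          // (kArgumentsLimit caps Function.prototype.apply at 1024 pushed
          // arguments here; longer argument lists bail out via deoptimization.)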
   3204 
   3205   // Push the receiver and use the register to keep the original
   3206   // number of arguments.
   3207   __ push(receiver);
   3208   __ LoadRR(receiver, length);
   3209   // The arguments are located one pointer size past elements.
   3210   __ AddP(elements, Operand(1 * kPointerSize));
   3211 
   3212   // Loop through the arguments pushing them onto the execution
   3213   // stack.
   3214   Label invoke, loop;
   3215   // length is a small non-negative integer, due to the test above.
   3216   __ CmpP(length, Operand::Zero());
   3217   __ beq(&invoke, Label::kNear);
   3218   __ bind(&loop);
   3219   __ ShiftLeftP(r1, length, Operand(kPointerSizeLog2));
   3220   __ LoadP(scratch, MemOperand(elements, r1));
   3221   __ push(scratch);
   3222   __ BranchOnCount(length, &loop);
   3223 
   3224   __ bind(&invoke);
   3225 
   3226   InvokeFlag flag = CALL_FUNCTION;
   3227   if (instr->hydrogen()->tail_call_mode() == TailCallMode::kAllow) {
   3228     DCHECK(!info()->saves_caller_doubles());
   3229     // TODO(ishell): drop current frame before pushing arguments to the stack.
   3230     flag = JUMP_FUNCTION;
   3231     ParameterCount actual(r2);
   3232     // It is safe to use r5, r6 and r7 as scratch registers here given that
   3233     // 1) we are not going to return to the caller function anyway,
   3234     // 2) r5 (new.target) will be initialized below.
   3235     PrepareForTailCall(actual, r5, r6, r7);
   3236   }
   3237 
   3238   DCHECK(instr->HasPointerMap());
   3239   LPointerMap* pointers = instr->pointer_map();
   3240   SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
   3241   // The number of arguments is stored in receiver, which is r2, as expected
   3242   // by InvokeFunction.
   3243   ParameterCount actual(receiver);
   3244   __ InvokeFunction(function, no_reg, actual, flag, safepoint_generator);
   3245 }
   3246 
   3247 void LCodeGen::DoPushArgument(LPushArgument* instr) {
   3248   LOperand* argument = instr->value();
   3249   if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
   3250     Abort(kDoPushArgumentNotImplementedForDoubleType);
   3251   } else {
   3252     Register argument_reg = EmitLoadRegister(argument, ip);
   3253     __ push(argument_reg);
   3254   }
   3255 }
   3256 
   3257 void LCodeGen::DoDrop(LDrop* instr) { __ Drop(instr->count()); }
   3258 
   3259 void LCodeGen::DoThisFunction(LThisFunction* instr) {
   3260   Register result = ToRegister(instr->result());
   3261   __ LoadP(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   3262 }
   3263 
   3264 void LCodeGen::DoContext(LContext* instr) {
   3265   // If there is a non-return use, the context must be moved to a register.
   3266   Register result = ToRegister(instr->result());
   3267   if (info()->IsOptimizing()) {
   3268     __ LoadP(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
   3269   } else {
   3270     // If there is no frame, the context must be in cp.
   3271     DCHECK(result.is(cp));
   3272   }
   3273 }
   3274 
   3275 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
   3276   DCHECK(ToRegister(instr->context()).is(cp));
   3277   __ Move(scratch0(), instr->hydrogen()->declarations());
   3278   __ push(scratch0());
   3279   __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
   3280   __ push(scratch0());
   3281   __ Move(scratch0(), instr->hydrogen()->feedback_vector());
   3282   __ push(scratch0());
   3283   CallRuntime(Runtime::kDeclareGlobals, instr);
   3284 }
   3285 
   3286 void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
   3287                                  int formal_parameter_count, int arity,
   3288                                  bool is_tail_call, LInstruction* instr) {
   3289   bool dont_adapt_arguments =
   3290       formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
   3291   bool can_invoke_directly =
   3292       dont_adapt_arguments || formal_parameter_count == arity;
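          // A direct invoke skips the arguments adaptor: it is only safe when
          // the callee either declares exactly 'arity' parameters or opts out
          // of argument adaptation altogether.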
   3293 
   3294   Register function_reg = r3;
   3295 
   3296   LPointerMap* pointers = instr->pointer_map();
   3297 
   3298   if (can_invoke_directly) {
   3299     // Change context.
   3300     __ LoadP(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
   3301 
   3302     // Always initialize new target and number of actual arguments.
   3303     __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
   3304     __ mov(r2, Operand(arity));
   3305 
   3306     bool is_self_call = function.is_identical_to(info()->closure());
   3307 
   3308     // Invoke function.
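            // A self-call targets the code object currently being generated;
            // the handle below points into the masm's own CodeObject and is
            // resolved by relocation once the code is finalized.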
   3309     if (is_self_call) {
   3310       Handle<Code> self(reinterpret_cast<Code**>(__ CodeObject().location()));
   3311       if (is_tail_call) {
   3312         __ Jump(self, RelocInfo::CODE_TARGET);
   3313       } else {
   3314         __ Call(self, RelocInfo::CODE_TARGET);
   3315       }
   3316     } else {
   3317       __ LoadP(ip, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
   3318       if (is_tail_call) {
   3319         __ JumpToJSEntry(ip);
   3320       } else {
   3321         __ CallJSEntry(ip);
   3322       }
   3323     }
   3324 
   3325     if (!is_tail_call) {
   3326       // Set up deoptimization.
   3327       RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
   3328     }
   3329   } else {
   3330     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   3331     ParameterCount actual(arity);
   3332     ParameterCount expected(formal_parameter_count);
   3333     InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
   3334     __ InvokeFunction(function_reg, expected, actual, flag, generator);
   3335   }
   3336 }
   3337 
   3338 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
   3339   DCHECK(instr->context() != NULL);
   3340   DCHECK(ToRegister(instr->context()).is(cp));
   3341   Register input = ToRegister(instr->value());
   3342   Register result = ToRegister(instr->result());
   3343   Register scratch = scratch0();
   3344 
   3345   // Deoptimize if not a heap number.
   3346   __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
   3347   __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
   3348   DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
   3349 
   3350   Label done;
   3351   Register exponent = scratch0();
   3352   scratch = no_reg;
   3353   __ LoadlW(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
   3354   // Check the sign of the argument. If the argument is positive, just
   3355   // return it.
   3356   __ Cmp32(exponent, Operand::Zero());
   3357   // Move the input to the result if necessary.
   3358   __ Move(result, input);
   3359   __ bge(&done);
   3360 
   3361   // Input is negative. Reverse its sign.
   3362   // Preserve the value of all registers.
   3363   {
   3364     PushSafepointRegistersScope scope(this);
   3365 
   3366     // Registers were saved at the safepoint, so we can use
   3367     // many scratch registers.
   3368     Register tmp1 = input.is(r3) ? r2 : r3;
   3369     Register tmp2 = input.is(r4) ? r2 : r4;
   3370     Register tmp3 = input.is(r5) ? r2 : r5;
   3371     Register tmp4 = input.is(r6) ? r2 : r6;
   3372 
   3373     // exponent: floating point exponent value.
   3374 
   3375     Label allocated, slow;
   3376     __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
   3377     __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
   3378     __ b(&allocated);
   3379 
   3380     // Slow case: Call the runtime system to do the number allocation.
   3381     __ bind(&slow);
   3382 
   3383     CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
   3384                             instr->context());
   3385     // Set the pointer to the new heap number in tmp1.
   3386     if (!tmp1.is(r2)) __ LoadRR(tmp1, r2);
   3387     // Restore input_reg after call to runtime.
   3388     __ LoadFromSafepointRegisterSlot(input, input);
   3389     __ LoadlW(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
   3390 
   3391     __ bind(&allocated);
   3392     // exponent: floating point exponent value.
   3393     // tmp1: allocated heap number.
   3394 
   3395     // Clear the sign bit.
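            // E.g. -1.5 has the upper word 0xBFF80000; clearing the IEEE sign
            // bit (kSignMask) leaves 0x3FF80000, the upper word of +1.5.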
   3396     __ nilf(exponent, Operand(~HeapNumber::kSignMask));
   3397     __ StoreW(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
   3398     __ LoadlW(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
   3399     __ StoreW(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
   3400 
   3401     __ StoreToSafepointRegisterSlot(tmp1, result);
   3402   }
   3403 
   3404   __ bind(&done);
   3405 }
   3406 
   3407 void LCodeGen::EmitMathAbs(LMathAbs* instr) {
   3408   Register input = ToRegister(instr->value());
   3409   Register result = ToRegister(instr->result());
   3410   __ LoadPositiveP(result, input);
   3411   // Deoptimize on overflow.
   3412   DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow, cr0);
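          // (Only the most negative value overflows, since its absolute value
          // is not representable.)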
   3413 }
   3414 
   3415 #if V8_TARGET_ARCH_S390X
   3416 void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
   3417   Register input = ToRegister(instr->value());
   3418   Register result = ToRegister(instr->result());
   3419   __ LoadPositive32(result, input);
   3420   DeoptimizeIf(overflow, instr, DeoptimizeReason::kOverflow);
   3421 }
   3422 #endif
   3423 
   3424 void LCodeGen::DoMathAbs(LMathAbs* instr) {
   3425   // Class for deferred case.
   3426   class DeferredMathAbsTaggedHeapNumber final : public LDeferredCode {
   3427    public:
   3428     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
   3429         : LDeferredCode(codegen), instr_(instr) {}
   3430     void Generate() override {
   3431       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
   3432     }
   3433     LInstruction* instr() override { return instr_; }
   3434 
   3435    private:
   3436     LMathAbs* instr_;
   3437   };
   3438 
   3439   Representation r = instr->hydrogen()->value()->representation();
   3440   if (r.IsDouble()) {
   3441     DoubleRegister input = ToDoubleRegister(instr->value());
   3442     DoubleRegister result = ToDoubleRegister(instr->result());
   3443     __ lpdbr(result, input);
   3444 #if V8_TARGET_ARCH_S390X
   3445   } else if (r.IsInteger32()) {
   3446     EmitInteger32MathAbs(instr);
   3447   } else if (r.IsSmi()) {
   3448 #else
   3449   } else if (r.IsSmiOrInteger32()) {
   3450 #endif
   3451     EmitMathAbs(instr);
   3452   } else {
   3453     // Representation is tagged.
   3454     DeferredMathAbsTaggedHeapNumber* deferred =
   3455         new (zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
   3456     Register input = ToRegister(instr->value());
   3457     // Smi check.
   3458     __ JumpIfNotSmi(input, deferred->entry());
   3459     // If smi, handle it directly.
   3460     EmitMathAbs(instr);
   3461     __ bind(deferred->exit());
   3462   }
   3463 }
   3464 
   3465 void LCodeGen::DoMathFloor(LMathFloor* instr) {
   3466   DoubleRegister input = ToDoubleRegister(instr->value());
   3467   Register result = ToRegister(instr->result());
   3468   Register input_high = scratch0();
   3469   Register scratch = ip;
   3470   Label done, exact;
   3471 
   3472   __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
   3473                    &exact);
   3474   DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN);
   3475 
   3476   __ bind(&exact);
   3477   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3478     // Test for -0.
   3479     __ CmpP(result, Operand::Zero());
   3480     __ bne(&done, Label::kNear);
   3481     __ Cmp32(input_high, Operand::Zero());
   3482     DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
   3483   }
   3484   __ bind(&done);
   3485 }
   3486 
   3487 void LCodeGen::DoMathRound(LMathRound* instr) {
   3488   DoubleRegister input = ToDoubleRegister(instr->value());
   3489   Register result = ToRegister(instr->result());
   3490   DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
   3491   DoubleRegister input_plus_dot_five = double_scratch1;
   3492   Register scratch1 = scratch0();
   3493   Register scratch2 = ip;
   3494   DoubleRegister dot_five = double_scratch0();
   3495   Label convert, done;
   3496 
   3497   __ LoadDoubleLiteral(dot_five, 0.5, r0);
   3498   __ lpdbr(double_scratch1, input);
   3499   __ cdbr(double_scratch1, dot_five);
   3500   DeoptimizeIf(unordered, instr, DeoptimizeReason::kLostPrecisionOrNaN);
   3501   // If input is in [-0.5, -0], the result is -0.
   3502   // If input is in [+0, +0.5[, the result is +0.
   3503   // If the input is +0.5, the result is 1.
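          // (JS rounds half toward +Infinity: Math.round(0.4) is 0,
          // Math.round(0.5) is 1, and Math.round(-0.5) is -0, which is why the
          // kBailoutOnMinusZero check below may deoptimize.)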
   3504   __ bgt(&convert, Label::kNear);  // Out of [-0.5, +0.5].
   3505   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   3506     // [-0.5, -0] (negative) yields minus zero.
   3507     __ TestDoubleSign(input, scratch1);
   3508     DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
   3509   }
   3510   Label return_zero;
   3511   __ cdbr(input, dot_five);
   3512   __ bne(&return_zero, Label::kNear);
   3513   __ LoadImmP(result, Operand(1));  // input == +0.5: round to 1.
   3514   __ b(&done, Label::kNear);
   3515   // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
   3516   // flag kBailoutOnMinusZero.
   3517   __ bind(&return_zero);
   3518   __ LoadImmP(result, Operand::Zero());
   3519   __ b(&done, Label::kNear);
   3520 
   3521   __ bind(&convert);
   3522   __ ldr(input_plus_dot_five, input);
   3523   __ adbr(input_plus_dot_five, dot_five);
   3524   // Reuse dot_five (double_scratch0) as we no longer need this value.
   3525   __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
   3526                    double_scratch0(), &done, &done);
   3527   DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN);
   3528   __ bind(&done);
   3529 }
   3530 
   3531 void LCodeGen::DoMathFround(LMathFround* instr) {
   3532   DoubleRegister input_reg = ToDoubleRegister(instr->value());
   3533   DoubleRegister output_reg = ToDoubleRegister(instr->result());
   3534 
   3535   // Round double to float
   3536   __ ledbr(output_reg, input_reg);
   3537   // Extend from float to double
   3538   __ ldebr(output_reg, output_reg);
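          // Together these implement Math.fround; e.g. Math.fround(1.1) yields
          // the nearest float32 value, 1.100000023841858.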
   3539 }
   3540 
   3541 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
   3542   DoubleRegister result = ToDoubleRegister(instr->result());
   3543   LOperand* input = instr->value();
   3544   if (input->IsDoubleRegister()) {
   3545     __ Sqrt(result, ToDoubleRegister(instr->value()));
   3546   } else {
   3547     __ Sqrt(result, ToMemOperand(input));
   3548   }
   3549 }
   3550 
   3551 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
   3552   DoubleRegister input = ToDoubleRegister(instr->value());
   3553   DoubleRegister result = ToDoubleRegister(instr->result());
   3554   DoubleRegister temp = double_scratch0();
   3555 
   3556   // Note that according to ECMA-262 15.8.2.13:
   3557   // Math.pow(-Infinity, 0.5) == Infinity
   3558   // Math.sqrt(-Infinity) == NaN
   3559   Label skip, done;
   3560 
   3561   __ LoadDoubleLiteral(temp, -V8_INFINITY, scratch0());
   3562   __ cdbr(input, temp);
   3563   __ bne(&skip, Label::kNear);
   3564   __ lcdbr(result, temp);
   3565   __ b(&done, Label::kNear);
   3566 
   3567   // Add +0 to convert -0 to +0.
   3568   __ bind(&skip);
   3569   __ ldr(result, input);
   3570   __ lzdr(kDoubleRegZero);
   3571   __ adbr(result, kDoubleRegZero);
   3572   __ sqdbr(result, result);
   3573   __ bind(&done);
   3574 }
   3575 
   3576 void LCodeGen::DoPower(LPower* instr) {
   3577   Representation exponent_type = instr->hydrogen()->right()->representation();
   3578   // Having marked this as a call, we can use any registers.
   3579   // Just make sure that the input/output registers are the expected ones.
   3580   Register tagged_exponent = MathPowTaggedDescriptor::exponent();
   3581   DCHECK(!instr->right()->IsDoubleRegister() ||
   3582          ToDoubleRegister(instr->right()).is(d2));
   3583   DCHECK(!instr->right()->IsRegister() ||
   3584          ToRegister(instr->right()).is(tagged_exponent));
   3585   DCHECK(ToDoubleRegister(instr->left()).is(d1));
   3586   DCHECK(ToDoubleRegister(instr->result()).is(d3));
   3587 
   3588   if (exponent_type.IsSmi()) {
   3589     MathPowStub stub(isolate(), MathPowStub::TAGGED);
   3590     __ CallStub(&stub);
   3591   } else if (exponent_type.IsTagged()) {
   3592     Label no_deopt;
   3593     __ JumpIfSmi(tagged_exponent, &no_deopt);
   3594     __ LoadP(r9, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
   3595     __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
   3596     DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
   3597     __ bind(&no_deopt);
   3598     MathPowStub stub(isolate(), MathPowStub::TAGGED);
   3599     __ CallStub(&stub);
   3600   } else if (exponent_type.IsInteger32()) {
   3601     MathPowStub stub(isolate(), MathPowStub::INTEGER);
   3602     __ CallStub(&stub);
   3603   } else {
   3604     DCHECK(exponent_type.IsDouble());
   3605     MathPowStub stub(isolate(), MathPowStub::DOUBLE);
   3606     __ CallStub(&stub);
   3607   }
   3608 }
   3609 
   3610 void LCodeGen::DoMathCos(LMathCos* instr) {
   3611   __ PrepareCallCFunction(0, 1, scratch0());
   3612   __ MovToFloatParameter(ToDoubleRegister(instr->value()));
   3613   __ CallCFunction(ExternalReference::ieee754_cos_function(isolate()), 0, 1);
   3614   __ MovFromFloatResult(ToDoubleRegister(instr->result()));
   3615 }
   3616 
   3617 void LCodeGen::DoMathSin(LMathSin* instr) {
   3618   __ PrepareCallCFunction(0, 1, scratch0());
   3619   __ MovToFloatParameter(ToDoubleRegister(instr->value()));
   3620   __ CallCFunction(ExternalReference::ieee754_sin_function(isolate()), 0, 1);
   3621   __ MovFromFloatResult(ToDoubleRegister(instr->result()));
   3622 }
   3623 
   3624 void LCodeGen::DoMathExp(LMathExp* instr) {
   3625   __ PrepareCallCFunction(0, 1, scratch0());
   3626   __ MovToFloatParameter(ToDoubleRegister(instr->value()));
   3627   __ CallCFunction(ExternalReference::ieee754_exp_function(isolate()), 0, 1);
   3628   __ MovFromFloatResult(ToDoubleRegister(instr->result()));
   3629 }
   3630 
   3631 void LCodeGen::DoMathLog(LMathLog* instr) {
   3632   __ PrepareCallCFunction(0, 1, scratch0());
   3633   __ MovToFloatParameter(ToDoubleRegister(instr->value()));
   3634   __ CallCFunction(ExternalReference::ieee754_log_function(isolate()), 0, 1);
   3635   __ MovFromFloatResult(ToDoubleRegister(instr->result()));
   3636 }
   3637 
   3638 void LCodeGen::DoMathClz32(LMathClz32* instr) {
   3639   Register input = ToRegister(instr->value());
   3640   Register result = ToRegister(instr->result());
   3641   Label done;
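          // A sketch of the technique: zero-extend the input to 64 bits, count
          // leading zeros of the 64-bit value with flogr, then subtract 32 for
          // the 32-bit count; e.g. input 1 gives flogr 63 and result 31,
          // matching Math.clz32(1).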
   3642   __ llgfr(result, input);
   3643   __ flogr(r0, result);
   3644   __ LoadRR(result, r0);
   3645   __ CmpP(r0, Operand::Zero());
   3646   __ beq(&done, Label::kNear);
   3647   __ SubP(result, Operand(32));
   3648   __ bind(&done);
   3649 }
   3650 
   3651 void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
   3652                                   Register scratch1, Register scratch2,
   3653                                   Register scratch3) {
   3654 #if DEBUG
   3655   if (actual.is_reg()) {
   3656     DCHECK(!AreAliased(actual.reg(), scratch1, scratch2, scratch3));
   3657   } else {
   3658     DCHECK(!AreAliased(scratch1, scratch2, scratch3));
   3659   }
   3660 #endif
   3661   if (FLAG_code_comments) {
   3662     if (actual.is_reg()) {
   3663       Comment(";;; PrepareForTailCall, actual: %s {",
   3664               RegisterConfiguration::Crankshaft()->GetGeneralRegisterName(
   3665                   actual.reg().code()));
   3666     } else {
   3667       Comment(";;; PrepareForTailCall, actual: %d {", actual.immediate());
   3668     }
   3669   }
   3670 
   3671   // Check if next frame is an arguments adaptor frame.
   3672   Register caller_args_count_reg = scratch1;
   3673   Label no_arguments_adaptor, formal_parameter_count_loaded;
   3674   __ LoadP(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   3675   __ LoadP(scratch3,
   3676            MemOperand(scratch2, StandardFrameConstants::kContextOffset));
   3677   __ CmpP(scratch3,
   3678           Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   3679   __ bne(&no_arguments_adaptor);
   3680 
   3681   // Drop current frame and load arguments count from arguments adaptor frame.
   3682   __ LoadRR(fp, scratch2);
   3683   __ LoadP(caller_args_count_reg,
   3684            MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
   3685   __ SmiUntag(caller_args_count_reg);
   3686   __ b(&formal_parameter_count_loaded);
   3687 
   3688   __ bind(&no_arguments_adaptor);
   3689   // Load caller's formal parameter count.
   3690   __ mov(caller_args_count_reg, Operand(info()->literal()->parameter_count()));
   3691 
   3692   __ bind(&formal_parameter_count_loaded);
   3693   __ PrepareForTailCall(actual, caller_args_count_reg, scratch2, scratch3);
   3694 
   3695   Comment(";;; }");
   3696 }
   3697 
   3698 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
   3699   HInvokeFunction* hinstr = instr->hydrogen();
   3700   DCHECK(ToRegister(instr->context()).is(cp));
   3701   DCHECK(ToRegister(instr->function()).is(r3));
   3702   DCHECK(instr->HasPointerMap());
   3703 
   3704   bool is_tail_call = hinstr->tail_call_mode() == TailCallMode::kAllow;
   3705 
   3706   if (is_tail_call) {
   3707     DCHECK(!info()->saves_caller_doubles());
   3708     ParameterCount actual(instr->arity());
   3709     // It is safe to use r5, r6 and r7 as scratch registers here given that
   3710     // 1) we are not going to return to the caller function anyway,
   3711     // 2) r5 (new.target) will be initialized below.
   3712     PrepareForTailCall(actual, r5, r6, r7);
   3713   }
   3714 
   3715   Handle<JSFunction> known_function = hinstr->known_function();
   3716   if (known_function.is_null()) {
   3717     LPointerMap* pointers = instr->pointer_map();
   3718     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   3719     ParameterCount actual(instr->arity());
   3720     InvokeFlag flag = is_tail_call ? JUMP_FUNCTION : CALL_FUNCTION;
   3721     __ InvokeFunction(r3, no_reg, actual, flag, generator);
   3722   } else {
   3723     CallKnownFunction(known_function, hinstr->formal_parameter_count(),
   3724                       instr->arity(), is_tail_call, instr);
   3725   }
   3726 }
   3727 
   3728 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
   3729   DCHECK(ToRegister(instr->result()).is(r2));
   3730 
   3731   if (instr->hydrogen()->IsTailCall()) {
   3732     if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
   3733 
   3734     if (instr->target()->IsConstantOperand()) {
   3735       LConstantOperand* target = LConstantOperand::cast(instr->target());
   3736       Handle<Code> code = Handle<Code>::cast(ToHandle(target));
   3737       __ Jump(code, RelocInfo::CODE_TARGET);
   3738     } else {
   3739       DCHECK(instr->target()->IsRegister());
   3740       Register target = ToRegister(instr->target());
   3741       __ AddP(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
   3742       __ JumpToJSEntry(ip);
   3743     }
   3744   } else {
   3745     LPointerMap* pointers = instr->pointer_map();
   3746     SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
   3747 
   3748     if (instr->target()->IsConstantOperand()) {
   3749       LConstantOperand* target = LConstantOperand::cast(instr->target());
   3750       Handle<Code> code = Handle<Code>::cast(ToHandle(target));
   3751       generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
   3752       __ Call(code, RelocInfo::CODE_TARGET);
   3753     } else {
   3754       DCHECK(instr->target()->IsRegister());
   3755       Register target = ToRegister(instr->target());
   3756       generator.BeforeCall(__ CallSize(target));
   3757       __ AddP(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
   3758       __ CallJSEntry(ip);
   3759     }
   3760     generator.AfterCall();
   3761   }
   3762 }
   3763 
   3764 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
   3765   DCHECK(ToRegister(instr->context()).is(cp));
   3766   DCHECK(ToRegister(instr->constructor()).is(r3));
   3767   DCHECK(ToRegister(instr->result()).is(r2));
   3768 
   3769   __ mov(r2, Operand(instr->arity()));
   3770   __ Move(r4, instr->hydrogen()->site());
   3771 
   3772   ElementsKind kind = instr->hydrogen()->elements_kind();
   3773   AllocationSiteOverrideMode override_mode =
   3774       (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
   3775           ? DISABLE_ALLOCATION_SITES
   3776           : DONT_OVERRIDE;
   3777 
   3778   if (instr->arity() == 0) {
   3779     ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
   3780     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   3781   } else if (instr->arity() == 1) {
   3782     Label done;
   3783     if (IsFastPackedElementsKind(kind)) {
   3784       Label packed_case;
   3785       // We might need the holey elements kind instead; look at the
   3786       // first argument (the length) to decide.
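              // E.g. new Array(0) can stay packed, but new Array(3) must
              // allocate a holey backing store.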
   3787       __ LoadP(r7, MemOperand(sp, 0));
   3788       __ CmpP(r7, Operand::Zero());
   3789       __ beq(&packed_case, Label::kNear);
   3790 
   3791       ElementsKind holey_kind = GetHoleyElementsKind(kind);
   3792       ArraySingleArgumentConstructorStub stub(isolate(), holey_kind,
   3793                                               override_mode);
   3794       CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   3795       __ b(&done, Label::kNear);
   3796       __ bind(&packed_case);
   3797     }
   3798 
   3799     ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
   3800     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   3801     __ bind(&done);
   3802   } else {
   3803     ArrayNArgumentsConstructorStub stub(isolate());
   3804     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   3805   }
   3806 }
   3807 
   3808 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
   3809   CallRuntime(instr->function(), instr->arity(), instr);
   3810 }
   3811 
   3812 void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
   3813   Register function = ToRegister(instr->function());
   3814   Register code_object = ToRegister(instr->code_object());
   3815   __ lay(code_object,
   3816          MemOperand(code_object, Code::kHeaderSize - kHeapObjectTag));
   3817   __ StoreP(code_object,
   3818             FieldMemOperand(function, JSFunction::kCodeEntryOffset), r0);
   3819 }
   3820 
   3821 void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
   3822   Register result = ToRegister(instr->result());
   3823   Register base = ToRegister(instr->base_object());
   3824   if (instr->offset()->IsConstantOperand()) {
   3825     LConstantOperand* offset = LConstantOperand::cast(instr->offset());
   3826     __ lay(result, MemOperand(base, ToInteger32(offset)));
   3827   } else {
   3828     Register offset = ToRegister(instr->offset());
   3829     __ lay(result, MemOperand(base, offset));
   3830   }
   3831 }
   3832 
   3833 void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
   3834   HStoreNamedField* hinstr = instr->hydrogen();
   3835   Representation representation = instr->representation();
   3836 
   3837   Register object = ToRegister(instr->object());
   3838   Register scratch = scratch0();
   3839   HObjectAccess access = hinstr->access();
   3840   int offset = access.offset();
   3841 
   3842   if (access.IsExternalMemory()) {
   3843     Register value = ToRegister(instr->value());
   3844     MemOperand operand = MemOperand(object, offset);
   3845     __ StoreRepresentation(value, operand, representation, r0);
   3846     return;
   3847   }
   3848 
   3849   __ AssertNotSmi(object);
   3850 
   3851 #if V8_TARGET_ARCH_S390X
   3852   DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
   3853          IsInteger32(LConstantOperand::cast(instr->value())));
   3854 #else
   3855   DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
   3856          IsSmi(LConstantOperand::cast(instr->value())));
   3857 #endif
   3858   if (!FLAG_unbox_double_fields && representation.IsDouble()) {
   3859     DCHECK(access.IsInobject());
   3860     DCHECK(!hinstr->has_transition());
   3861     DCHECK(!hinstr->NeedsWriteBarrier());
   3862     DoubleRegister value = ToDoubleRegister(instr->value());
   3863     DCHECK(offset >= 0);
   3864     __ StoreDouble(value, FieldMemOperand(object, offset));
   3865     return;
   3866   }
   3867 
   3868   if (hinstr->has_transition()) {
   3869     Handle<Map> transition = hinstr->transition_map();
   3870     AddDeprecationDependency(transition);
   3871     __ mov(scratch, Operand(transition));
   3872     __ StoreP(scratch, FieldMemOperand(object, HeapObject::kMapOffset), r0);
   3873     if (hinstr->NeedsWriteBarrierForMap()) {
   3874       Register temp = ToRegister(instr->temp());
   3875       // Update the write barrier for the map field.
   3876       __ RecordWriteForMap(object, scratch, temp, GetLinkRegisterState(),
   3877                            kSaveFPRegs);
   3878     }
   3879   }
   3880 
   3881   // Do the store.
   3882   Register record_dest = object;
   3883   Register record_value = no_reg;
   3884   Register record_scratch = scratch;
   3885 #if V8_TARGET_ARCH_S390X
   3886   if (FLAG_unbox_double_fields && representation.IsDouble()) {
   3887     DCHECK(access.IsInobject());
   3888     DoubleRegister value = ToDoubleRegister(instr->value());
   3889     __ StoreDouble(value, FieldMemOperand(object, offset));
   3890     if (hinstr->NeedsWriteBarrier()) {
   3891       record_value = ToRegister(instr->value());
   3892     }
   3893   } else {
   3894     if (representation.IsSmi() &&
   3895         hinstr->value()->representation().IsInteger32()) {
   3896       DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
   3897       // 64-bit Smi optimization
   3898       // Store int value directly to upper half of the smi.
   3899       offset = SmiWordOffset(offset);
   3900       representation = Representation::Integer32();
   3901     }
   3902 #endif
   3903     if (access.IsInobject()) {
   3904       Register value = ToRegister(instr->value());
   3905       MemOperand operand = FieldMemOperand(object, offset);
   3906       __ StoreRepresentation(value, operand, representation, r0);
   3907       record_value = value;
   3908     } else {
   3909       Register value = ToRegister(instr->value());
   3910       __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
   3911       MemOperand operand = FieldMemOperand(scratch, offset);
   3912       __ StoreRepresentation(value, operand, representation, r0);
   3913       record_dest = scratch;
   3914       record_value = value;
   3915       record_scratch = object;
   3916     }
   3917 #if V8_TARGET_ARCH_S390X
   3918   }
   3919 #endif
   3920 
   3921   if (hinstr->NeedsWriteBarrier()) {
   3922     __ RecordWriteField(record_dest, offset, record_value, record_scratch,
   3923                         GetLinkRegisterState(), kSaveFPRegs,
   3924                         EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
   3925                         hinstr->PointersToHereCheckForValue());
   3926   }
   3927 }
   3928 
   3929 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
   3930   Representation representation = instr->hydrogen()->length()->representation();
   3931   DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
   3932   DCHECK(representation.IsSmiOrInteger32());
   3933   Register temp = scratch0();
   3934 
   3935   Condition cc = instr->hydrogen()->allow_equality() ? lt : le;
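          // cc is phrased as "deopt if length cc index"; when the comparison is
          // emitted with the operands swapped (index vs. length), the condition
          // is commuted to match.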
   3936   if (instr->length()->IsConstantOperand()) {
   3937     int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
   3938     Register index = ToRegister(instr->index());
   3939     if (representation.IsSmi()) {
   3940       __ CmpLogicalSmiLiteral(index, Smi::FromInt(length), temp);
   3941     } else {
   3942       __ CmpLogical32(index, Operand(length));
   3943     }
   3944     cc = CommuteCondition(cc);
   3945   } else if (instr->index()->IsConstantOperand()) {
   3946     int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
   3947     Register length = ToRegister(instr->length());
   3948     if (representation.IsSmi()) {
   3949       __ CmpLogicalSmiLiteral(length, Smi::FromInt(index), temp);
   3950     } else {
   3951       __ CmpLogical32(length, Operand(index));
   3952     }
   3953   } else {
   3954     Register index = ToRegister(instr->index());
   3955     Register length = ToRegister(instr->length());
   3956     if (representation.IsSmi()) {
   3957       __ CmpLogicalP(length, index);
   3958     } else {
   3959       __ CmpLogical32(length, index);
   3960     }
   3961   }
   3962   if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
   3963     Label done;
   3964     __ b(NegateCondition(cc), &done, Label::kNear);
   3965     __ stop("eliminated bounds check failed");
   3966     __ bind(&done);
   3967   } else {
   3968     DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds);
   3969   }
   3970 }
   3971 
   3972 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
   3973   Register external_pointer = ToRegister(instr->elements());
   3974   Register key = no_reg;
   3975   ElementsKind elements_kind = instr->elements_kind();
   3976   bool key_is_constant = instr->key()->IsConstantOperand();
   3977   int constant_key = 0;
   3978   if (key_is_constant) {
   3979     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   3980     if (constant_key & 0xF0000000) {
   3981       Abort(kArrayIndexConstantValueTooBig);
   3982     }
   3983   } else {
   3984     key = ToRegister(instr->key());
   3985   }
   3986   int element_size_shift = ElementsKindToShiftSize(elements_kind);
   3987   bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
   3988   bool keyMaybeNegative = instr->hydrogen()->IsDehoisted();
   3989   int base_offset = instr->base_offset();
   3990 
   3991   if (elements_kind == FLOAT32_ELEMENTS || elements_kind == FLOAT64_ELEMENTS) {
   3992     Register address = scratch0();
   3993     DoubleRegister value(ToDoubleRegister(instr->value()));
   3994     if (key_is_constant) {
   3995       if (constant_key != 0) {
   3996         base_offset += constant_key << element_size_shift;
   3997         if (!is_int20(base_offset)) {
   3998           __ mov(address, Operand(base_offset));
   3999           __ AddP(address, external_pointer);
   4000         } else {
   4001           __ AddP(address, external_pointer, Operand(base_offset));
   4002         }
   4003         base_offset = 0;
   4004       } else {
   4005         address = external_pointer;
   4006       }
   4007     } else {
   4008       __ IndexToArrayOffset(address, key, element_size_shift, key_is_smi,
   4009                             keyMaybeNegative);
   4010       __ AddP(address, external_pointer);
   4011     }
   4012     if (elements_kind == FLOAT32_ELEMENTS) {
   4013       __ ledbr(double_scratch0(), value);
   4014       __ StoreFloat32(double_scratch0(), MemOperand(address, base_offset));
   4015     } else {  // Storing doubles, not floats.
   4016       __ StoreDouble(value, MemOperand(address, base_offset));
   4017     }
   4018   } else {
   4019     Register value(ToRegister(instr->value()));
   4020     MemOperand mem_operand =
   4021         PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
   4022                             constant_key, element_size_shift, base_offset,
   4023                             keyMaybeNegative);
   4024     switch (elements_kind) {
   4025       case UINT8_ELEMENTS:
   4026       case UINT8_CLAMPED_ELEMENTS:
   4027       case INT8_ELEMENTS:
   4028         if (key_is_constant) {
   4029           __ StoreByte(value, mem_operand, r0);
   4030         } else {
   4031           __ StoreByte(value, mem_operand);
   4032         }
   4033         break;
   4034       case INT16_ELEMENTS:
   4035       case UINT16_ELEMENTS:
   4036         if (key_is_constant) {
   4037           __ StoreHalfWord(value, mem_operand, r0);
   4038         } else {
   4039           __ StoreHalfWord(value, mem_operand);
   4040         }
   4041         break;
   4042       case INT32_ELEMENTS:
   4043       case UINT32_ELEMENTS:
   4044         if (key_is_constant) {
   4045           __ StoreW(value, mem_operand, r0);
   4046         } else {
   4047           __ StoreW(value, mem_operand);
   4048         }
   4049         break;
   4050       case FLOAT32_ELEMENTS:
   4051       case FLOAT64_ELEMENTS:
   4052       case FAST_DOUBLE_ELEMENTS:
   4053       case FAST_ELEMENTS:
   4054       case FAST_SMI_ELEMENTS:
   4055       case FAST_HOLEY_DOUBLE_ELEMENTS:
   4056       case FAST_HOLEY_ELEMENTS:
   4057       case FAST_HOLEY_SMI_ELEMENTS:
   4058       case DICTIONARY_ELEMENTS:
   4059       case FAST_SLOPPY_ARGUMENTS_ELEMENTS:
   4060       case SLOW_SLOPPY_ARGUMENTS_ELEMENTS:
   4061       case FAST_STRING_WRAPPER_ELEMENTS:
   4062       case SLOW_STRING_WRAPPER_ELEMENTS:
   4063       case NO_ELEMENTS:
   4064         UNREACHABLE();
   4065         break;
   4066     }
   4067   }
   4068 }
   4069 
   4070 void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
   4071   DoubleRegister value = ToDoubleRegister(instr->value());
   4072   Register elements = ToRegister(instr->elements());
   4073   Register key = no_reg;
   4074   Register scratch = scratch0();
   4075   DoubleRegister double_scratch = double_scratch0();
   4076   bool key_is_constant = instr->key()->IsConstantOperand();
   4077   int constant_key = 0;
   4078 
   4079   // Calculate the effective address of the slot in the array to store the
   4080   // double value.
   4081   if (key_is_constant) {
   4082     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
   4083     if (constant_key & 0xF0000000) {
   4084       Abort(kArrayIndexConstantValueTooBig);
   4085     }
   4086   } else {
   4087     key = ToRegister(instr->key());
   4088   }
   4089   int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
   4090   bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
   4091   bool keyMaybeNegative = instr->hydrogen()->IsDehoisted();
   4092   int base_offset = instr->base_offset() + constant_key * kDoubleSize;
   4093   bool use_scratch = false;
   4094   intptr_t address_offset = base_offset;
   4095 
   4096   if (key_is_constant) {
    4097     // Memory references support up to 20-bit signed displacements in RXY form.
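             // (is_int20 checks the signed 20-bit range, -524288..524287.)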
    4098     if (!is_int20(address_offset)) {
   4099       __ mov(scratch, Operand(address_offset));
   4100       address_offset = 0;
   4101       use_scratch = true;
   4102     }
   4103   } else {
   4104     use_scratch = true;
   4105     __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi,
   4106                           keyMaybeNegative);
    4107     // Memory references support up to 20-bit signed displacements in RXY form.
    4108     if (!is_int20(address_offset)) {
   4109       __ AddP(scratch, Operand(address_offset));
   4110       address_offset = 0;
   4111     }
   4112   }
   4113 
   4114   if (instr->NeedsCanonicalization()) {
   4115     // Turn potential sNaN value into qNaN.
   4116     __ CanonicalizeNaN(double_scratch, value);
   4117     DCHECK(address_offset >= 0);
   4118     if (use_scratch)
   4119       __ StoreDouble(double_scratch,
   4120                      MemOperand(scratch, elements, address_offset));
   4121     else
   4122       __ StoreDouble(double_scratch, MemOperand(elements, address_offset));
   4123   } else {
   4124     if (use_scratch)
   4125       __ StoreDouble(value, MemOperand(scratch, elements, address_offset));
   4126     else
   4127       __ StoreDouble(value, MemOperand(elements, address_offset));
   4128   }
   4129 }
   4130 
   4131 void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
   4132   HStoreKeyed* hinstr = instr->hydrogen();
   4133   Register value = ToRegister(instr->value());
   4134   Register elements = ToRegister(instr->elements());
   4135   Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
   4136   Register scratch = scratch0();
   4137   int offset = instr->base_offset();
   4138 
   4139   // Do the store.
   4140   if (instr->key()->IsConstantOperand()) {
   4141     DCHECK(!hinstr->NeedsWriteBarrier());
   4142     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
   4143     offset += ToInteger32(const_operand) * kPointerSize;
   4144   } else {
    4145     // Even though the HStoreKeyed instruction forces the input
    4146     // representation for the key to be an integer, the input gets replaced
    4147     // during bounds check elimination with the index argument to the bounds
    4148     // check, which can be tagged, so that case must be handled here, too.
   4149     if (hinstr->key()->representation().IsSmi()) {
   4150       __ SmiToPtrArrayOffset(scratch, key);
   4151     } else {
   4152       if (instr->hydrogen()->IsDehoisted() ||
   4153           !CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
   4154 #if V8_TARGET_ARCH_S390X
    4155         // If array access is dehoisted, the key, being an int32, can contain
    4156         // a negative value and needs to be sign-extended to 64 bits for
    4157         // memory access.
   4158         __ lgfr(key, key);
   4159 #endif
   4160         __ ShiftLeftP(scratch, key, Operand(kPointerSizeLog2));
   4161       } else {
    4162         // Small optimization to reduce path length.  After the bounds
    4163         // check, the key is guaranteed to be non-negative.  Leverage RISBG,
   4164         // which also performs zero-extension.
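                 // In effect scratch = (zero-extended 32-bit key) << kPointerSizeLog2:
                 // RISBG rotates key left by the shift amount, keeps only the bits
                 // of the shifted value, and zeroes the rest.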
   4165         __ risbg(scratch, key, Operand(32 - kPointerSizeLog2),
   4166                  Operand(63 - kPointerSizeLog2), Operand(kPointerSizeLog2),
   4167                  true);
   4168       }
   4169     }
   4170   }
   4171 
   4172   Representation representation = hinstr->value()->representation();
   4173 
   4174 #if V8_TARGET_ARCH_S390X
   4175   // 64-bit Smi optimization
   4176   if (representation.IsInteger32()) {
   4177     DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
   4178     DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
   4179     // Store int value directly to upper half of the smi.
   4180     offset = SmiWordOffset(offset);
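             // (The entry already holds a Smi (see the DCHECKs above), so its
             // tag half is zero; rewriting just the payload word leaves a
             // valid Smi without an explicit tagging step.)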
   4181   }
   4182 #endif
   4183 
   4184   if (instr->key()->IsConstantOperand()) {
   4185     __ StoreRepresentation(value, MemOperand(elements, offset), representation,
   4186                            scratch);
   4187   } else {
   4188     __ StoreRepresentation(value, MemOperand(scratch, elements, offset),
   4189                            representation, r0);
   4190   }
   4191 
   4192   if (hinstr->NeedsWriteBarrier()) {
   4193     SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
   4194                                 ? OMIT_SMI_CHECK
   4195                                 : INLINE_SMI_CHECK;
   4196     // Compute address of modified element and store it into key register.
   4197     if (instr->key()->IsConstantOperand()) {
   4198       __ lay(key, MemOperand(elements, offset));
   4199     } else {
   4200       __ lay(key, MemOperand(scratch, elements, offset));
   4201     }
   4202     __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs,
   4203                    EMIT_REMEMBERED_SET, check_needed,
   4204                    hinstr->PointersToHereCheckForValue());
   4205   }
   4206 }
   4207 
   4208 void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
    4209   // Dispatch by case: fixed typed array, fast double array, or fast array.
   4210   if (instr->is_fixed_typed_array()) {
   4211     DoStoreKeyedExternalArray(instr);
   4212   } else if (instr->hydrogen()->value()->representation().IsDouble()) {
   4213     DoStoreKeyedFixedDoubleArray(instr);
   4214   } else {
   4215     DoStoreKeyedFixedArray(instr);
   4216   }
   4217 }
   4218 
   4219 void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
   4220   class DeferredMaybeGrowElements final : public LDeferredCode {
   4221    public:
   4222     DeferredMaybeGrowElements(LCodeGen* codegen, LMaybeGrowElements* instr)
   4223         : LDeferredCode(codegen), instr_(instr) {}
   4224     void Generate() override { codegen()->DoDeferredMaybeGrowElements(instr_); }
   4225     LInstruction* instr() override { return instr_; }
   4226 
   4227    private:
   4228     LMaybeGrowElements* instr_;
   4229   };
   4230 
   4231   Register result = r2;
   4232   DeferredMaybeGrowElements* deferred =
   4233       new (zone()) DeferredMaybeGrowElements(this, instr);
   4234   LOperand* key = instr->key();
   4235   LOperand* current_capacity = instr->current_capacity();
   4236 
   4237   DCHECK(instr->hydrogen()->key()->representation().IsInteger32());
   4238   DCHECK(instr->hydrogen()->current_capacity()->representation().IsInteger32());
   4239   DCHECK(key->IsConstantOperand() || key->IsRegister());
   4240   DCHECK(current_capacity->IsConstantOperand() ||
   4241          current_capacity->IsRegister());
   4242 
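           // All four cases below take the deferred (grow) path exactly when
           // key >= current_capacity; the branch condition simply mirrors the
           // operand order of each comparison.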
   4243   if (key->IsConstantOperand() && current_capacity->IsConstantOperand()) {
   4244     int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
   4245     int32_t constant_capacity =
   4246         ToInteger32(LConstantOperand::cast(current_capacity));
   4247     if (constant_key >= constant_capacity) {
   4248       // Deferred case.
   4249       __ b(deferred->entry());
   4250     }
   4251   } else if (key->IsConstantOperand()) {
   4252     int32_t constant_key = ToInteger32(LConstantOperand::cast(key));
   4253     __ Cmp32(ToRegister(current_capacity), Operand(constant_key));
   4254     __ ble(deferred->entry());
   4255   } else if (current_capacity->IsConstantOperand()) {
   4256     int32_t constant_capacity =
   4257         ToInteger32(LConstantOperand::cast(current_capacity));
   4258     __ Cmp32(ToRegister(key), Operand(constant_capacity));
   4259     __ bge(deferred->entry());
   4260   } else {
   4261     __ Cmp32(ToRegister(key), ToRegister(current_capacity));
   4262     __ bge(deferred->entry());
   4263   }
   4264 
   4265   if (instr->elements()->IsRegister()) {
   4266     __ Move(result, ToRegister(instr->elements()));
   4267   } else {
   4268     __ LoadP(result, ToMemOperand(instr->elements()));
   4269   }
   4270 
   4271   __ bind(deferred->exit());
   4272 }
   4273 
   4274 void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
   4275   // TODO(3095996): Get rid of this. For now, we need to make the
   4276   // result register contain a valid pointer because it is already
   4277   // contained in the register pointer map.
   4278   Register result = r2;
   4279   __ LoadImmP(result, Operand::Zero());
   4280 
   4281   // We have to call a stub.
   4282   {
   4283     PushSafepointRegistersScope scope(this);
   4284     if (instr->object()->IsRegister()) {
   4285       __ Move(result, ToRegister(instr->object()));
   4286     } else {
   4287       __ LoadP(result, ToMemOperand(instr->object()));
   4288     }
   4289 
   4290     LOperand* key = instr->key();
   4291     if (key->IsConstantOperand()) {
   4292       LConstantOperand* constant_key = LConstantOperand::cast(key);
   4293       int32_t int_key = ToInteger32(constant_key);
   4294       if (Smi::IsValid(int_key)) {
   4295         __ LoadSmiLiteral(r5, Smi::FromInt(int_key));
   4296       } else {
   4297         Abort(kArrayIndexConstantValueTooBig);
   4298       }
   4299     } else {
   4300       Label is_smi;
   4301 #if V8_TARGET_ARCH_S390X
   4302       __ SmiTag(r5, ToRegister(key));
   4303 #else
   4304       // Deopt if the key is outside Smi range. The stub expects Smi and would
    4305       // bump the elements into dictionary mode (and trigger a deopt) anyway.
   4306       __ Add32(r5, ToRegister(key), ToRegister(key));
   4307       __ b(nooverflow, &is_smi);
   4308       __ PopSafepointRegisters();
   4309       DeoptimizeIf(al, instr, DeoptimizeReason::kOverflow, cr0);
   4310       __ bind(&is_smi);
   4311 #endif
   4312     }
   4313 
   4314     GrowArrayElementsStub stub(isolate(), instr->hydrogen()->kind());
   4315     __ CallStub(&stub);
   4316     RecordSafepointWithLazyDeopt(
   4317         instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
   4318     __ StoreToSafepointRegisterSlot(result, result);
   4319   }
   4320 
   4321   // Deopt on smi, which means the elements array changed to dictionary mode.
   4322   __ TestIfSmi(result);
   4323   DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
   4324 }
   4325 
   4326 void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
   4327   Register object_reg = ToRegister(instr->object());
   4328   Register scratch = scratch0();
   4329 
   4330   Handle<Map> from_map = instr->original_map();
   4331   Handle<Map> to_map = instr->transitioned_map();
   4332   ElementsKind from_kind = instr->from_kind();
   4333   ElementsKind to_kind = instr->to_kind();
   4334 
   4335   Label not_applicable;
   4336   __ LoadP(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
   4337   __ CmpP(scratch, Operand(from_map));
   4338   __ bne(&not_applicable);
   4339 
   4340   if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
   4341     Register new_map_reg = ToRegister(instr->new_map_temp());
   4342     __ mov(new_map_reg, Operand(to_map));
   4343     __ StoreP(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
   4344     // Write barrier.
   4345     __ RecordWriteForMap(object_reg, new_map_reg, scratch,
   4346                          GetLinkRegisterState(), kDontSaveFPRegs);
   4347   } else {
   4348     DCHECK(ToRegister(instr->context()).is(cp));
   4349     DCHECK(object_reg.is(r2));
   4350     PushSafepointRegistersScope scope(this);
   4351     __ Move(r3, to_map);
   4352     TransitionElementsKindStub stub(isolate(), from_kind, to_kind);
   4353     __ CallStub(&stub);
   4354     RecordSafepointWithRegisters(instr->pointer_map(), 0,
   4355                                  Safepoint::kLazyDeopt);
   4356   }
   4357   __ bind(&not_applicable);
   4358 }
   4359 
   4360 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
   4361   Register object = ToRegister(instr->object());
   4362   Register temp1 = ToRegister(instr->temp1());
   4363   Register temp2 = ToRegister(instr->temp2());
   4364   Label no_memento_found;
   4365   __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
   4366   DeoptimizeIf(eq, instr, DeoptimizeReason::kMementoFound);
   4367   __ bind(&no_memento_found);
   4368 }
   4369 
   4370 void LCodeGen::DoStringAdd(LStringAdd* instr) {
   4371   DCHECK(ToRegister(instr->context()).is(cp));
   4372   DCHECK(ToRegister(instr->left()).is(r3));
   4373   DCHECK(ToRegister(instr->right()).is(r2));
   4374   StringAddStub stub(isolate(), instr->hydrogen()->flags(),
   4375                      instr->hydrogen()->pretenure_flag());
   4376   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   4377 }
   4378 
   4379 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
   4380   class DeferredStringCharCodeAt final : public LDeferredCode {
   4381    public:
   4382     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
   4383         : LDeferredCode(codegen), instr_(instr) {}
   4384     void Generate() override { codegen()->DoDeferredStringCharCodeAt(instr_); }
   4385     LInstruction* instr() override { return instr_; }
   4386 
   4387    private:
   4388     LStringCharCodeAt* instr_;
   4389   };
   4390 
   4391   DeferredStringCharCodeAt* deferred =
   4392       new (zone()) DeferredStringCharCodeAt(this, instr);
   4393 
   4394   StringCharLoadGenerator::Generate(
   4395       masm(), ToRegister(instr->string()), ToRegister(instr->index()),
   4396       ToRegister(instr->result()), deferred->entry());
   4397   __ bind(deferred->exit());
   4398 }
   4399 
   4400 void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
   4401   Register string = ToRegister(instr->string());
   4402   Register result = ToRegister(instr->result());
   4403   Register scratch = scratch0();
   4404 
   4405   // TODO(3095996): Get rid of this. For now, we need to make the
   4406   // result register contain a valid pointer because it is already
   4407   // contained in the register pointer map.
   4408   __ LoadImmP(result, Operand::Zero());
   4409 
   4410   PushSafepointRegistersScope scope(this);
   4411   __ push(string);
   4412   // Push the index as a smi. This is safe because of the checks in
   4413   // DoStringCharCodeAt above.
   4414   if (instr->index()->IsConstantOperand()) {
   4415     int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
   4416     __ LoadSmiLiteral(scratch, Smi::FromInt(const_index));
   4417     __ push(scratch);
   4418   } else {
   4419     Register index = ToRegister(instr->index());
   4420     __ SmiTag(index);
   4421     __ push(index);
   4422   }
   4423   CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
   4424                           instr->context());
   4425   __ AssertSmi(r2);
   4426   __ SmiUntag(r2);
   4427   __ StoreToSafepointRegisterSlot(r2, result);
   4428 }
   4429 
   4430 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
   4431   class DeferredStringCharFromCode final : public LDeferredCode {
   4432    public:
   4433     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
   4434         : LDeferredCode(codegen), instr_(instr) {}
   4435     void Generate() override {
   4436       codegen()->DoDeferredStringCharFromCode(instr_);
   4437     }
   4438     LInstruction* instr() override { return instr_; }
   4439 
   4440    private:
   4441     LStringCharFromCode* instr_;
   4442   };
   4443 
   4444   DeferredStringCharFromCode* deferred =
   4445       new (zone()) DeferredStringCharFromCode(this, instr);
   4446 
   4447   DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
   4448   Register char_code = ToRegister(instr->char_code());
   4449   Register result = ToRegister(instr->result());
   4450   DCHECK(!char_code.is(result));
   4451 
   4452   __ CmpLogicalP(char_code, Operand(String::kMaxOneByteCharCode));
   4453   __ bgt(deferred->entry());
   4454   __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
   4455   __ ShiftLeftP(r0, char_code, Operand(kPointerSizeLog2));
   4456   __ AddP(result, r0);
   4457   __ LoadP(result, FieldMemOperand(result, FixedArray::kHeaderSize));
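           // result now holds the cached single-character string for
           // char_code; undefined is the "not cached" sentinel and routes to
           // the deferred runtime path below.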
   4458   __ CompareRoot(result, Heap::kUndefinedValueRootIndex);
   4459   __ beq(deferred->entry());
   4460   __ bind(deferred->exit());
   4461 }
   4462 
   4463 void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
   4464   Register char_code = ToRegister(instr->char_code());
   4465   Register result = ToRegister(instr->result());
   4466 
   4467   // TODO(3095996): Get rid of this. For now, we need to make the
   4468   // result register contain a valid pointer because it is already
   4469   // contained in the register pointer map.
   4470   __ LoadImmP(result, Operand::Zero());
   4471 
   4472   PushSafepointRegistersScope scope(this);
   4473   __ SmiTag(char_code);
   4474   __ push(char_code);
   4475   CallRuntimeFromDeferred(Runtime::kStringCharFromCode, 1, instr,
   4476                           instr->context());
   4477   __ StoreToSafepointRegisterSlot(r2, result);
   4478 }
   4479 
   4480 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
   4481   LOperand* input = instr->value();
   4482   DCHECK(input->IsRegister() || input->IsStackSlot());
   4483   LOperand* output = instr->result();
   4484   DCHECK(output->IsDoubleRegister());
   4485   if (input->IsStackSlot()) {
   4486     Register scratch = scratch0();
   4487     __ LoadP(scratch, ToMemOperand(input));
   4488     __ ConvertIntToDouble(scratch, ToDoubleRegister(output));
   4489   } else {
   4490     __ ConvertIntToDouble(ToRegister(input), ToDoubleRegister(output));
   4491   }
   4492 }
   4493 
   4494 void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
   4495   LOperand* input = instr->value();
   4496   LOperand* output = instr->result();
   4497   __ ConvertUnsignedIntToDouble(ToRegister(input), ToDoubleRegister(output));
   4498 }
   4499 
   4500 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
   4501   class DeferredNumberTagI final : public LDeferredCode {
   4502    public:
   4503     DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
   4504         : LDeferredCode(codegen), instr_(instr) {}
   4505     void Generate() override {
   4506       codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
   4507                                        instr_->temp2(), SIGNED_INT32);
   4508     }
   4509     LInstruction* instr() override { return instr_; }
   4510 
   4511    private:
   4512     LNumberTagI* instr_;
   4513   };
   4514 
   4515   Register src = ToRegister(instr->value());
   4516   Register dst = ToRegister(instr->result());
   4517 
   4518   DeferredNumberTagI* deferred = new (zone()) DeferredNumberTagI(this, instr);
   4519 #if V8_TARGET_ARCH_S390X
   4520   __ SmiTag(dst, src);
   4521 #else
    4522   // Add src to itself to detect Smi overflow.
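           // (A 32-bit Smi is the value shifted left by one, so src + src both
           // tags the value and sets the overflow flag exactly when src does
           // not fit in 31 bits: 0x3FFFFFFF tags fine, 0x40000000 overflows.)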
   4523   __ Add32(dst, src, src);
   4524   __ b(overflow, deferred->entry());
   4525 #endif
   4526   __ bind(deferred->exit());
   4527 }
   4528 
   4529 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
   4530   class DeferredNumberTagU final : public LDeferredCode {
   4531    public:
   4532     DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
   4533         : LDeferredCode(codegen), instr_(instr) {}
   4534     void Generate() override {
   4535       codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
   4536                                        instr_->temp2(), UNSIGNED_INT32);
   4537     }
   4538     LInstruction* instr() override { return instr_; }
   4539 
   4540    private:
   4541     LNumberTagU* instr_;
   4542   };
   4543 
   4544   Register input = ToRegister(instr->value());
   4545   Register result = ToRegister(instr->result());
   4546 
   4547   DeferredNumberTagU* deferred = new (zone()) DeferredNumberTagU(this, instr);
   4548   __ CmpLogicalP(input, Operand(Smi::kMaxValue));
   4549   __ bgt(deferred->entry());
   4550   __ SmiTag(result, input);
   4551   __ bind(deferred->exit());
   4552 }
   4553 
   4554 void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
   4555                                      LOperand* temp1, LOperand* temp2,
   4556                                      IntegerSignedness signedness) {
   4557   Label done, slow;
   4558   Register src = ToRegister(value);
   4559   Register dst = ToRegister(instr->result());
   4560   Register tmp1 = scratch0();
   4561   Register tmp2 = ToRegister(temp1);
   4562   Register tmp3 = ToRegister(temp2);
   4563   DoubleRegister dbl_scratch = double_scratch0();
   4564 
   4565   if (signedness == SIGNED_INT32) {
   4566     // There was overflow, so bits 30 and 31 of the original integer
   4567     // disagree. Try to allocate a heap number in new space and store
   4568     // the value in there. If that fails, call the runtime system.
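             // When dst aliases src, the original value is reconstructed below:
             // untagging halves the overflowed result and xilf flips the sign
             // bit back (e.g. 0x40000000 tags to 0x80000000 with overflow;
             // untag gives 0xC0000000, xor kSignMask restores 0x40000000).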
   4569     if (dst.is(src)) {
   4570       __ SmiUntag(src, dst);
   4571       __ xilf(src, Operand(HeapNumber::kSignMask));
   4572     }
   4573     __ ConvertIntToDouble(src, dbl_scratch);
   4574   } else {
   4575     __ ConvertUnsignedIntToDouble(src, dbl_scratch);
   4576   }
   4577 
   4578   if (FLAG_inline_new) {
   4579     __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
   4580     __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
   4581     __ b(&done);
   4582   }
   4583 
   4584   // Slow case: Call the runtime system to do the number allocation.
   4585   __ bind(&slow);
   4586   {
   4587     // TODO(3095996): Put a valid pointer value in the stack slot where the
   4588     // result register is stored, as this register is in the pointer map, but
   4589     // contains an integer value.
   4590     __ LoadImmP(dst, Operand::Zero());
   4591 
   4592     // Preserve the value of all registers.
   4593     PushSafepointRegistersScope scope(this);
   4594     // Reset the context register.
   4595     if (!dst.is(cp)) {
   4596       __ LoadImmP(cp, Operand::Zero());
   4597     }
   4598     __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   4599     RecordSafepointWithRegisters(instr->pointer_map(), 0,
   4600                                  Safepoint::kNoLazyDeopt);
   4601     __ StoreToSafepointRegisterSlot(r2, dst);
   4602   }
   4603 
    4604   // Done. Store the value in dbl_scratch into the value field of the
    4605   // allocated heap number.
   4606   __ bind(&done);
   4607   __ StoreDouble(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
   4608 }
   4609 
   4610 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
   4611   class DeferredNumberTagD final : public LDeferredCode {
   4612    public:
   4613     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
   4614         : LDeferredCode(codegen), instr_(instr) {}
   4615     void Generate() override { codegen()->DoDeferredNumberTagD(instr_); }
   4616     LInstruction* instr() override { return instr_; }
   4617 
   4618    private:
   4619     LNumberTagD* instr_;
   4620   };
   4621 
   4622   DoubleRegister input_reg = ToDoubleRegister(instr->value());
   4623   Register scratch = scratch0();
   4624   Register reg = ToRegister(instr->result());
   4625   Register temp1 = ToRegister(instr->temp());
   4626   Register temp2 = ToRegister(instr->temp2());
   4627 
   4628   DeferredNumberTagD* deferred = new (zone()) DeferredNumberTagD(this, instr);
   4629   if (FLAG_inline_new) {
   4630     __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
   4631     __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
   4632   } else {
   4633     __ b(deferred->entry());
   4634   }
   4635   __ bind(deferred->exit());
   4636   __ StoreDouble(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
   4637 }
   4638 
   4639 void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
   4640   // TODO(3095996): Get rid of this. For now, we need to make the
   4641   // result register contain a valid pointer because it is already
   4642   // contained in the register pointer map.
   4643   Register reg = ToRegister(instr->result());
   4644   __ LoadImmP(reg, Operand::Zero());
   4645 
   4646   PushSafepointRegistersScope scope(this);
   4647   // Reset the context register.
   4648   if (!reg.is(cp)) {
   4649     __ LoadImmP(cp, Operand::Zero());
   4650   }
   4651   __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   4652   RecordSafepointWithRegisters(instr->pointer_map(), 0,
   4653                                Safepoint::kNoLazyDeopt);
   4654   __ StoreToSafepointRegisterSlot(r2, reg);
   4655 }
   4656 
   4657 void LCodeGen::DoSmiTag(LSmiTag* instr) {
   4658   HChange* hchange = instr->hydrogen();
   4659   Register input = ToRegister(instr->value());
   4660   Register output = ToRegister(instr->result());
   4661   if (hchange->CheckFlag(HValue::kCanOverflow) &&
   4662       hchange->value()->CheckFlag(HValue::kUint32)) {
   4663     __ TestUnsignedSmiCandidate(input, r0);
   4664     DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow, cr0);
   4665   }
   4666 #if !V8_TARGET_ARCH_S390X
   4667   if (hchange->CheckFlag(HValue::kCanOverflow) &&
   4668       !hchange->value()->CheckFlag(HValue::kUint32)) {
   4669     __ SmiTagCheckOverflow(output, input, r0);
   4670     DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
   4671   } else {
   4672 #endif
   4673     __ SmiTag(output, input);
   4674 #if !V8_TARGET_ARCH_S390X
   4675   }
   4676 #endif
   4677 }
   4678 
   4679 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
   4680   Register input = ToRegister(instr->value());
   4681   Register result = ToRegister(instr->result());
   4682   if (instr->needs_check()) {
   4683     __ tmll(input, Operand(kHeapObjectTag));
   4684     DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
   4685     __ SmiUntag(result, input);
   4686   } else {
   4687     __ SmiUntag(result, input);
   4688   }
   4689 }
   4690 
   4691 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
   4692                                 DoubleRegister result_reg,
   4693                                 NumberUntagDMode mode) {
   4694   bool can_convert_undefined_to_nan = instr->truncating();
   4695   bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
   4696 
   4697   Register scratch = scratch0();
   4698   DCHECK(!result_reg.is(double_scratch0()));
   4699 
   4700   Label convert, load_smi, done;
   4701 
   4702   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
   4703     // Smi check.
   4704     __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
   4705 
   4706     // Heap number map check.
   4707     __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
   4708     __ CmpP(scratch, RootMemOperand(Heap::kHeapNumberMapRootIndex));
   4709 
   4710     if (can_convert_undefined_to_nan) {
   4711       __ bne(&convert, Label::kNear);
   4712     } else {
   4713       DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
   4714     }
    4715     // Load the heap number value.
   4716     __ LoadDouble(result_reg,
   4717                   FieldMemOperand(input_reg, HeapNumber::kValueOffset));
   4718     if (deoptimize_on_minus_zero) {
   4719       __ TestDoubleIsMinusZero(result_reg, scratch, ip);
   4720       DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
   4721     }
   4722     __ b(&done, Label::kNear);
   4723     if (can_convert_undefined_to_nan) {
   4724       __ bind(&convert);
   4725       // Convert undefined (and hole) to NaN.
   4726       __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
   4727       DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
   4728       __ LoadRoot(scratch, Heap::kNanValueRootIndex);
   4729       __ LoadDouble(result_reg,
   4730                     FieldMemOperand(scratch, HeapNumber::kValueOffset));
   4731       __ b(&done, Label::kNear);
   4732     }
   4733   } else {
   4734     __ SmiUntag(scratch, input_reg);
   4735     DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
   4736   }
   4737   // Smi to double register conversion
   4738   __ bind(&load_smi);
   4739   // scratch: untagged value of input_reg
   4740   __ ConvertIntToDouble(scratch, result_reg);
   4741   __ bind(&done);
   4742 }
   4743 
   4744 void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
   4745   Register input_reg = ToRegister(instr->value());
   4746   Register scratch1 = scratch0();
   4747   Register scratch2 = ToRegister(instr->temp());
   4748   DoubleRegister double_scratch = double_scratch0();
   4749   DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
   4750 
   4751   DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
   4752   DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
   4753 
   4754   Label done;
   4755 
   4756   // Heap number map check.
   4757   __ LoadP(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
   4758   __ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex);
   4759 
   4760   if (instr->truncating()) {
   4761     Label truncate;
   4762     __ beq(&truncate);
   4763     __ CompareInstanceType(scratch1, scratch1, ODDBALL_TYPE);
   4764     DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball);
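             // Oddballs are acceptable because their to-number value is stored
             // at the same offset as a heap number's value field, so the
             // truncation below reads the right double either way.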
   4765     __ bind(&truncate);
   4766     __ LoadRR(scratch2, input_reg);
   4767     __ TruncateHeapNumberToI(input_reg, scratch2);
   4768   } else {
   4769     // Deoptimize if we don't have a heap number.
   4770     DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
   4771 
   4772     __ LoadDouble(double_scratch2,
   4773                   FieldMemOperand(input_reg, HeapNumber::kValueOffset));
   4774     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
    4775       // Preserve the heap number pointer in scratch2 for the minus-zero
    4776       // check below.
   4776       __ LoadRR(scratch2, input_reg);
   4777     }
   4778     __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
   4779                              double_scratch);
   4780     DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
   4781 
   4782     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   4783       __ CmpP(input_reg, Operand::Zero());
   4784       __ bne(&done, Label::kNear);
   4785       __ TestHeapNumberSign(scratch2, scratch1);
   4786       DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
   4787     }
   4788   }
   4789   __ bind(&done);
   4790 }
   4791 
   4792 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
   4793   class DeferredTaggedToI final : public LDeferredCode {
   4794    public:
   4795     DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
   4796         : LDeferredCode(codegen), instr_(instr) {}
   4797     void Generate() override { codegen()->DoDeferredTaggedToI(instr_); }
   4798     LInstruction* instr() override { return instr_; }
   4799 
   4800    private:
   4801     LTaggedToI* instr_;
   4802   };
   4803 
   4804   LOperand* input = instr->value();
   4805   DCHECK(input->IsRegister());
   4806   DCHECK(input->Equals(instr->result()));
   4807 
   4808   Register input_reg = ToRegister(input);
   4809 
   4810   if (instr->hydrogen()->value()->representation().IsSmi()) {
   4811     __ SmiUntag(input_reg);
   4812   } else {
   4813     DeferredTaggedToI* deferred = new (zone()) DeferredTaggedToI(this, instr);
   4814 
   4815     // Branch to deferred code if the input is a HeapObject.
   4816     __ JumpIfNotSmi(input_reg, deferred->entry());
   4817 
   4818     __ SmiUntag(input_reg);
   4819     __ bind(deferred->exit());
   4820   }
   4821 }
   4822 
   4823 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   4824   LOperand* input = instr->value();
   4825   DCHECK(input->IsRegister());
   4826   LOperand* result = instr->result();
   4827   DCHECK(result->IsDoubleRegister());
   4828 
   4829   Register input_reg = ToRegister(input);
   4830   DoubleRegister result_reg = ToDoubleRegister(result);
   4831 
   4832   HValue* value = instr->hydrogen()->value();
   4833   NumberUntagDMode mode = value->representation().IsSmi()
   4834                               ? NUMBER_CANDIDATE_IS_SMI
   4835                               : NUMBER_CANDIDATE_IS_ANY_TAGGED;
   4836 
   4837   EmitNumberUntagD(instr, input_reg, result_reg, mode);
   4838 }
   4839 
   4840 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
   4841   Register result_reg = ToRegister(instr->result());
   4842   Register scratch1 = scratch0();
   4843   DoubleRegister double_input = ToDoubleRegister(instr->value());
   4844   DoubleRegister double_scratch = double_scratch0();
   4845 
   4846   if (instr->truncating()) {
   4847     __ TruncateDoubleToI(result_reg, double_input);
   4848   } else {
   4849     __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
   4850                              double_scratch);
    4851     // Deoptimize if the input wasn't an int32 (inside a double).
   4852     DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
   4853     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   4854       Label done;
   4855       __ CmpP(result_reg, Operand::Zero());
   4856       __ bne(&done, Label::kNear);
   4857       __ TestDoubleSign(double_input, scratch1);
   4858       DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
   4859       __ bind(&done);
   4860     }
   4861   }
   4862 }
   4863 
   4864 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
   4865   Register result_reg = ToRegister(instr->result());
   4866   Register scratch1 = scratch0();
   4867   DoubleRegister double_input = ToDoubleRegister(instr->value());
   4868   DoubleRegister double_scratch = double_scratch0();
   4869 
   4870   if (instr->truncating()) {
   4871     __ TruncateDoubleToI(result_reg, double_input);
   4872   } else {
   4873     __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
   4874                              double_scratch);
    4875     // Deoptimize if the input wasn't an int32 (inside a double).
   4876     DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
   4877     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
   4878       Label done;
   4879       __ CmpP(result_reg, Operand::Zero());
   4880       __ bne(&done, Label::kNear);
   4881       __ TestDoubleSign(double_input, scratch1);
   4882       DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
   4883       __ bind(&done);
   4884     }
   4885   }
   4886 #if V8_TARGET_ARCH_S390X
   4887   __ SmiTag(result_reg);
   4888 #else
   4889   __ SmiTagCheckOverflow(result_reg, r0);
   4890   DeoptimizeIf(lt, instr, DeoptimizeReason::kOverflow, cr0);
   4891 #endif
   4892 }
   4893 
   4894 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
   4895   LOperand* input = instr->value();
   4896   if (input->IsRegister()) {
   4897     __ TestIfSmi(ToRegister(input));
   4898   } else if (input->IsStackSlot()) {
   4899     MemOperand value = ToMemOperand(input);
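             // The Smi tag is the low-order bit, which on this big-endian
             // target sits in the last byte of the word; hence the +7 (64-bit)
             // and +3 (32-bit) byte adjustments below.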
   4900 #if !V8_TARGET_LITTLE_ENDIAN
   4901 #if V8_TARGET_ARCH_S390X
   4902     __ TestIfSmi(MemOperand(value.rb(), value.offset() + 7));
   4903 #else
   4904     __ TestIfSmi(MemOperand(value.rb(), value.offset() + 3));
   4905 #endif
   4906 #else
   4907     __ TestIfSmi(value);
   4908 #endif
   4909   }
   4910   DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi, cr0);
   4911 }
   4912 
   4913 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
   4914   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
   4915     LOperand* input = instr->value();
   4916     if (input->IsRegister()) {
   4917       __ TestIfSmi(ToRegister(input));
   4918     } else if (input->IsStackSlot()) {
   4919       MemOperand value = ToMemOperand(input);
   4920 #if !V8_TARGET_LITTLE_ENDIAN
   4921 #if V8_TARGET_ARCH_S390X
   4922       __ TestIfSmi(MemOperand(value.rb(), value.offset() + 7));
   4923 #else
   4924       __ TestIfSmi(MemOperand(value.rb(), value.offset() + 3));
   4925 #endif
   4926 #else
   4927       __ TestIfSmi(value);
   4928 #endif
   4929     } else {
   4930       UNIMPLEMENTED();
   4931     }
   4932     DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi, cr0);
   4933   }
   4934 }
   4935 
   4936 void LCodeGen::DoCheckArrayBufferNotNeutered(
   4937     LCheckArrayBufferNotNeutered* instr) {
   4938   Register view = ToRegister(instr->view());
   4939   Register scratch = scratch0();
   4940 
   4941   __ LoadP(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
   4942   __ LoadlW(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
   4943   __ And(r0, scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
   4944   DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds, cr0);
   4945 }
   4946 
   4947 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
   4948   Register input = ToRegister(instr->value());
   4949   Register scratch = scratch0();
   4950 
   4951   __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
   4952 
   4953   if (instr->hydrogen()->is_interval_check()) {
   4954     InstanceType first;
   4955     InstanceType last;
   4956     instr->hydrogen()->GetCheckInterval(&first, &last);
   4957 
   4958     __ CmpLogicalByte(FieldMemOperand(scratch, Map::kInstanceTypeOffset),
   4959                       Operand(first));
   4960 
    4961     // If there is only one type in the interval, check for equality.
   4962     if (first == last) {
   4963       DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
   4964     } else {
   4965       DeoptimizeIf(lt, instr, DeoptimizeReason::kWrongInstanceType);
   4966       // Omit check for the last type.
   4967       if (last != LAST_TYPE) {
   4968         __ CmpLogicalByte(FieldMemOperand(scratch, Map::kInstanceTypeOffset),
   4969                           Operand(last));
   4970         DeoptimizeIf(gt, instr, DeoptimizeReason::kWrongInstanceType);
   4971       }
   4972     }
   4973   } else {
   4974     uint8_t mask;
   4975     uint8_t tag;
   4976     instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
   4977 
   4978     __ LoadlB(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
   4979 
   4980     if (base::bits::IsPowerOfTwo32(mask)) {
   4981       DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
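               // With a single-bit mask the AND leaves either 0 or the mask bit
               // itself, so the tag check collapses into the condition code:
               // deopt on non-zero when expecting 0, and on zero otherwise.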
   4982       __ AndP(scratch, Operand(mask));
   4983       DeoptimizeIf(tag == 0 ? ne : eq, instr,
   4984                    DeoptimizeReason::kWrongInstanceType);
   4985     } else {
   4986       __ AndP(scratch, Operand(mask));
   4987       __ CmpP(scratch, Operand(tag));
   4988       DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
   4989     }
   4990   }
   4991 }
   4992 
   4993 void LCodeGen::DoCheckValue(LCheckValue* instr) {
   4994   Register reg = ToRegister(instr->value());
   4995   Handle<HeapObject> object = instr->hydrogen()->object().handle();
   4996   AllowDeferredHandleDereference smi_check;
   4997   if (isolate()->heap()->InNewSpace(*object)) {
   4999     Handle<Cell> cell = isolate()->factory()->NewCell(object);
   5000     __ mov(ip, Operand(cell));
   5001     __ CmpP(reg, FieldMemOperand(ip, Cell::kValueOffset));
   5002   } else {
   5003     __ CmpP(reg, Operand(object));
   5004   }
   5005   DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch);
   5006 }
   5007 
   5008 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
   5009   Register temp = ToRegister(instr->temp());
   5010   Label deopt, done;
    5011   // If the map is not deprecated, the migration attempt does not make sense.
   5012   __ LoadP(temp, FieldMemOperand(object, HeapObject::kMapOffset));
   5013   __ LoadlW(temp, FieldMemOperand(temp, Map::kBitField3Offset));
   5014   __ TestBitMask(temp, Map::Deprecated::kMask, r0);
   5015   __ beq(&deopt);
   5016 
   5017   {
   5018     PushSafepointRegistersScope scope(this);
   5019     __ push(object);
   5020     __ LoadImmP(cp, Operand::Zero());
   5021     __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
   5022     RecordSafepointWithRegisters(instr->pointer_map(), 1,
   5023                                  Safepoint::kNoLazyDeopt);
   5024     __ StoreToSafepointRegisterSlot(r2, temp);
   5025   }
   5026   __ TestIfSmi(temp);
   5027   __ bne(&done);
   5028 
   5029   __ bind(&deopt);
    5030   // For the "al" condition the operand is unused, so just pass cr0 there.
   5031   DeoptimizeIf(al, instr, DeoptimizeReason::kInstanceMigrationFailed, cr0);
   5032 
   5033   __ bind(&done);
   5034 }
   5035 
   5036 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
   5037   class DeferredCheckMaps final : public LDeferredCode {
   5038    public:
   5039     DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
   5040         : LDeferredCode(codegen), instr_(instr), object_(object) {
   5041       SetExit(check_maps());
   5042     }
   5043     void Generate() override {
   5044       codegen()->DoDeferredInstanceMigration(instr_, object_);
   5045     }
   5046     Label* check_maps() { return &check_maps_; }
   5047     LInstruction* instr() override { return instr_; }
   5048 
   5049    private:
   5050     LCheckMaps* instr_;
   5051     Label check_maps_;
   5052     Register object_;
   5053   };
   5054 
   5055   if (instr->hydrogen()->IsStabilityCheck()) {
   5056     const UniqueSet<Map>* maps = instr->hydrogen()->maps();
   5057     for (int i = 0; i < maps->size(); ++i) {
   5058       AddStabilityDependency(maps->at(i).handle());
   5059     }
   5060     return;
   5061   }
   5062 
   5063   LOperand* input = instr->value();
   5064   DCHECK(input->IsRegister());
   5065   Register reg = ToRegister(input);
   5066 
   5067   DeferredCheckMaps* deferred = NULL;
   5068   if (instr->hydrogen()->HasMigrationTarget()) {
   5069     deferred = new (zone()) DeferredCheckMaps(this, instr, reg);
   5070     __ bind(deferred->check_maps());
   5071   }
   5072 
   5073   const UniqueSet<Map>* maps = instr->hydrogen()->maps();
   5074   Label success;
   5075   for (int i = 0; i < maps->size() - 1; i++) {
   5076     Handle<Map> map = maps->at(i).handle();
   5077     __ CompareMap(reg, map, &success);
   5078     __ beq(&success);
   5079   }
   5080 
   5081   Handle<Map> map = maps->at(maps->size() - 1).handle();
   5082   __ CompareMap(reg, map, &success);
   5083   if (instr->hydrogen()->HasMigrationTarget()) {
   5084     __ bne(deferred->entry());
   5085   } else {
   5086     DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
   5087   }
   5088 
   5089   __ bind(&success);
   5090 }
   5091 
   5092 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
   5093   DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
   5094   Register result_reg = ToRegister(instr->result());
   5095   __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
   5096 }
   5097 
   5098 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
   5099   Register unclamped_reg = ToRegister(instr->unclamped());
   5100   Register result_reg = ToRegister(instr->result());
   5101   __ ClampUint8(result_reg, unclamped_reg);
   5102 }
   5103 
   5104 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
   5105   Register scratch = scratch0();
   5106   Register input_reg = ToRegister(instr->unclamped());
   5107   Register result_reg = ToRegister(instr->result());
   5108   DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
   5109   Label is_smi, done, heap_number;
   5110 
   5111   // Both smi and heap number cases are handled.
   5112   __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
   5113 
   5114   // Check for heap number
   5115   __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
   5116   __ CmpP(scratch, Operand(factory()->heap_number_map()));
   5117   __ beq(&heap_number, Label::kNear);
   5118 
   5119   // Check for undefined. Undefined is converted to zero for clamping
   5120   // conversions.
   5121   __ CmpP(input_reg, Operand(factory()->undefined_value()));
   5122   DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
   5123   __ LoadImmP(result_reg, Operand::Zero());
   5124   __ b(&done, Label::kNear);
   5125 
   5126   // Heap number
   5127   __ bind(&heap_number);
   5128   __ LoadDouble(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
   5129   __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
   5130   __ b(&done, Label::kNear);
   5131 
   5132   // smi
   5133   __ bind(&is_smi);
   5134   __ ClampUint8(result_reg, result_reg);
   5135 
   5136   __ bind(&done);
   5137 }
   5138 
   5139 void LCodeGen::DoAllocate(LAllocate* instr) {
   5140   class DeferredAllocate final : public LDeferredCode {
   5141    public:
   5142     DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
   5143         : LDeferredCode(codegen), instr_(instr) {}
   5144     void Generate() override { codegen()->DoDeferredAllocate(instr_); }
   5145     LInstruction* instr() override { return instr_; }
   5146 
   5147    private:
   5148     LAllocate* instr_;
   5149   };
   5150 
   5151   DeferredAllocate* deferred = new (zone()) DeferredAllocate(this, instr);
   5152 
   5153   Register result = ToRegister(instr->result());
   5154   Register scratch = ToRegister(instr->temp1());
   5155   Register scratch2 = ToRegister(instr->temp2());
   5156 
   5157   // Allocate memory for the object.
   5158   AllocationFlags flags = NO_ALLOCATION_FLAGS;
   5159   if (instr->hydrogen()->MustAllocateDoubleAligned()) {
   5160     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
   5161   }
   5162   if (instr->hydrogen()->IsOldSpaceAllocation()) {
   5163     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
   5164     flags = static_cast<AllocationFlags>(flags | PRETENURE);
   5165   }
   5166 
   5167   if (instr->hydrogen()->IsAllocationFoldingDominator()) {
   5168     flags = static_cast<AllocationFlags>(flags | ALLOCATION_FOLDING_DOMINATOR);
   5169   }
   5170 
   5171   DCHECK(!instr->hydrogen()->IsAllocationFolded());
   5172 
   5173   if (instr->size()->IsConstantOperand()) {
   5174     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   5175     CHECK(size <= kMaxRegularHeapObjectSize);
   5176     __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
   5177   } else {
   5178     Register size = ToRegister(instr->size());
   5179     __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
   5180   }
   5181 
   5182   __ bind(deferred->exit());
   5183 
   5184   if (instr->hydrogen()->MustPrefillWithFiller()) {
   5185     if (instr->size()->IsConstantOperand()) {
   5186       int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   5187       __ LoadIntLiteral(scratch, size);
   5188     } else {
   5189       scratch = ToRegister(instr->size());
   5190     }
   5191     __ lay(scratch, MemOperand(scratch, -kPointerSize));
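             // Fill the block from the top down: scratch runs from
             // size - kPointerSize to 0, storing the one-pointer filler map at
             // result + scratch (the -kHeapObjectTag untags result).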
   5192     Label loop;
   5193     __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
   5194     __ bind(&loop);
   5195     __ StoreP(scratch2, MemOperand(scratch, result, -kHeapObjectTag));
   5196 #if V8_TARGET_ARCH_S390X
   5197     __ lay(scratch, MemOperand(scratch, -kPointerSize));
   5198 #else
   5199     // TODO(joransiu): Improve the following sequence.
    5200     // Need to use AHI instead of LAY, as the top nibble is not set with
    5201     // LAY, causing an incorrect result with the signed compare.
   5202     __ AddP(scratch, Operand(-kPointerSize));
   5203 #endif
   5204     __ CmpP(scratch, Operand::Zero());
   5205     __ bge(&loop);
   5206   }
   5207 }
   5208 
   5209 void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
   5210   Register result = ToRegister(instr->result());
   5211 
   5212   // TODO(3095996): Get rid of this. For now, we need to make the
   5213   // result register contain a valid pointer because it is already
   5214   // contained in the register pointer map.
   5215   __ LoadSmiLiteral(result, Smi::kZero);
   5216 
   5217   PushSafepointRegistersScope scope(this);
   5218   if (instr->size()->IsRegister()) {
   5219     Register size = ToRegister(instr->size());
   5220     DCHECK(!size.is(result));
   5221     __ SmiTag(size);
   5222     __ push(size);
   5223   } else {
   5224     int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
   5225 #if !V8_TARGET_ARCH_S390X
   5226     if (size >= 0 && size <= Smi::kMaxValue) {
   5227 #endif
   5228       __ Push(Smi::FromInt(size));
   5229 #if !V8_TARGET_ARCH_S390X
   5230     } else {
   5231       // We should never get here at runtime => abort
   5232       __ stop("invalid allocation size");
   5233       return;
   5234     }
   5235 #endif
   5236   }
   5237 
   5238   int flags = AllocateDoubleAlignFlag::encode(
   5239       instr->hydrogen()->MustAllocateDoubleAligned());
   5240   if (instr->hydrogen()->IsOldSpaceAllocation()) {
   5241     DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
   5242     flags = AllocateTargetSpace::update(flags, OLD_SPACE);
   5243   } else {
   5244     flags = AllocateTargetSpace::update(flags, NEW_SPACE);
   5245   }
   5246   __ Push(Smi::FromInt(flags));
   5247 
   5248   CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr,
   5249                           instr->context());
   5250   __ StoreToSafepointRegisterSlot(r2, result);
   5251 
   5252   if (instr->hydrogen()->IsAllocationFoldingDominator()) {
   5253     AllocationFlags allocation_flags = NO_ALLOCATION_FLAGS;
   5254     if (instr->hydrogen()->IsOldSpaceAllocation()) {
   5255       DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
   5256       allocation_flags = static_cast<AllocationFlags>(flags | PRETENURE);
   5257     }
   5258     // If the allocation folding dominator allocate triggered a GC, allocation
    5259     // happened in the runtime. We have to reset the top pointer to virtually
   5260     // undo the allocation.
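             // Stripping kHeapObjectTag from r2 yields the raw start of the
             // block just returned; writing that back as the allocation top
             // lets the folded allocations that follow bump-allocate from
             // inside it.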
   5261     ExternalReference allocation_top =
   5262         AllocationUtils::GetAllocationTopReference(isolate(), allocation_flags);
   5263     Register top_address = scratch0();
   5264     __ SubP(r2, r2, Operand(kHeapObjectTag));
   5265     __ mov(top_address, Operand(allocation_top));
   5266     __ StoreP(r2, MemOperand(top_address));
   5267     __ AddP(r2, r2, Operand(kHeapObjectTag));
   5268   }
   5269 }
   5270 
void LCodeGen::DoFastAllocate(LFastAllocate* instr) {
  DCHECK(instr->hydrogen()->IsAllocationFolded());
  DCHECK(!instr->hydrogen()->IsAllocationFoldingDominator());
  Register result = ToRegister(instr->result());
  Register scratch1 = ToRegister(instr->temp1());
  Register scratch2 = ToRegister(instr->temp2());

  AllocationFlags flags = ALLOCATION_FOLDED;
  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
  }
  if (instr->hydrogen()->IsOldSpaceAllocation()) {
    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
    flags = static_cast<AllocationFlags>(flags | PRETENURE);
  }
  if (instr->size()->IsConstantOperand()) {
    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
    CHECK(size <= kMaxRegularHeapObjectSize);
    __ FastAllocate(size, result, scratch1, scratch2, flags);
  } else {
    Register size = ToRegister(instr->size());
    __ FastAllocate(size, result, scratch1, scratch2, flags);
  }
}

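// typeof: a Smi is always a number, so the stub call is skipped for Smi
// inputs and "number" is returned directly.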
void LCodeGen::DoTypeof(LTypeof* instr) {
  DCHECK(ToRegister(instr->value()).is(r5));
  DCHECK(ToRegister(instr->result()).is(r2));
  Label end, do_call;
  Register value_register = ToRegister(instr->value());
  __ JumpIfNotSmi(value_register, &do_call);
  __ mov(r2, Operand(isolate()->factory()->number_string()));
  __ b(&end);
  __ bind(&do_call);
  Callable callable = CodeFactory::Typeof(isolate());
  CallCode(callable.code(), RelocInfo::CODE_TARGET, instr);
  __ bind(&end);
}

void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
  Register input = ToRegister(instr->value());

  Condition final_branch_condition =
      EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), input,
                   instr->type_literal());
  if (final_branch_condition != kNoCondition) {
    EmitBranch(instr, final_branch_condition);
  }
}

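// Emits the comparison for `typeof input == type_name` and returns the
// condition under which the true branch should be taken, or kNoCondition
// if the type literal is unknown and control already went to false_label.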
Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
                                 Register input, Handle<String> type_name) {
  Condition final_branch_condition = kNoCondition;
  Register scratch = scratch0();
  Factory* factory = isolate()->factory();
  if (String::Equals(type_name, factory->number_string())) {
    __ JumpIfSmi(input, true_label);
    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->string_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
    final_branch_condition = lt;

  } else if (String::Equals(type_name, factory->symbol_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->boolean_string())) {
    __ CompareRoot(input, Heap::kTrueValueRootIndex);
    __ beq(true_label);
    __ CompareRoot(input, Heap::kFalseValueRootIndex);
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->undefined_string())) {
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    __ beq(false_label);
    __ JumpIfSmi(input, false_label);
    // Check for undetectable objects => true.
    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ ExtractBit(r0, scratch, Map::kIsUndetectable);
    __ CmpP(r0, Operand::Zero());
    final_branch_condition = ne;

  } else if (String::Equals(type_name, factory->function_string())) {
    __ JumpIfSmi(input, false_label);
    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
    __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ AndP(scratch, scratch,
            Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    __ CmpP(scratch, Operand(1 << Map::kIsCallable));
    final_branch_condition = eq;

  } else if (String::Equals(type_name, factory->object_string())) {
    __ JumpIfSmi(input, false_label);
    __ CompareRoot(input, Heap::kNullValueRootIndex);
    __ beq(true_label);
    STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
    __ CompareObjectType(input, scratch, ip, FIRST_JS_RECEIVER_TYPE);
    __ blt(false_label);
    // Check for callable or undetectable objects => false.
    __ LoadlB(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
    __ AndP(r0, scratch,
            Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
    __ CmpP(r0, Operand::Zero());
    final_branch_condition = eq;

  } else {
    __ b(false_label);
  }

  return final_branch_condition;
}

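// Pads the code with 2-byte nops so that at least `space_needed` bytes lie
// between the previous lazy-bailout site and the current PC, leaving room
// for the deoptimizer to patch in a call.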
void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
  if (info()->ShouldEnsureSpaceForLazyDeopt()) {
    // Ensure that we have enough space after the previous lazy-bailout
    // instruction for patching the code here.
    int current_pc = masm()->pc_offset();
    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
      DCHECK_EQ(0, padding_size % 2);
      while (padding_size > 0) {
        __ nop();
        padding_size -= 2;
      }
    }
  }
  last_lazy_deopt_pc_ = masm()->pc_offset();
}

void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
  last_lazy_deopt_pc_ = masm()->pc_offset();
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}

void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
}

void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}

void LCodeGen::DoDummyUse(LDummyUse* instr) {
  // Nothing to see here, move on!
}

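// Deferred path for LStackCheck: calls Runtime::kStackGuard with registers
// saved and records the safepoint needed for lazy deoptimization.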
void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
  PushSafepointRegistersScope scope(this);
  LoadContextFromDeferred(instr->context());
  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
  RecordSafepointWithLazyDeopt(
      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
}

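// Function-entry stack checks call the StackCheck builtin inline; checks at
// backward branches instead jump to a deferred runtime call when the stack
// pointer is below the limit.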
void LCodeGen::DoStackCheck(LStackCheck* instr) {
  class DeferredStackCheck final : public LDeferredCode {
   public:
    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
        : LDeferredCode(codegen), instr_(instr) {}
    void Generate() override { codegen()->DoDeferredStackCheck(instr_); }
    LInstruction* instr() override { return instr_; }

   private:
    LStackCheck* instr_;
  };

  DCHECK(instr->HasEnvironment());
  LEnvironment* env = instr->environment();
  // There is no LLazyBailout instruction for stack-checks. We have to
  // prepare for lazy deoptimization explicitly here.
  if (instr->hydrogen()->is_function_entry()) {
    // Perform stack overflow check.
    Label done;
    __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
    __ bge(&done, Label::kNear);
    DCHECK(instr->context()->IsRegister());
    DCHECK(ToRegister(instr->context()).is(cp));
    CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET,
             instr);
    __ bind(&done);
  } else {
    DCHECK(instr->hydrogen()->is_backwards_branch());
    // Perform stack overflow check if this goto needs it before jumping.
    DeferredStackCheck* deferred_stack_check =
        new (zone()) DeferredStackCheck(this, instr);
    __ CmpLogicalP(sp, RootMemOperand(Heap::kStackLimitRootIndex));
    __ blt(deferred_stack_check->entry());
    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
    __ bind(instr->done_label());
    deferred_stack_check->SetExit(instr->done_label());
    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
    // Don't record a deoptimization index for the safepoint here.
    // This will be done explicitly when emitting the call and the safepoint
    // in the deferred code.
  }
}

void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
  // This is a pseudo-instruction that ensures that the environment here is
  // properly registered for deoptimization and records the assembler's PC
  // offset.
  LEnvironment* environment = instr->environment();

  // If the environment were already registered, we would have no way of
  // backpatching it with the spill slot operands.
  DCHECK(!environment->HasBeenRegistered());
  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);

  GenerateOsrPrologue();
}

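// for-in preparation: uses the enum cache if it is valid for the object in
// r2, otherwise falls back to Runtime::kForInEnumerate.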
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Label use_cache, call_runtime;
  __ CheckEnumCache(&call_runtime);

  __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
  __ b(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(r2);
  CallRuntime(Runtime::kForInEnumerate, instr);
  __ bind(&use_cache);
}

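// Loads the enum cache array for the map, or the empty fixed array when the
// map's EnumLength is zero; deoptimizes if no cache is present.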
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ CmpSmiLiteral(result, Smi::kZero, r0);
  __ bne(&load_cache, Label::kNear);
  __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ b(&done, Label::kNear);

  __ bind(&load_cache);
  __ LoadInstanceDescriptors(map, result);
  __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  __ CmpP(result, Operand::Zero());
  DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache);

  __ bind(&done);
}

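// Deoptimizes with kWrongMap if the object's map no longer matches the
// expected map (the enumerated object was modified during for-in).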
void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  __ CmpP(map, scratch0());
  DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
}

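// Deferred path for LLoadFieldByIndex: calls Runtime::kLoadMutableDouble to
// load the boxed double; cp is loaded with a Smi zero, presumably as a
// dummy context for the runtime call.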
void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                           Register result, Register object,
                                           Register index) {
  PushSafepointRegistersScope scope(this);
  __ Push(object, index);
  __ LoadImmP(cp, Operand::Zero());
  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
  RecordSafepointWithRegisters(instr->pointer_map(), 2,
                               Safepoint::kNoLazyDeopt);
  __ StoreToSafepointRegisterSlot(r2, result);
}

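// Loads a property by (smi) index: non-negative indices address in-object
// fields, negative ones the out-of-object properties backing store.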
void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
  class DeferredLoadMutableDouble final : public LDeferredCode {
   public:
    DeferredLoadMutableDouble(LCodeGen* codegen, LLoadFieldByIndex* instr,
                              Register result, Register object, Register index)
        : LDeferredCode(codegen),
          instr_(instr),
          result_(result),
          object_(object),
          index_(index) {}
    void Generate() override {
      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
    }
    LInstruction* instr() override { return instr_; }

   private:
    LLoadFieldByIndex* instr_;
    Register result_;
    Register object_;
    Register index_;
  };

  Register object = ToRegister(instr->object());
  Register index = ToRegister(instr->index());
  Register result = ToRegister(instr->result());
  Register scratch = scratch0();

  DeferredLoadMutableDouble* deferred;
  deferred = new (zone())
      DeferredLoadMutableDouble(this, instr, result, object, index);

  Label out_of_object, done;

  // The low bit of the smi index flags fields holding mutable doubles,
  // which must be loaded through the deferred path.
  __ TestBitMask(index, reinterpret_cast<uintptr_t>(Smi::FromInt(1)), r0);
  __ bne(deferred->entry());
  __ ShiftRightArithP(index, index, Operand(1));

  __ CmpP(index, Operand::Zero());
  __ blt(&out_of_object, Label::kNear);

  __ SmiToPtrArrayOffset(r0, index);
  __ AddP(scratch, object, r0);
  __ LoadP(result, FieldMemOperand(scratch, JSObject::kHeaderSize));

  __ b(&done, Label::kNear);

  __ bind(&out_of_object);
  __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
  // Index is equal to the negated out-of-object property index plus 1.
  __ SmiToPtrArrayOffset(r0, index);
  __ SubP(scratch, result, r0);
  __ LoadP(result,
           FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize));
  __ bind(deferred->exit());
  __ bind(&done);
}


#undef __

}  // namespace internal
}  // namespace v8