Home | History | Annotate | Download | only in s390
      1 // Copyright 2015 the V8 project authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "src/compiler/code-generator.h"
      6 
      7 #include "src/ast/scopes.h"
      8 #include "src/compiler/code-generator-impl.h"
      9 #include "src/compiler/gap-resolver.h"
     10 #include "src/compiler/node-matchers.h"
     11 #include "src/compiler/osr.h"
     12 #include "src/s390/macro-assembler-s390.h"
     13 
     14 namespace v8 {
     15 namespace internal {
     16 namespace compiler {
     17 
// Shorthand used by every emit site in this file: expands to the
// code generator's MacroAssembler.
#define __ masm()->

// ip is used as the general scratch register throughout this backend.
#define kScratchReg ip
     21 
     22 // Adds S390-specific methods to convert InstructionOperands.
// Adds S390-specific methods to convert InstructionOperands.
class S390OperandConverter final : public InstructionOperandConverter {
 public:
  S390OperandConverter(CodeGenerator* gen, Instruction* instr)
      : InstructionOperandConverter(gen, instr) {}

  // Number of output operands on the wrapped instruction.
  size_t OutputCount() { return instr_->OutputCount(); }

  // Returns true when the instruction's flags condition is one of the
  // unsigned comparisons, i.e. a logical (unsigned) compare instruction
  // must be emitted instead of a signed one.
  bool CompareLogical() const {
    switch (instr_->flags_condition()) {
      case kUnsignedLessThan:
      case kUnsignedGreaterThanOrEqual:
      case kUnsignedLessThanOrEqual:
      case kUnsignedGreaterThan:
        return true;
      default:
        return false;
    }
    UNREACHABLE();
    return false;
  }

  // Converts constant input |index| into an assembler Operand.
  // kInt64 is only representable on 64-bit builds; on 31/32-bit builds it
  // deliberately falls through (no break) to the reference/heap-object
  // kinds, which have no immediate encoding here and hit UNREACHABLE.
  Operand InputImmediate(size_t index) {
    Constant constant = ToConstant(instr_->InputAt(index));
    switch (constant.type()) {
      case Constant::kInt32:
        return Operand(constant.ToInt32());
      case Constant::kFloat32:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
      case Constant::kFloat64:
        return Operand(
            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
      case Constant::kInt64:
#if V8_TARGET_ARCH_S390X
        return Operand(constant.ToInt64());
#endif
      case Constant::kExternalReference:
      case Constant::kHeapObject:
      case Constant::kRpoNumber:
        break;
    }
    UNREACHABLE();
    return Operand::Zero();
  }

  // Decodes the addressing mode encoded in the instruction opcode and
  // builds a MemOperand from the inputs starting at |*first_index|,
  // advancing |*first_index| past the inputs that were consumed.
  MemOperand MemoryOperand(AddressingMode* mode, size_t* first_index) {
    const size_t index = *first_index;
    if (mode) *mode = AddressingModeField::decode(instr_->opcode());
    switch (AddressingModeField::decode(instr_->opcode())) {
      case kMode_None:
        break;
      case kMode_MRI:
        // Register base + immediate displacement.
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
      case kMode_MRR:
        // Register base + register index.
        *first_index += 2;
        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
    }
    UNREACHABLE();
    return MemOperand(r0);
  }

  // Convenience overload. Note |first_index| is taken by value, so callers
  // cannot observe how many inputs were consumed.
  MemOperand MemoryOperand(AddressingMode* mode = NULL,
                           size_t first_index = 0) {
    return MemoryOperand(mode, &first_index);
  }

  // Converts a stack-slot operand into a frame-relative MemOperand.
  MemOperand ToMemOperand(InstructionOperand* op) const {
    DCHECK_NOT_NULL(op);
    DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
    return SlotToMemOperand(AllocatedOperand::cast(op)->index());
  }

  // Maps an abstract spill-slot index to an sp- or fp-relative MemOperand,
  // depending on how the frame is currently being accessed.
  MemOperand SlotToMemOperand(int slot) const {
    FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
  }
};
    101 
    102 static inline bool HasRegisterInput(Instruction* instr, int index) {
    103   return instr->InputAt(index)->IsRegister();
    104 }
    105 
    106 namespace {
    107 
    108 class OutOfLineLoadNAN32 final : public OutOfLineCode {
    109  public:
    110   OutOfLineLoadNAN32(CodeGenerator* gen, DoubleRegister result)
    111       : OutOfLineCode(gen), result_(result) {}
    112 
    113   void Generate() final {
    114     __ LoadDoubleLiteral(result_, std::numeric_limits<float>::quiet_NaN(),
    115                          kScratchReg);
    116   }
    117 
    118  private:
    119   DoubleRegister const result_;
    120 };
    121 
    122 class OutOfLineLoadNAN64 final : public OutOfLineCode {
    123  public:
    124   OutOfLineLoadNAN64(CodeGenerator* gen, DoubleRegister result)
    125       : OutOfLineCode(gen), result_(result) {}
    126 
    127   void Generate() final {
    128     __ LoadDoubleLiteral(result_, std::numeric_limits<double>::quiet_NaN(),
    129                          kScratchReg);
    130   }
    131 
    132  private:
    133   DoubleRegister const result_;
    134 };
    135 
    136 class OutOfLineLoadZero final : public OutOfLineCode {
    137  public:
    138   OutOfLineLoadZero(CodeGenerator* gen, Register result)
    139       : OutOfLineCode(gen), result_(result) {}
    140 
    141   void Generate() final { __ LoadImmP(result_, Operand::Zero()); }
    142 
    143  private:
    144   Register const result_;
    145 };
    146 
// Out-of-line slow path of the write barrier for a store of |value| into
// |object| at the given offset. The offset arrives either in a register
// or as an immediate; exactly one constructor form is used, and the
// register form is recognized by offset_ != no_reg.
class OutOfLineRecordWrite final : public OutOfLineCode {
 public:
  OutOfLineRecordWrite(CodeGenerator* gen, Register object, Register offset,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode)
      : OutOfLineCode(gen),
        object_(object),
        offset_(offset),
        offset_immediate_(0),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
        must_save_lr_(!gen->frame_access_state()->has_frame()) {}

  OutOfLineRecordWrite(CodeGenerator* gen, Register object, int32_t offset,
                       Register value, Register scratch0, Register scratch1,
                       RecordWriteMode mode)
      : OutOfLineCode(gen),
        object_(object),
        offset_(no_reg),
        offset_immediate_(offset),
        value_(value),
        scratch0_(scratch0),
        scratch1_(scratch1),
        mode_(mode),
        must_save_lr_(!gen->frame_access_state()->has_frame()) {}

  void Generate() final {
    // Smis are not heap pointers; skip the barrier entirely.
    if (mode_ > RecordWriteMode::kValueIsPointer) {
      __ JumpIfSmi(value_, exit());
    }
    // Skip the barrier when the value's page is of no interest to the GC.
    __ CheckPageFlag(value_, scratch0_,
                     MemoryChunk::kPointersToHereAreInterestingMask, eq,
                     exit());
    RememberedSetAction const remembered_set_action =
        mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
                                             : OMIT_REMEMBERED_SET;
    SaveFPRegsMode const save_fp_mode =
        frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
    if (must_save_lr_) {
      // We need to save and restore r14 if the frame was elided.
      __ Push(r14);
    }
    RecordWriteStub stub(isolate(), object_, scratch0_, scratch1_,
                         remembered_set_action, save_fp_mode);
    // Materialize the slot address (object + offset) in scratch1_, which
    // was also handed to the stub above.
    if (offset_.is(no_reg)) {
      __ AddP(scratch1_, object_, Operand(offset_immediate_));
    } else {
      DCHECK_EQ(0, offset_immediate_);
      __ AddP(scratch1_, object_, offset_);
    }
    __ CallStub(&stub);
    if (must_save_lr_) {
      // We need to save and restore r14 if the frame was elided.
      __ Pop(r14);
    }
  }

 private:
  Register const object_;
  Register const offset_;
  int32_t const offset_immediate_;  // Valid if offset_.is(no_reg).
  Register const value_;
  Register const scratch0_;
  Register const scratch1_;
  RecordWriteMode const mode_;
  bool must_save_lr_;
};
    216 
    217 Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
    218   switch (condition) {
    219     case kEqual:
    220       return eq;
    221     case kNotEqual:
    222       return ne;
    223     case kSignedLessThan:
    224     case kUnsignedLessThan:
    225       return lt;
    226     case kSignedGreaterThanOrEqual:
    227     case kUnsignedGreaterThanOrEqual:
    228       return ge;
    229     case kSignedLessThanOrEqual:
    230     case kUnsignedLessThanOrEqual:
    231       return le;
    232     case kSignedGreaterThan:
    233     case kUnsignedGreaterThan:
    234       return gt;
    235     case kOverflow:
    236       // Overflow checked for AddP/SubP only.
    237       switch (op) {
    238 #if V8_TARGET_ARCH_S390X
    239         case kS390_Add:
    240         case kS390_Sub:
    241 #endif
    242         case kS390_AddWithOverflow32:
    243         case kS390_SubWithOverflow32:
    244           return lt;
    245         default:
    246           break;
    247       }
    248       break;
    249     case kNotOverflow:
    250       switch (op) {
    251 #if V8_TARGET_ARCH_S390X
    252         case kS390_Add:
    253         case kS390_Sub:
    254 #endif
    255         case kS390_AddWithOverflow32:
    256         case kS390_SubWithOverflow32:
    257           return ge;
    258         default:
    259           break;
    260       }
    261       break;
    262     default:
    263       break;
    264   }
    265   UNREACHABLE();
    266   return kNoCondition;
    267 }
    268 
    269 }  // namespace
    270 
// Emits a unary floating-point operation: output = asm_instr(input 0).
#define ASSEMBLE_FLOAT_UNOP(asm_instr)                                \
  do {                                                                \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0)); \
  } while (0)

// Emits a binary floating-point operation on inputs 0 and 1.
#define ASSEMBLE_FLOAT_BINOP(asm_instr)                              \
  do {                                                               \
    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
                 i.InputDoubleRegister(1));                          \
  } while (0)

// Emits a binary integer operation, choosing the register or the
// Operand-immediate instruction form by how input 1 was allocated.
#define ASSEMBLE_BINOP(asm_instr_reg, asm_instr_imm)           \
  do {                                                         \
    if (HasRegisterInput(instr, 1)) {                          \
      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
                       i.InputRegister(1));                    \
    } else {                                                   \
      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
                       i.InputImmediate(1));                   \
    }                                                          \
  } while (0)

// Like ASSEMBLE_BINOP, but the immediate form takes a raw int32 rather
// than an Operand.
#define ASSEMBLE_BINOP_INT(asm_instr_reg, asm_instr_imm)       \
  do {                                                         \
    if (HasRegisterInput(instr, 1)) {                          \
      __ asm_instr_reg(i.OutputRegister(), i.InputRegister(0), \
                       i.InputRegister(1));                    \
    } else {                                                   \
      __ asm_instr_imm(i.OutputRegister(), i.InputRegister(0), \
                       i.InputInt32(1));                       \
    }                                                          \
  } while (0)

// Emits an add that records overflow state via kScratchReg (r0 serves as
// an extra scratch); kOverflow/kNotOverflow are then resolved by
// FlagsConditionToCondition for these opcodes.
#define ASSEMBLE_ADD_WITH_OVERFLOW()                                    \
  do {                                                                  \
    if (HasRegisterInput(instr, 1)) {                                   \
      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                i.InputRegister(1), kScratchReg, r0);   \
    } else {                                                            \
      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                i.InputInt32(1), kScratchReg, r0);      \
    }                                                                   \
  } while (0)

// Emits a sub with overflow check; the immediate case is implemented as
// an add of the negated immediate.
// NOTE(review): -i.InputInt32(1) overflows for INT32_MIN -- confirm the
// instruction selector never produces that immediate here.
#define ASSEMBLE_SUB_WITH_OVERFLOW()                                    \
  do {                                                                  \
    if (HasRegisterInput(instr, 1)) {                                   \
      __ SubAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                i.InputRegister(1), kScratchReg, r0);   \
    } else {                                                            \
      __ AddAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0), \
                                -i.InputInt32(1), kScratchReg, r0);     \
    }                                                                   \
  } while (0)
    325 
#if V8_TARGET_ARCH_S390X
// 32-bit overflow-checked add/sub on 64-bit hardware: after the generic
// overflow sequence, re-test the scratch value.
// NOTE(review): presumably LoadAndTestP_ExtendSrc re-derives the
// condition from the sign-extended low 32 bits -- confirm against the
// MacroAssembler implementation.
#define ASSEMBLE_ADD_WITH_OVERFLOW32()                   \
  do {                                                   \
    ASSEMBLE_ADD_WITH_OVERFLOW();                        \
    __ LoadAndTestP_ExtendSrc(kScratchReg, kScratchReg); \
  } while (0)

#define ASSEMBLE_SUB_WITH_OVERFLOW32()                   \
  do {                                                   \
    ASSEMBLE_SUB_WITH_OVERFLOW();                        \
    __ LoadAndTestP_ExtendSrc(kScratchReg, kScratchReg); \
  } while (0)
#else
// On 31/32-bit targets the 32-bit variants are the generic ones.
#define ASSEMBLE_ADD_WITH_OVERFLOW32 ASSEMBLE_ADD_WITH_OVERFLOW
#define ASSEMBLE_SUB_WITH_OVERFLOW32 ASSEMBLE_SUB_WITH_OVERFLOW
#endif
    342 
// Emits a compare of input 0 against input 1, selecting the signed
// (cmp_instr) or logical/unsigned (cmpl_instr) instruction according to
// the instruction's flags condition, and the register or immediate form
// according to how input 1 was allocated.
#define ASSEMBLE_COMPARE(cmp_instr, cmpl_instr)                 \
  do {                                                          \
    if (HasRegisterInput(instr, 1)) {                           \
      if (i.CompareLogical()) {                                 \
        __ cmpl_instr(i.InputRegister(0), i.InputRegister(1));  \
      } else {                                                  \
        __ cmp_instr(i.InputRegister(0), i.InputRegister(1));   \
      }                                                         \
    } else {                                                    \
      if (i.CompareLogical()) {                                 \
        __ cmpl_instr(i.InputRegister(0), i.InputImmediate(1)); \
      } else {                                                  \
        __ cmp_instr(i.InputRegister(0), i.InputImmediate(1));  \
      }                                                         \
    }                                                           \
  } while (0)
    359 
    360 #define ASSEMBLE_FLOAT_COMPARE(cmp_instr)                            \
    361   do {                                                               \
    362     __ cmp_instr(i.InputDoubleRegister(0), i.InputDoubleRegister(1); \
    363   } while (0)
    364 
// The divide instruction (div_instr) implicitly uses the register pair
// r0 & r1 below:
//   R0:R1 = R1 / divisor, with the remainder left in R0.
// The dividend is first shifted into position in the pair, and the
// remainder is then copied to the output register.
#define ASSEMBLE_MODULO(div_instr, shift_instr) \
  do {                                          \
    __ LoadRR(r0, i.InputRegister(0));          \
    __ shift_instr(r0, Operand(32));            \
    __ div_instr(r0, i.InputRegister(1));       \
    __ ltr(i.OutputRegister(), r0);             \
  } while (0)
    376 
// Emits a call to the C fmod implementation: out = fmod(in0, in1).
// Uses a MANUAL frame scope since this is a raw C call from generated code.
#define ASSEMBLE_FLOAT_MODULO()                                               \
  do {                                                                        \
    FrameScope scope(masm(), StackFrame::MANUAL);                             \
    __ PrepareCallCFunction(0, 2, kScratchReg);                               \
    __ MovToFloatParameters(i.InputDoubleRegister(0),                         \
                            i.InputDoubleRegister(1));                        \
    __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()), \
                     0, 2);                                                   \
    __ MovFromFloatResult(i.OutputDoubleRegister());                          \
  } while (0)

// Emits a call to the C ieee754 unary math function |name| (sin, log, ...).
#define ASSEMBLE_IEEE754_UNOP(name)                                            \
  do {                                                                         \
    /* TODO(bmeurer): We should really get rid of this special instruction, */ \
    /* and generate a CallAddress instruction instead. */                      \
    FrameScope scope(masm(), StackFrame::MANUAL);                              \
    __ PrepareCallCFunction(0, 1, kScratchReg);                                \
    __ MovToFloatParameter(i.InputDoubleRegister(0));                          \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()),  \
                     0, 1);                                                    \
    /* Move the result in the double result register. */                       \
    __ MovFromFloatResult(i.OutputDoubleRegister());                           \
  } while (0)

// Emits a call to the C ieee754 binary math function |name| (atan2, pow, ...).
#define ASSEMBLE_IEEE754_BINOP(name)                                           \
  do {                                                                         \
    /* TODO(bmeurer): We should really get rid of this special instruction, */ \
    /* and generate a CallAddress instruction instead. */                      \
    FrameScope scope(masm(), StackFrame::MANUAL);                              \
    __ PrepareCallCFunction(0, 2, kScratchReg);                                \
    __ MovToFloatParameters(i.InputDoubleRegister(0),                          \
                            i.InputDoubleRegister(1));                         \
    __ CallCFunction(ExternalReference::ieee754_##name##_function(isolate()),  \
                     0, 2);                                                    \
    /* Move the result in the double result register. */                       \
    __ MovFromFloatResult(i.OutputDoubleRegister());                           \
  } while (0)
    414 
// Emits out = (in0 >= in1) ? in0 : in1 via compare-and-branch. Both macro
// parameters are currently unused.
// NOTE(review): if either operand is NaN, cdbr yields the unordered
// condition, bge is not taken, and in1 is selected -- confirm this matches
// the intended NaN propagation for float max.
#define ASSEMBLE_FLOAT_MAX(double_scratch_reg, general_scratch_reg) \
  do {                                                              \
    Label ge, done;                                                 \
    __ cdbr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));    \
    __ bge(&ge, Label::kNear);                                      \
    __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(1));    \
    __ b(&done, Label::kNear);                                      \
    __ bind(&ge);                                                   \
    __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0));    \
    __ bind(&done);                                                 \
  } while (0)

// Emits out = (in0 >= in1) ? in1 : in0. Both macro parameters are
// currently unused. Same NaN caveat as ASSEMBLE_FLOAT_MAX, except the
// unordered path selects in0.
#define ASSEMBLE_FLOAT_MIN(double_scratch_reg, general_scratch_reg) \
  do {                                                              \
    Label ge, done;                                                 \
    __ cdbr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));    \
    __ bge(&ge, Label::kNear);                                      \
    __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(0));    \
    __ b(&done, Label::kNear);                                      \
    __ bind(&ge);                                                   \
    __ Move(i.OutputDoubleRegister(), i.InputDoubleRegister(1));    \
    __ bind(&done);                                                 \
  } while (0)
    438 
// Only MRI mode for these instructions available
#define ASSEMBLE_LOAD_FLOAT(asm_instr)                \
  do {                                                \
    DoubleRegister result = i.OutputDoubleRegister(); \
    AddressingMode mode = kMode_None;                 \
    MemOperand operand = i.MemoryOperand(&mode);      \
    __ asm_instr(result, operand);                    \
  } while (0)

// Loads an integer of asm_instr's width into the output register.
#define ASSEMBLE_LOAD_INTEGER(asm_instr)         \
  do {                                           \
    Register result = i.OutputRegister();        \
    AddressingMode mode = kMode_None;            \
    MemOperand operand = i.MemoryOperand(&mode); \
    __ asm_instr(result, operand);               \
  } while (0)

// Stores a 32-bit float; the value register is the input following the
// address inputs (|index| is advanced by MemoryOperand).
#define ASSEMBLE_STORE_FLOAT32()                         \
  do {                                                   \
    size_t index = 0;                                    \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    DoubleRegister value = i.InputDoubleRegister(index); \
    __ StoreFloat32(value, operand);                     \
  } while (0)

// Stores a 64-bit double; value register follows the address inputs.
#define ASSEMBLE_STORE_DOUBLE()                          \
  do {                                                   \
    size_t index = 0;                                    \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    DoubleRegister value = i.InputDoubleRegister(index); \
    __ StoreDouble(value, operand);                      \
  } while (0)

// Stores an integer of asm_instr's width; value follows the address inputs.
#define ASSEMBLE_STORE_INTEGER(asm_instr)                \
  do {                                                   \
    size_t index = 0;                                    \
    AddressingMode mode = kMode_None;                    \
    MemOperand operand = i.MemoryOperand(&mode, &index); \
    Register value = i.InputRegister(index);             \
    __ asm_instr(value, operand);                        \
  } while (0)
    482 
// Bounds-checked float/double load: compares the offset (the second
// address register) against the length in input 2 and, when out of
// bounds, branches to an out-of-line stub that yields a quiet NaN.
// Note: |index| is passed BY VALUE here, so the two-argument
// MemoryOperand overload is used and |index| stays 0; inputs 0/1 form
// the address and input 2 is the length.
#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr, width)              \
  do {                                                             \
    DoubleRegister result = i.OutputDoubleRegister();              \
    size_t index = 0;                                              \
    AddressingMode mode = kMode_None;                              \
    MemOperand operand = i.MemoryOperand(&mode, index);            \
    Register offset = operand.rb();                                \
    if (HasRegisterInput(instr, 2)) {                              \
      __ CmpLogical32(offset, i.InputRegister(2));                 \
    } else {                                                       \
      __ CmpLogical32(offset, i.InputImmediate(2));                \
    }                                                              \
    auto ool = new (zone()) OutOfLineLoadNAN##width(this, result); \
    __ bge(ool->entry());                                          \
    __ CleanUInt32(offset);                                        \
    __ asm_instr(result, operand);                                 \
    __ bind(ool->exit());                                          \
  } while (0)

// Bounds-checked integer load; out-of-bounds yields zero via the
// OutOfLineLoadZero stub. Same input layout as the float variant.
#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)             \
  do {                                                       \
    Register result = i.OutputRegister();                    \
    size_t index = 0;                                        \
    AddressingMode mode = kMode_None;                        \
    MemOperand operand = i.MemoryOperand(&mode, index);      \
    Register offset = operand.rb();                          \
    if (HasRegisterInput(instr, 2)) {                        \
      __ CmpLogical32(offset, i.InputRegister(2));           \
    } else {                                                 \
      __ CmpLogical32(offset, i.InputImmediate(2));          \
    }                                                        \
    auto ool = new (zone()) OutOfLineLoadZero(this, result); \
    __ bge(ool->entry());                                    \
    __ CleanUInt32(offset);                                  \
    __ asm_instr(result, operand);                           \
    __ bind(ool->exit());                                    \
  } while (0)

// Bounds-checked float32 store: skips the store when the offset is not
// below the length (input 2). The value to store is input 3.
#define ASSEMBLE_CHECKED_STORE_FLOAT32()                \
  do {                                                  \
    Label done;                                         \
    size_t index = 0;                                   \
    AddressingMode mode = kMode_None;                   \
    MemOperand operand = i.MemoryOperand(&mode, index); \
    Register offset = operand.rb();                     \
    if (HasRegisterInput(instr, 2)) {                   \
      __ CmpLogical32(offset, i.InputRegister(2));      \
    } else {                                            \
      __ CmpLogical32(offset, i.InputImmediate(2));     \
    }                                                   \
    __ bge(&done);                                      \
    DoubleRegister value = i.InputDoubleRegister(3);    \
    __ CleanUInt32(offset);                             \
    __ StoreFloat32(value, operand);                    \
    __ bind(&done);                                     \
  } while (0)

// Bounds-checked double store.
// NOTE(review): only this variant asserts kMode_MRR; the sibling checked
// macros rely on the same mode implicitly (operand.rb() must be a
// register offset) -- consider adding the DCHECK to all of them.
#define ASSEMBLE_CHECKED_STORE_DOUBLE()                 \
  do {                                                  \
    Label done;                                         \
    size_t index = 0;                                   \
    AddressingMode mode = kMode_None;                   \
    MemOperand operand = i.MemoryOperand(&mode, index); \
    DCHECK_EQ(kMode_MRR, mode);                         \
    Register offset = operand.rb();                     \
    if (HasRegisterInput(instr, 2)) {                   \
      __ CmpLogical32(offset, i.InputRegister(2));      \
    } else {                                            \
      __ CmpLogical32(offset, i.InputImmediate(2));     \
    }                                                   \
    __ bge(&done);                                      \
    DoubleRegister value = i.InputDoubleRegister(3);    \
    __ CleanUInt32(offset);                             \
    __ StoreDouble(value, operand);                     \
    __ bind(&done);                                     \
  } while (0)

// Bounds-checked integer store of asm_instr's width; value is input 3.
#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)       \
  do {                                                  \
    Label done;                                         \
    size_t index = 0;                                   \
    AddressingMode mode = kMode_None;                   \
    MemOperand operand = i.MemoryOperand(&mode, index); \
    Register offset = operand.rb();                     \
    if (HasRegisterInput(instr, 2)) {                   \
      __ CmpLogical32(offset, i.InputRegister(2));      \
    } else {                                            \
      __ CmpLogical32(offset, i.InputImmediate(2));     \
    }                                                   \
    __ bge(&done);                                      \
    Register value = i.InputRegister(3);                \
    __ CleanUInt32(offset);                             \
    __ asm_instr(value, operand);                       \
    __ bind(&done);                                     \
  } while (0)
    578 
// Emits code to tear down the current (manually-built) frame.
void CodeGenerator::AssembleDeconstructFrame() {
  __ LeaveFrame(StackFrame::MANUAL);
}
    582 
// Drops |stack_param_delta| worth of caller stack-parameter slots before
// a tail call, then resets frame access to the default mode.
void CodeGenerator::AssembleDeconstructActivationRecord(int stack_param_delta) {
  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
  // Only shrink the stack here; growth (negative delta) is handled in
  // AssemblePrepareTailCall.
  if (sp_slot_delta > 0) {
    __ AddP(sp, sp, Operand(sp_slot_delta * kPointerSize));
  }
  frame_access_state()->SetFrameAccessToDefault();
}
    590 
// Prepares the stack for a tail call: grows it when the callee needs more
// stack-parameter space, restores the caller frame state if a frame
// exists, and switches slot addressing to be sp-relative.
void CodeGenerator::AssemblePrepareTailCall(int stack_param_delta) {
  int sp_slot_delta = TailCallFrameStackSlotDelta(stack_param_delta);
  // A negative delta means extra slots are needed: allocate them and
  // record the adjustment in the frame access state.
  if (sp_slot_delta < 0) {
    __ AddP(sp, sp, Operand(sp_slot_delta * kPointerSize));
    frame_access_state()->IncreaseSPDelta(-sp_slot_delta);
  }
  if (frame_access_state()->has_frame()) {
    __ RestoreFrameStateForTailCall();
  }
  // From here on the frame pointer is no longer valid; address via sp.
  frame_access_state()->SetFrameAccessToSP();
}
    602 
// If the current frame is an arguments adaptor frame, emits code to drop
// it (including the arguments it pushed) before a tail call, so the
// callee observes the real caller. |args_reg| holds the callee argument
// count; the three scratch registers must not alias it.
void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
                                                     Register scratch1,
                                                     Register scratch2,
                                                     Register scratch3) {
  DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
  Label done;

  // Check if current frame is an arguments adaptor frame.
  __ LoadP(scratch1, MemOperand(fp, StandardFrameConstants::kContextOffset));
  __ CmpSmiLiteral(scratch1, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
  __ bne(&done);

  // Load arguments count from current arguments adaptor frame (note, it
  // does not include receiver).
  Register caller_args_count_reg = scratch1;
  __ LoadP(caller_args_count_reg,
           MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
  __ SmiUntag(caller_args_count_reg);

  ParameterCount callee_args_count(args_reg);
  __ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
                        scratch3);
  __ bind(&done);
}
    627 
    628 // Assembles an instruction after register allocation, producing machine code.
    629 CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
    630     Instruction* instr) {
    631   S390OperandConverter i(this, instr);
    632   ArchOpcode opcode = ArchOpcodeField::decode(instr->opcode());
    633 
    634   switch (opcode) {
    635     case kArchCallCodeObject: {
    636       EnsureSpaceForLazyDeopt();
    637       if (HasRegisterInput(instr, 0)) {
    638         __ AddP(ip, i.InputRegister(0),
    639                 Operand(Code::kHeaderSize - kHeapObjectTag));
    640         __ Call(ip);
    641       } else {
    642         __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
    643                 RelocInfo::CODE_TARGET);
    644       }
    645       RecordCallPosition(instr);
    646       frame_access_state()->ClearSPDelta();
    647       break;
    648     }
    649     case kArchTailCallCodeObjectFromJSFunction:
    650     case kArchTailCallCodeObject: {
    651       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
    652       AssembleDeconstructActivationRecord(stack_param_delta);
    653       if (opcode == kArchTailCallCodeObjectFromJSFunction) {
    654         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
    655                                          i.TempRegister(0), i.TempRegister(1),
    656                                          i.TempRegister(2));
    657       }
    658       if (HasRegisterInput(instr, 0)) {
    659         __ AddP(ip, i.InputRegister(0),
    660                 Operand(Code::kHeaderSize - kHeapObjectTag));
    661         __ Jump(ip);
    662       } else {
    663         // We cannot use the constant pool to load the target since
    664         // we've already restored the caller's frame.
    665         ConstantPoolUnavailableScope constant_pool_unavailable(masm());
    666         __ Jump(Handle<Code>::cast(i.InputHeapObject(0)),
    667                 RelocInfo::CODE_TARGET);
    668       }
    669       frame_access_state()->ClearSPDelta();
    670       break;
    671     }
    672     case kArchTailCallAddress: {
    673       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
    674       AssembleDeconstructActivationRecord(stack_param_delta);
    675       CHECK(!instr->InputAt(0)->IsImmediate());
    676       __ Jump(i.InputRegister(0));
    677       frame_access_state()->ClearSPDelta();
    678       break;
    679     }
    680     case kArchCallJSFunction: {
    681       EnsureSpaceForLazyDeopt();
    682       Register func = i.InputRegister(0);
    683       if (FLAG_debug_code) {
    684         // Check the function's context matches the context argument.
    685         __ LoadP(kScratchReg,
    686                  FieldMemOperand(func, JSFunction::kContextOffset));
    687         __ CmpP(cp, kScratchReg);
    688         __ Assert(eq, kWrongFunctionContext);
    689       }
    690       __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
    691       __ Call(ip);
    692       RecordCallPosition(instr);
    693       frame_access_state()->ClearSPDelta();
    694       break;
    695     }
    696     case kArchTailCallJSFunctionFromJSFunction:
    697     case kArchTailCallJSFunction: {
    698       Register func = i.InputRegister(0);
    699       if (FLAG_debug_code) {
    700         // Check the function's context matches the context argument.
    701         __ LoadP(kScratchReg,
    702                  FieldMemOperand(func, JSFunction::kContextOffset));
    703         __ CmpP(cp, kScratchReg);
    704         __ Assert(eq, kWrongFunctionContext);
    705       }
    706       int stack_param_delta = i.InputInt32(instr->InputCount() - 1);
    707       AssembleDeconstructActivationRecord(stack_param_delta);
    708       if (opcode == kArchTailCallJSFunctionFromJSFunction) {
    709         AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
    710                                          i.TempRegister(0), i.TempRegister(1),
    711                                          i.TempRegister(2));
    712       }
    713       __ LoadP(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
    714       __ Jump(ip);
    715       frame_access_state()->ClearSPDelta();
    716       break;
    717     }
    718     case kArchPrepareCallCFunction: {
    719       int const num_parameters = MiscField::decode(instr->opcode());
    720       __ PrepareCallCFunction(num_parameters, kScratchReg);
    721       // Frame alignment requires using FP-relative frame addressing.
    722       frame_access_state()->SetFrameAccessToFP();
    723       break;
    724     }
    725     case kArchPrepareTailCall:
    726       AssemblePrepareTailCall(i.InputInt32(instr->InputCount() - 1));
    727       break;
    728     case kArchCallCFunction: {
    729       int const num_parameters = MiscField::decode(instr->opcode());
    730       if (instr->InputAt(0)->IsImmediate()) {
    731         ExternalReference ref = i.InputExternalReference(0);
    732         __ CallCFunction(ref, num_parameters);
    733       } else {
    734         Register func = i.InputRegister(0);
    735         __ CallCFunction(func, num_parameters);
    736       }
    737       frame_access_state()->SetFrameAccessToDefault();
    738       frame_access_state()->ClearSPDelta();
    739       break;
    740     }
    741     case kArchJmp:
    742       AssembleArchJump(i.InputRpo(0));
    743       break;
    744     case kArchLookupSwitch:
    745       AssembleArchLookupSwitch(instr);
    746       break;
    747     case kArchTableSwitch:
    748       AssembleArchTableSwitch(instr);
    749       break;
    750     case kArchDebugBreak:
    751       __ stop("kArchDebugBreak");
    752       break;
    753     case kArchNop:
    754     case kArchThrowTerminator:
    755       // don't emit code for nops.
    756       break;
    757     case kArchDeoptimize: {
    758       int deopt_state_id =
    759           BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
    760       Deoptimizer::BailoutType bailout_type =
    761           Deoptimizer::BailoutType(MiscField::decode(instr->opcode()));
    762       CodeGenResult result =
    763           AssembleDeoptimizerCall(deopt_state_id, bailout_type);
    764       if (result != kSuccess) return result;
    765       break;
    766     }
    767     case kArchRet:
    768       AssembleReturn();
    769       break;
    770     case kArchStackPointer:
    771       __ LoadRR(i.OutputRegister(), sp);
    772       break;
    773     case kArchFramePointer:
    774       __ LoadRR(i.OutputRegister(), fp);
    775       break;
    776     case kArchParentFramePointer:
    777       if (frame_access_state()->has_frame()) {
    778         __ LoadP(i.OutputRegister(), MemOperand(fp, 0));
    779       } else {
    780         __ LoadRR(i.OutputRegister(), fp);
    781       }
    782       break;
    783     case kArchTruncateDoubleToI:
    784       // TODO(mbrandy): move slow call to stub out of line.
    785       __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
    786       break;
    787     case kArchStoreWithWriteBarrier: {
    788       RecordWriteMode mode =
    789           static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
    790       Register object = i.InputRegister(0);
    791       Register value = i.InputRegister(2);
    792       Register scratch0 = i.TempRegister(0);
    793       Register scratch1 = i.TempRegister(1);
    794       OutOfLineRecordWrite* ool;
    795 
    796       AddressingMode addressing_mode =
    797           AddressingModeField::decode(instr->opcode());
    798       if (addressing_mode == kMode_MRI) {
    799         int32_t offset = i.InputInt32(1);
    800         ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
    801                                                 scratch0, scratch1, mode);
    802         __ StoreP(value, MemOperand(object, offset));
    803       } else {
    804         DCHECK_EQ(kMode_MRR, addressing_mode);
    805         Register offset(i.InputRegister(1));
    806         ool = new (zone()) OutOfLineRecordWrite(this, object, offset, value,
    807                                                 scratch0, scratch1, mode);
    808         __ StoreP(value, MemOperand(object, offset));
    809       }
    810       __ CheckPageFlag(object, scratch0,
    811                        MemoryChunk::kPointersFromHereAreInterestingMask, ne,
    812                        ool->entry());
    813       __ bind(ool->exit());
    814       break;
    815     }
    816     case kArchStackSlot: {
    817       FrameOffset offset =
    818           frame_access_state()->GetFrameOffset(i.InputInt32(0));
    819       __ AddP(i.OutputRegister(), offset.from_stack_pointer() ? sp : fp,
    820               Operand(offset.offset()));
    821       break;
    822     }
    823     case kS390_And:
    824       ASSEMBLE_BINOP(AndP, AndP);
    825       break;
    826     case kS390_AndComplement:
    827       __ NotP(i.InputRegister(1));
    828       __ AndP(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
    829       break;
    830     case kS390_Or:
    831       ASSEMBLE_BINOP(OrP, OrP);
    832       break;
    833     case kS390_OrComplement:
    834       __ NotP(i.InputRegister(1));
    835       __ OrP(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
    836       break;
    837     case kS390_Xor:
    838       ASSEMBLE_BINOP(XorP, XorP);
    839       break;
    840     case kS390_ShiftLeft32:
    841       if (HasRegisterInput(instr, 1)) {
    842         if (i.OutputRegister().is(i.InputRegister(1)) &&
    843             !CpuFeatures::IsSupported(DISTINCT_OPS)) {
    844           __ LoadRR(kScratchReg, i.InputRegister(1));
    845           __ ShiftLeft(i.OutputRegister(), i.InputRegister(0), kScratchReg);
    846         } else {
    847           ASSEMBLE_BINOP(ShiftLeft, ShiftLeft);
    848         }
    849       } else {
    850         ASSEMBLE_BINOP(ShiftLeft, ShiftLeft);
    851       }
    852       __ LoadlW(i.OutputRegister(0), i.OutputRegister(0));
    853       break;
    854 #if V8_TARGET_ARCH_S390X
    855     case kS390_ShiftLeft64:
    856       ASSEMBLE_BINOP(sllg, sllg);
    857       break;
    858 #endif
    859     case kS390_ShiftRight32:
    860       if (HasRegisterInput(instr, 1)) {
    861         if (i.OutputRegister().is(i.InputRegister(1)) &&
    862             !CpuFeatures::IsSupported(DISTINCT_OPS)) {
    863           __ LoadRR(kScratchReg, i.InputRegister(1));
    864           __ ShiftRight(i.OutputRegister(), i.InputRegister(0), kScratchReg);
    865         } else {
    866           ASSEMBLE_BINOP(ShiftRight, ShiftRight);
    867         }
    868       } else {
    869         ASSEMBLE_BINOP(ShiftRight, ShiftRight);
    870       }
    871       __ LoadlW(i.OutputRegister(0), i.OutputRegister(0));
    872       break;
    873 #if V8_TARGET_ARCH_S390X
    874     case kS390_ShiftRight64:
    875       ASSEMBLE_BINOP(srlg, srlg);
    876       break;
    877 #endif
    878     case kS390_ShiftRightArith32:
    879       if (HasRegisterInput(instr, 1)) {
    880         if (i.OutputRegister().is(i.InputRegister(1)) &&
    881             !CpuFeatures::IsSupported(DISTINCT_OPS)) {
    882           __ LoadRR(kScratchReg, i.InputRegister(1));
    883           __ ShiftRightArith(i.OutputRegister(), i.InputRegister(0),
    884                              kScratchReg);
    885         } else {
    886           ASSEMBLE_BINOP(ShiftRightArith, ShiftRightArith);
    887         }
    888       } else {
    889         ASSEMBLE_BINOP(ShiftRightArith, ShiftRightArith);
    890       }
    891       __ LoadlW(i.OutputRegister(), i.OutputRegister());
    892       break;
    893 #if V8_TARGET_ARCH_S390X
    894     case kS390_ShiftRightArith64:
    895       ASSEMBLE_BINOP(srag, srag);
    896       break;
    897 #endif
    898 #if !V8_TARGET_ARCH_S390X
    899     case kS390_AddPair:
    900       // i.InputRegister(0) ... left low word.
    901       // i.InputRegister(1) ... left high word.
    902       // i.InputRegister(2) ... right low word.
    903       // i.InputRegister(3) ... right high word.
    904       __ AddLogical32(i.OutputRegister(0), i.InputRegister(0),
    905                       i.InputRegister(2));
    906       __ AddLogicalWithCarry32(i.OutputRegister(1), i.InputRegister(1),
    907                                i.InputRegister(3));
    908       break;
    909     case kS390_SubPair:
    910       // i.InputRegister(0) ... left low word.
    911       // i.InputRegister(1) ... left high word.
    912       // i.InputRegister(2) ... right low word.
    913       // i.InputRegister(3) ... right high word.
    914       __ SubLogical32(i.OutputRegister(0), i.InputRegister(0),
    915                       i.InputRegister(2));
    916       __ SubLogicalWithBorrow32(i.OutputRegister(1), i.InputRegister(1),
    917                                 i.InputRegister(3));
    918       break;
    919     case kS390_MulPair:
    920       // i.InputRegister(0) ... left low word.
    921       // i.InputRegister(1) ... left high word.
    922       // i.InputRegister(2) ... right low word.
    923       // i.InputRegister(3) ... right high word.
    924       __ sllg(r0, i.InputRegister(1), Operand(32));
    925       __ sllg(r1, i.InputRegister(3), Operand(32));
    926       __ lr(r0, i.InputRegister(0));
    927       __ lr(r1, i.InputRegister(2));
    928       __ msgr(r1, r0);
    929       __ lr(i.OutputRegister(0), r1);
    930       __ srag(i.OutputRegister(1), r1, Operand(32));
    931       break;
    932     case kS390_ShiftLeftPair:
    933       if (instr->InputAt(2)->IsImmediate()) {
    934         __ ShiftLeftPair(i.OutputRegister(0), i.OutputRegister(1),
    935                          i.InputRegister(0), i.InputRegister(1),
    936                          i.InputInt32(2));
    937       } else {
    938         __ ShiftLeftPair(i.OutputRegister(0), i.OutputRegister(1),
    939                          i.InputRegister(0), i.InputRegister(1), kScratchReg,
    940                          i.InputRegister(2));
    941       }
    942       break;
    943     case kS390_ShiftRightPair:
    944       if (instr->InputAt(2)->IsImmediate()) {
    945         __ ShiftRightPair(i.OutputRegister(0), i.OutputRegister(1),
    946                           i.InputRegister(0), i.InputRegister(1),
    947                           i.InputInt32(2));
    948       } else {
    949         __ ShiftRightPair(i.OutputRegister(0), i.OutputRegister(1),
    950                           i.InputRegister(0), i.InputRegister(1), kScratchReg,
    951                           i.InputRegister(2));
    952       }
    953       break;
    954     case kS390_ShiftRightArithPair:
    955       if (instr->InputAt(2)->IsImmediate()) {
    956         __ ShiftRightArithPair(i.OutputRegister(0), i.OutputRegister(1),
    957                                i.InputRegister(0), i.InputRegister(1),
    958                                i.InputInt32(2));
    959       } else {
    960         __ ShiftRightArithPair(i.OutputRegister(0), i.OutputRegister(1),
    961                                i.InputRegister(0), i.InputRegister(1),
    962                                kScratchReg, i.InputRegister(2));
    963       }
    964       break;
    965 #endif
    966     case kS390_RotRight32:
    967       if (HasRegisterInput(instr, 1)) {
    968         __ LoadComplementRR(kScratchReg, i.InputRegister(1));
    969         __ rll(i.OutputRegister(), i.InputRegister(0), kScratchReg);
    970       } else {
    971         __ rll(i.OutputRegister(), i.InputRegister(0),
    972                Operand(32 - i.InputInt32(1)));
    973       }
    974       break;
    975 #if V8_TARGET_ARCH_S390X
    976     case kS390_RotRight64:
    977       if (HasRegisterInput(instr, 1)) {
    978         __ LoadComplementRR(kScratchReg, i.InputRegister(1));
    979         __ rllg(i.OutputRegister(), i.InputRegister(0), kScratchReg);
    980       } else {
    981         __ rllg(i.OutputRegister(), i.InputRegister(0),
    982                 Operand(64 - i.InputInt32(1)));
    983       }
    984       break;
    985 #endif
    986     case kS390_Not:
    987       __ LoadRR(i.OutputRegister(), i.InputRegister(0));
    988       __ NotP(i.OutputRegister());
    989       break;
    990     case kS390_RotLeftAndMask32:
    991       if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
    992         int shiftAmount = i.InputInt32(1);
    993         int endBit = 63 - i.InputInt32(3);
    994         int startBit = 63 - i.InputInt32(2);
    995         __ rll(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
    996         __ risbg(i.OutputRegister(), i.OutputRegister(), Operand(startBit),
    997                  Operand(endBit), Operand::Zero(), true);
    998       } else {
    999         int shiftAmount = i.InputInt32(1);
   1000         int clearBitLeft = 63 - i.InputInt32(2);
   1001         int clearBitRight = i.InputInt32(3);
   1002         __ rll(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
   1003         __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBitLeft));
   1004         __ srlg(i.OutputRegister(), i.OutputRegister(),
   1005                 Operand((clearBitLeft + clearBitRight)));
   1006         __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBitRight));
   1007       }
   1008       break;
   1009 #if V8_TARGET_ARCH_S390X
   1010     case kS390_RotLeftAndClear64:
   1011       UNIMPLEMENTED();  // Find correct instruction
   1012       break;
   1013     case kS390_RotLeftAndClearLeft64:
   1014       if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
   1015         int shiftAmount = i.InputInt32(1);
   1016         int endBit = 63;
   1017         int startBit = 63 - i.InputInt32(2);
   1018         __ risbg(i.OutputRegister(), i.InputRegister(0), Operand(startBit),
   1019                  Operand(endBit), Operand(shiftAmount), true);
   1020       } else {
   1021         int shiftAmount = i.InputInt32(1);
   1022         int clearBit = 63 - i.InputInt32(2);
   1023         __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
   1024         __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
   1025         __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
   1026       }
   1027       break;
   1028     case kS390_RotLeftAndClearRight64:
   1029       if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
   1030         int shiftAmount = i.InputInt32(1);
   1031         int endBit = 63 - i.InputInt32(2);
   1032         int startBit = 0;
   1033         __ risbg(i.OutputRegister(), i.InputRegister(0), Operand(startBit),
   1034                  Operand(endBit), Operand(shiftAmount), true);
   1035       } else {
   1036         int shiftAmount = i.InputInt32(1);
   1037         int clearBit = i.InputInt32(2);
   1038         __ rllg(i.OutputRegister(), i.InputRegister(0), Operand(shiftAmount));
   1039         __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
   1040         __ sllg(i.OutputRegister(), i.OutputRegister(), Operand(clearBit));
   1041       }
   1042       break;
   1043 #endif
   1044     case kS390_Add:
   1045 #if V8_TARGET_ARCH_S390X
   1046       if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
   1047         ASSEMBLE_ADD_WITH_OVERFLOW();
   1048       } else {
   1049 #endif
   1050         ASSEMBLE_BINOP(AddP, AddP);
   1051 #if V8_TARGET_ARCH_S390X
   1052       }
   1053 #endif
   1054       break;
   1055     case kS390_AddWithOverflow32:
   1056       ASSEMBLE_ADD_WITH_OVERFLOW32();
   1057       break;
   1058     case kS390_AddFloat:
   1059       // Ensure we don't clobber right/InputReg(1)
   1060       if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
   1061         ASSEMBLE_FLOAT_UNOP(aebr);
   1062       } else {
   1063         if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
   1064           __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
   1065         __ aebr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
   1066       }
   1067       break;
   1068     case kS390_AddDouble:
   1069       // Ensure we don't clobber right/InputReg(1)
   1070       if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
   1071         ASSEMBLE_FLOAT_UNOP(adbr);
   1072       } else {
   1073         if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
   1074           __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
   1075         __ adbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
   1076       }
   1077       break;
   1078     case kS390_Sub:
   1079 #if V8_TARGET_ARCH_S390X
   1080       if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
   1081         ASSEMBLE_SUB_WITH_OVERFLOW();
   1082       } else {
   1083 #endif
   1084         ASSEMBLE_BINOP(SubP, SubP);
   1085 #if V8_TARGET_ARCH_S390X
   1086       }
   1087 #endif
   1088       break;
   1089     case kS390_SubWithOverflow32:
   1090       ASSEMBLE_SUB_WITH_OVERFLOW32();
   1091       break;
   1092     case kS390_SubFloat:
   1093       // OutputDoubleReg() = i.InputDoubleRegister(0) - i.InputDoubleRegister(1)
   1094       if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
   1095         __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
   1096         __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
   1097         __ sebr(i.OutputDoubleRegister(), kScratchDoubleReg);
   1098       } else {
   1099         if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0))) {
   1100           __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
   1101         }
   1102         __ sebr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
   1103       }
   1104       break;
   1105     case kS390_SubDouble:
   1106       // OutputDoubleReg() = i.InputDoubleRegister(0) - i.InputDoubleRegister(1)
   1107       if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
   1108         __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
   1109         __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
   1110         __ sdbr(i.OutputDoubleRegister(), kScratchDoubleReg);
   1111       } else {
   1112         if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0))) {
   1113           __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
   1114         }
   1115         __ sdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
   1116       }
   1117       break;
   1118     case kS390_Mul32:
   1119 #if V8_TARGET_ARCH_S390X
   1120     case kS390_Mul64:
   1121 #endif
   1122       __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
   1123       break;
   1124     case kS390_MulHigh32:
   1125       __ LoadRR(r1, i.InputRegister(0));
   1126       __ mr_z(r0, i.InputRegister(1));
   1127       __ LoadW(i.OutputRegister(), r0);
   1128       break;
   1129     case kS390_MulHighU32:
   1130       __ LoadRR(r1, i.InputRegister(0));
   1131       __ mlr(r0, i.InputRegister(1));
   1132       __ LoadlW(i.OutputRegister(), r0);
   1133       break;
   1134     case kS390_MulFloat:
   1135       // Ensure we don't clobber right
   1136       if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
   1137         ASSEMBLE_FLOAT_UNOP(meebr);
   1138       } else {
   1139         if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
   1140           __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
   1141         __ meebr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
   1142       }
   1143       break;
   1144     case kS390_MulDouble:
   1145       // Ensure we don't clobber right
   1146       if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
   1147         ASSEMBLE_FLOAT_UNOP(mdbr);
   1148       } else {
   1149         if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
   1150           __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
   1151         __ mdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
   1152       }
   1153       break;
   1154 #if V8_TARGET_ARCH_S390X
   1155     case kS390_Div64:
   1156       __ LoadRR(r1, i.InputRegister(0));
   1157       __ dsgr(r0, i.InputRegister(1));  // R1: Dividend
   1158       __ ltgr(i.OutputRegister(), r1);  // Copy R1: Quotient to output
   1159       break;
   1160 #endif
   1161     case kS390_Div32:
   1162       __ LoadRR(r0, i.InputRegister(0));
   1163       __ srda(r0, Operand(32));
   1164       __ dr(r0, i.InputRegister(1));
   1165       __ LoadAndTestP_ExtendSrc(i.OutputRegister(),
   1166                                 r1);  // Copy R1: Quotient to output
   1167       break;
   1168 #if V8_TARGET_ARCH_S390X
   1169     case kS390_DivU64:
   1170       __ LoadRR(r1, i.InputRegister(0));
   1171       __ LoadImmP(r0, Operand::Zero());
   1172       __ dlgr(r0, i.InputRegister(1));  // R0:R1: Dividend
   1173       __ ltgr(i.OutputRegister(), r1);  // Copy R1: Quotient to output
   1174       break;
   1175 #endif
   1176     case kS390_DivU32:
   1177       __ LoadRR(r0, i.InputRegister(0));
   1178       __ srdl(r0, Operand(32));
   1179       __ dlr(r0, i.InputRegister(1));  // R0:R1: Dividend
   1180       __ LoadlW(i.OutputRegister(), r1);  // Copy R1: Quotient to output
   1181       __ LoadAndTestP_ExtendSrc(r1, r1);
   1182       break;
   1183 
   1184     case kS390_DivFloat:
   1185       // InputDoubleRegister(1)=InputDoubleRegister(0)/InputDoubleRegister(1)
   1186       if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
   1187         __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
   1188         __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
   1189         __ debr(i.OutputDoubleRegister(), kScratchDoubleReg);
   1190       } else {
   1191         if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
   1192           __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
   1193         __ debr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
   1194       }
   1195       break;
   1196     case kS390_DivDouble:
   1197       // InputDoubleRegister(1)=InputDoubleRegister(0)/InputDoubleRegister(1)
   1198       if (i.OutputDoubleRegister().is(i.InputDoubleRegister(1))) {
   1199         __ ldr(kScratchDoubleReg, i.InputDoubleRegister(1));
   1200         __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
   1201         __ ddbr(i.OutputDoubleRegister(), kScratchDoubleReg);
   1202       } else {
   1203         if (!i.OutputDoubleRegister().is(i.InputDoubleRegister(0)))
   1204           __ ldr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
   1205         __ ddbr(i.OutputDoubleRegister(), i.InputDoubleRegister(1));
   1206       }
   1207       break;
   1208     case kS390_Mod32:
   1209       ASSEMBLE_MODULO(dr, srda);
   1210       break;
   1211     case kS390_ModU32:
   1212       ASSEMBLE_MODULO(dlr, srdl);
   1213       break;
   1214 #if V8_TARGET_ARCH_S390X
   1215     case kS390_Mod64:
   1216       __ LoadRR(r1, i.InputRegister(0));
   1217       __ dsgr(r0, i.InputRegister(1));  // R1: Dividend
   1218       __ ltgr(i.OutputRegister(), r0);  // Copy R0: Remainder to output
   1219       break;
   1220     case kS390_ModU64:
   1221       __ LoadRR(r1, i.InputRegister(0));
   1222       __ LoadImmP(r0, Operand::Zero());
   1223       __ dlgr(r0, i.InputRegister(1));  // R0:R1: Dividend
   1224       __ ltgr(i.OutputRegister(), r0);  // Copy R0: Remainder to output
   1225       break;
   1226 #endif
   1227     case kS390_AbsFloat:
   1228       __ lpebr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
   1229       break;
   1230     case kS390_SqrtFloat:
   1231       ASSEMBLE_FLOAT_UNOP(sqebr);
   1232       break;
   1233     case kS390_FloorFloat:
   1234       __ fiebra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
   1235                 v8::internal::Assembler::FIDBRA_ROUND_TOWARD_NEG_INF);
   1236       break;
   1237     case kS390_CeilFloat:
   1238       __ fiebra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
   1239                 v8::internal::Assembler::FIDBRA_ROUND_TOWARD_POS_INF);
   1240       break;
   1241     case kS390_TruncateFloat:
   1242       __ fiebra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
   1243                 v8::internal::Assembler::FIDBRA_ROUND_TOWARD_0);
   1244       break;
   1245     //  Double operations
   1246     case kS390_ModDouble:
   1247       ASSEMBLE_FLOAT_MODULO();
   1248       break;
   1249     case kIeee754Float64Atan:
   1250       ASSEMBLE_IEEE754_UNOP(atan);
   1251       break;
   1252     case kIeee754Float64Atan2:
   1253       ASSEMBLE_IEEE754_BINOP(atan2);
   1254       break;
   1255     case kIeee754Float64Tan:
   1256       ASSEMBLE_IEEE754_UNOP(tan);
   1257       break;
   1258     case kIeee754Float64Cbrt:
   1259       ASSEMBLE_IEEE754_UNOP(cbrt);
   1260       break;
   1261     case kIeee754Float64Sin:
   1262       ASSEMBLE_IEEE754_UNOP(sin);
   1263       break;
   1264     case kIeee754Float64Cos:
   1265       ASSEMBLE_IEEE754_UNOP(cos);
   1266       break;
   1267     case kIeee754Float64Exp:
   1268       ASSEMBLE_IEEE754_UNOP(exp);
   1269       break;
   1270     case kIeee754Float64Expm1:
   1271       ASSEMBLE_IEEE754_UNOP(expm1);
   1272       break;
   1273     case kIeee754Float64Atanh:
   1274       ASSEMBLE_IEEE754_UNOP(atanh);
   1275       break;
   1276     case kIeee754Float64Log:
   1277       ASSEMBLE_IEEE754_UNOP(log);
   1278       break;
   1279     case kIeee754Float64Log1p:
   1280       ASSEMBLE_IEEE754_UNOP(log1p);
   1281       break;
   1282     case kIeee754Float64Log2:
   1283       ASSEMBLE_IEEE754_UNOP(log2);
   1284       break;
   1285     case kIeee754Float64Log10:
   1286       ASSEMBLE_IEEE754_UNOP(log10);
   1287       break;
   1288     case kS390_Neg:
   1289       __ LoadComplementRR(i.OutputRegister(), i.InputRegister(0));
   1290       break;
   1291     case kS390_MaxDouble:
   1292       ASSEMBLE_FLOAT_MAX(kScratchDoubleReg, kScratchReg);
   1293       break;
   1294     case kS390_MinDouble:
   1295       ASSEMBLE_FLOAT_MIN(kScratchDoubleReg, kScratchReg);
   1296       break;
   1297     case kS390_AbsDouble:
   1298       __ lpdbr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
   1299       break;
   1300     case kS390_SqrtDouble:
   1301       ASSEMBLE_FLOAT_UNOP(sqdbr);
   1302       break;
   1303     case kS390_FloorDouble:
   1304       __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
   1305                 v8::internal::Assembler::FIDBRA_ROUND_TOWARD_NEG_INF);
   1306       break;
   1307     case kS390_CeilDouble:
   1308       __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
   1309                 v8::internal::Assembler::FIDBRA_ROUND_TOWARD_POS_INF);
   1310       break;
   1311     case kS390_TruncateDouble:
   1312       __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
   1313                 v8::internal::Assembler::FIDBRA_ROUND_TOWARD_0);
   1314       break;
   1315     case kS390_RoundDouble:
   1316       __ fidbra(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
   1317                 v8::internal::Assembler::FIDBRA_ROUND_TO_NEAREST_AWAY_FROM_0);
   1318       break;
   1319     case kS390_NegDouble:
   1320       ASSEMBLE_FLOAT_UNOP(lcdbr);
   1321       break;
   1322     case kS390_Cntlz32: {
   1323       __ llgfr(i.OutputRegister(), i.InputRegister(0));
   1324       __ flogr(r0, i.OutputRegister());
   1325       __ LoadRR(i.OutputRegister(), r0);
   1326       __ SubP(i.OutputRegister(), Operand(32));
   1327     } break;
   1328 #if V8_TARGET_ARCH_S390X
   1329     case kS390_Cntlz64: {
   1330       __ flogr(r0, i.InputRegister(0));
   1331       __ LoadRR(i.OutputRegister(), r0);
   1332     } break;
   1333 #endif
   1334     case kS390_Popcnt32:
   1335       __ Popcnt32(i.OutputRegister(), i.InputRegister(0));
   1336       break;
   1337 #if V8_TARGET_ARCH_S390X
   1338     case kS390_Popcnt64:
   1339       __ Popcnt64(i.OutputRegister(), i.InputRegister(0));
   1340       break;
   1341 #endif
   1342     case kS390_Cmp32:
   1343       ASSEMBLE_COMPARE(Cmp32, CmpLogical32);
   1344       break;
   1345 #if V8_TARGET_ARCH_S390X
   1346     case kS390_Cmp64:
   1347       ASSEMBLE_COMPARE(CmpP, CmpLogicalP);
   1348       break;
   1349 #endif
   1350     case kS390_CmpFloat:
   1351       __ cebr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
   1352       break;
   1353     case kS390_CmpDouble:
   1354       __ cdbr(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
   1355       break;
   1356     case kS390_Tst32:
   1357       if (HasRegisterInput(instr, 1)) {
   1358         __ AndP(r0, i.InputRegister(0), i.InputRegister(1));
   1359       } else {
   1360         __ AndP(r0, i.InputRegister(0), i.InputImmediate(1));
   1361       }
   1362       __ LoadAndTestP_ExtendSrc(r0, r0);
   1363       break;
   1364 #if V8_TARGET_ARCH_S390X
   1365     case kS390_Tst64:
   1366       if (HasRegisterInput(instr, 1)) {
   1367         __ AndP(r0, i.InputRegister(0), i.InputRegister(1));
   1368       } else {
   1369         __ AndP(r0, i.InputRegister(0), i.InputImmediate(1));
   1370       }
   1371       break;
   1372 #endif
   1373     case kS390_Float64SilenceNaN: {
   1374       DoubleRegister value = i.InputDoubleRegister(0);
   1375       DoubleRegister result = i.OutputDoubleRegister();
   1376       __ CanonicalizeNaN(result, value);
   1377       break;
   1378     }
   1379     case kS390_Push:
   1380       if (instr->InputAt(0)->IsFPRegister()) {
   1381         __ lay(sp, MemOperand(sp, -kDoubleSize));
   1382         __ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp));
   1383         frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
   1384       } else {
   1385         __ Push(i.InputRegister(0));
   1386         frame_access_state()->IncreaseSPDelta(1);
   1387       }
   1388       break;
   1389     case kS390_PushFrame: {
   1390       int num_slots = i.InputInt32(1);
   1391       __ lay(sp, MemOperand(sp, -num_slots * kPointerSize));
   1392       if (instr->InputAt(0)->IsFPRegister()) {
   1393         LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
   1394         if (op->representation() == MachineRepresentation::kFloat64) {
   1395           __ StoreDouble(i.InputDoubleRegister(0), MemOperand(sp));
   1396         } else {
   1397           DCHECK(op->representation() == MachineRepresentation::kFloat32);
   1398           __ StoreFloat32(i.InputDoubleRegister(0), MemOperand(sp));
   1399         }
   1400       } else {
   1401         __ StoreP(i.InputRegister(0),
   1402                   MemOperand(sp));
   1403       }
   1404       break;
   1405     }
   1406     case kS390_StoreToStackSlot: {
   1407       int slot = i.InputInt32(1);
   1408       if (instr->InputAt(0)->IsFPRegister()) {
   1409         LocationOperand* op = LocationOperand::cast(instr->InputAt(0));
   1410         if (op->representation() == MachineRepresentation::kFloat64) {
   1411           __ StoreDouble(i.InputDoubleRegister(0),
   1412                          MemOperand(sp, slot * kPointerSize));
   1413         } else {
   1414           DCHECK(op->representation() == MachineRepresentation::kFloat32);
   1415           __ StoreFloat32(i.InputDoubleRegister(0),
   1416                           MemOperand(sp, slot * kPointerSize));
   1417         }
   1418       } else {
   1419         __ StoreP(i.InputRegister(0), MemOperand(sp, slot * kPointerSize));
   1420       }
   1421       break;
   1422     }
   1423     case kS390_ExtendSignWord8:
   1424 #if V8_TARGET_ARCH_S390X
   1425       __ lgbr(i.OutputRegister(), i.InputRegister(0));
   1426 #else
   1427       __ lbr(i.OutputRegister(), i.InputRegister(0));
   1428 #endif
   1429       break;
   1430     case kS390_ExtendSignWord16:
   1431 #if V8_TARGET_ARCH_S390X
   1432       __ lghr(i.OutputRegister(), i.InputRegister(0));
   1433 #else
   1434       __ lhr(i.OutputRegister(), i.InputRegister(0));
   1435 #endif
   1436       break;
   1437 #if V8_TARGET_ARCH_S390X
   1438     case kS390_ExtendSignWord32:
   1439       __ lgfr(i.OutputRegister(), i.InputRegister(0));
   1440       break;
   1441     case kS390_Uint32ToUint64:
   1442       // Zero extend
   1443       __ llgfr(i.OutputRegister(), i.InputRegister(0));
   1444       break;
   1445     case kS390_Int64ToInt32:
   1446       // sign extend
   1447       __ lgfr(i.OutputRegister(), i.InputRegister(0));
   1448       break;
   1449     case kS390_Int64ToFloat32:
   1450       __ ConvertInt64ToFloat(i.InputRegister(0), i.OutputDoubleRegister());
   1451       break;
   1452     case kS390_Int64ToDouble:
   1453       __ ConvertInt64ToDouble(i.InputRegister(0), i.OutputDoubleRegister());
   1454       break;
   1455     case kS390_Uint64ToFloat32:
   1456       __ ConvertUnsignedInt64ToFloat(i.InputRegister(0),
   1457                                      i.OutputDoubleRegister());
   1458       break;
   1459     case kS390_Uint64ToDouble:
   1460       __ ConvertUnsignedInt64ToDouble(i.InputRegister(0),
   1461                                       i.OutputDoubleRegister());
   1462       break;
   1463 #endif
   1464     case kS390_Int32ToFloat32:
   1465       __ ConvertIntToFloat(i.InputRegister(0), i.OutputDoubleRegister());
   1466       break;
   1467     case kS390_Int32ToDouble:
   1468       __ ConvertIntToDouble(i.InputRegister(0), i.OutputDoubleRegister());
   1469       break;
   1470     case kS390_Uint32ToFloat32:
   1471       __ ConvertUnsignedIntToFloat(i.InputRegister(0),
   1472                                    i.OutputDoubleRegister());
   1473       break;
   1474     case kS390_Uint32ToDouble:
   1475       __ ConvertUnsignedIntToDouble(i.InputRegister(0),
   1476                                     i.OutputDoubleRegister());
   1477       break;
   1478     case kS390_DoubleToInt32:
   1479     case kS390_DoubleToUint32:
   1480     case kS390_DoubleToInt64: {
   1481 #if V8_TARGET_ARCH_S390X
   1482       bool check_conversion =
   1483           (opcode == kS390_DoubleToInt64 && i.OutputCount() > 1);
   1484 #endif
   1485       __ ConvertDoubleToInt64(i.InputDoubleRegister(0),
   1486 #if !V8_TARGET_ARCH_S390X
   1487                               kScratchReg,
   1488 #endif
   1489                               i.OutputRegister(0), kScratchDoubleReg);
   1490 #if V8_TARGET_ARCH_S390X
   1491       if (check_conversion) {
   1492         Label conversion_done;
   1493         __ LoadImmP(i.OutputRegister(1), Operand::Zero());
   1494         __ b(Condition(1), &conversion_done);  // special case
   1495         __ LoadImmP(i.OutputRegister(1), Operand(1));
   1496         __ bind(&conversion_done);
   1497       }
   1498 #endif
   1499       break;
   1500     }
   1501     case kS390_Float32ToInt32: {
   1502       bool check_conversion = (i.OutputCount() > 1);
   1503       __ ConvertFloat32ToInt32(i.InputDoubleRegister(0), i.OutputRegister(0),
   1504                                kScratchDoubleReg);
   1505       if (check_conversion) {
   1506         Label conversion_done;
   1507         __ LoadImmP(i.OutputRegister(1), Operand::Zero());
   1508         __ b(Condition(1), &conversion_done);  // special case
   1509         __ LoadImmP(i.OutputRegister(1), Operand(1));
   1510         __ bind(&conversion_done);
   1511       }
   1512       break;
   1513     }
   1514     case kS390_Float32ToUint32: {
   1515       bool check_conversion = (i.OutputCount() > 1);
   1516       __ ConvertFloat32ToUnsignedInt32(i.InputDoubleRegister(0),
   1517                                        i.OutputRegister(0), kScratchDoubleReg);
   1518       if (check_conversion) {
   1519         Label conversion_done;
   1520         __ LoadImmP(i.OutputRegister(1), Operand::Zero());
   1521         __ b(Condition(1), &conversion_done);  // special case
   1522         __ LoadImmP(i.OutputRegister(1), Operand(1));
   1523         __ bind(&conversion_done);
   1524       }
   1525       break;
   1526     }
   1527 #if V8_TARGET_ARCH_S390X
   1528     case kS390_Float32ToUint64: {
   1529       bool check_conversion = (i.OutputCount() > 1);
   1530       __ ConvertFloat32ToUnsignedInt64(i.InputDoubleRegister(0),
   1531                                        i.OutputRegister(0), kScratchDoubleReg);
   1532       if (check_conversion) {
   1533         Label conversion_done;
   1534         __ LoadImmP(i.OutputRegister(1), Operand::Zero());
   1535         __ b(Condition(1), &conversion_done);  // special case
   1536         __ LoadImmP(i.OutputRegister(1), Operand(1));
   1537         __ bind(&conversion_done);
   1538       }
   1539       break;
   1540     }
   1541 #endif
   1542     case kS390_Float32ToInt64: {
   1543 #if V8_TARGET_ARCH_S390X
   1544       bool check_conversion =
   1545           (opcode == kS390_Float32ToInt64 && i.OutputCount() > 1);
   1546 #endif
   1547       __ ConvertFloat32ToInt64(i.InputDoubleRegister(0),
   1548 #if !V8_TARGET_ARCH_S390X
   1549                                kScratchReg,
   1550 #endif
   1551                                i.OutputRegister(0), kScratchDoubleReg);
   1552 #if V8_TARGET_ARCH_S390X
   1553       if (check_conversion) {
   1554         Label conversion_done;
   1555         __ LoadImmP(i.OutputRegister(1), Operand::Zero());
   1556         __ b(Condition(1), &conversion_done);  // special case
   1557         __ LoadImmP(i.OutputRegister(1), Operand(1));
   1558         __ bind(&conversion_done);
   1559       }
   1560 #endif
   1561       break;
   1562     }
   1563 #if V8_TARGET_ARCH_S390X
   1564     case kS390_DoubleToUint64: {
   1565       bool check_conversion = (i.OutputCount() > 1);
   1566       __ ConvertDoubleToUnsignedInt64(i.InputDoubleRegister(0),
   1567                                       i.OutputRegister(0), kScratchDoubleReg);
   1568       if (check_conversion) {
   1569         Label conversion_done;
   1570         __ LoadImmP(i.OutputRegister(1), Operand::Zero());
   1571         __ b(Condition(1), &conversion_done);  // special case
   1572         __ LoadImmP(i.OutputRegister(1), Operand(1));
   1573         __ bind(&conversion_done);
   1574       }
   1575       break;
   1576     }
   1577 #endif
   1578     case kS390_DoubleToFloat32:
   1579       __ ledbr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
   1580       break;
   1581     case kS390_Float32ToDouble:
   1582       __ ldebr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
   1583       break;
   1584     case kS390_DoubleExtractLowWord32:
   1585       __ lgdr(i.OutputRegister(), i.InputDoubleRegister(0));
   1586       __ llgfr(i.OutputRegister(), i.OutputRegister());
   1587       break;
   1588     case kS390_DoubleExtractHighWord32:
   1589       __ lgdr(i.OutputRegister(), i.InputDoubleRegister(0));
   1590       __ srlg(i.OutputRegister(), i.OutputRegister(), Operand(32));
   1591       break;
   1592     case kS390_DoubleInsertLowWord32:
   1593       __ lgdr(kScratchReg, i.OutputDoubleRegister());
   1594       __ lr(kScratchReg, i.InputRegister(1));
   1595       __ ldgr(i.OutputDoubleRegister(), kScratchReg);
   1596       break;
   1597     case kS390_DoubleInsertHighWord32:
   1598       __ sllg(kScratchReg, i.InputRegister(1), Operand(32));
   1599       __ lgdr(r0, i.OutputDoubleRegister());
   1600       __ lr(kScratchReg, r0);
   1601       __ ldgr(i.OutputDoubleRegister(), kScratchReg);
   1602       break;
   1603     case kS390_DoubleConstruct:
   1604       __ sllg(kScratchReg, i.InputRegister(0), Operand(32));
   1605       __ lr(kScratchReg, i.InputRegister(1));
   1606 
   1607       // Bitwise convert from GPR to FPR
   1608       __ ldgr(i.OutputDoubleRegister(), kScratchReg);
   1609       break;
   1610     case kS390_LoadWordS8:
   1611       ASSEMBLE_LOAD_INTEGER(LoadlB);
   1612 #if V8_TARGET_ARCH_S390X
   1613       __ lgbr(i.OutputRegister(), i.OutputRegister());
   1614 #else
   1615       __ lbr(i.OutputRegister(), i.OutputRegister());
   1616 #endif
   1617       break;
   1618     case kS390_BitcastFloat32ToInt32:
   1619       __ MovFloatToInt(i.OutputRegister(), i.InputDoubleRegister(0));
   1620       break;
   1621     case kS390_BitcastInt32ToFloat32:
   1622       __ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
   1623       break;
   1624 #if V8_TARGET_ARCH_S390X
   1625     case kS390_BitcastDoubleToInt64:
   1626       __ MovDoubleToInt64(i.OutputRegister(), i.InputDoubleRegister(0));
   1627       break;
   1628     case kS390_BitcastInt64ToDouble:
   1629       __ MovInt64ToDouble(i.OutputDoubleRegister(), i.InputRegister(0));
   1630       break;
   1631 #endif
   1632     case kS390_LoadWordU8:
   1633       ASSEMBLE_LOAD_INTEGER(LoadlB);
   1634       break;
   1635     case kS390_LoadWordU16:
   1636       ASSEMBLE_LOAD_INTEGER(LoadLogicalHalfWordP);
   1637       break;
   1638     case kS390_LoadWordS16:
   1639       ASSEMBLE_LOAD_INTEGER(LoadHalfWordP);
   1640       break;
   1641     case kS390_LoadWordU32:
   1642       ASSEMBLE_LOAD_INTEGER(LoadlW);
   1643       break;
   1644     case kS390_LoadWordS32:
   1645       ASSEMBLE_LOAD_INTEGER(LoadW);
   1646       break;
   1647 #if V8_TARGET_ARCH_S390X
   1648     case kS390_LoadWord64:
   1649       ASSEMBLE_LOAD_INTEGER(lg);
   1650       break;
   1651 #endif
   1652     case kS390_LoadFloat32:
   1653       ASSEMBLE_LOAD_FLOAT(LoadFloat32);
   1654       break;
   1655     case kS390_LoadDouble:
   1656       ASSEMBLE_LOAD_FLOAT(LoadDouble);
   1657       break;
   1658     case kS390_StoreWord8:
   1659       ASSEMBLE_STORE_INTEGER(StoreByte);
   1660       break;
   1661     case kS390_StoreWord16:
   1662       ASSEMBLE_STORE_INTEGER(StoreHalfWord);
   1663       break;
   1664     case kS390_StoreWord32:
   1665       ASSEMBLE_STORE_INTEGER(StoreW);
   1666       break;
   1667 #if V8_TARGET_ARCH_S390X
   1668     case kS390_StoreWord64:
   1669       ASSEMBLE_STORE_INTEGER(StoreP);
   1670       break;
   1671 #endif
   1672     case kS390_StoreFloat32:
   1673       ASSEMBLE_STORE_FLOAT32();
   1674       break;
   1675     case kS390_StoreDouble:
   1676       ASSEMBLE_STORE_DOUBLE();
   1677       break;
   1678     case kCheckedLoadInt8:
   1679       ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlB);
   1680 #if V8_TARGET_ARCH_S390X
   1681       __ lgbr(i.OutputRegister(), i.OutputRegister());
   1682 #else
   1683       __ lbr(i.OutputRegister(), i.OutputRegister());
   1684 #endif
   1685       break;
   1686     case kCheckedLoadUint8:
   1687       ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlB);
   1688       break;
   1689     case kCheckedLoadInt16:
   1690       ASSEMBLE_CHECKED_LOAD_INTEGER(LoadHalfWordP);
   1691       break;
   1692     case kCheckedLoadUint16:
   1693       ASSEMBLE_CHECKED_LOAD_INTEGER(LoadLogicalHalfWordP);
   1694       break;
   1695     case kCheckedLoadWord32:
   1696       ASSEMBLE_CHECKED_LOAD_INTEGER(LoadlW);
   1697       break;
   1698     case kCheckedLoadWord64:
   1699 #if V8_TARGET_ARCH_S390X
   1700       ASSEMBLE_CHECKED_LOAD_INTEGER(LoadP);
   1701 #else
   1702       UNREACHABLE();
   1703 #endif
   1704       break;
   1705     case kCheckedLoadFloat32:
   1706       ASSEMBLE_CHECKED_LOAD_FLOAT(LoadFloat32, 32);
   1707       break;
   1708     case kCheckedLoadFloat64:
   1709       ASSEMBLE_CHECKED_LOAD_FLOAT(LoadDouble, 64);
   1710       break;
   1711     case kCheckedStoreWord8:
   1712       ASSEMBLE_CHECKED_STORE_INTEGER(StoreByte);
   1713       break;
   1714     case kCheckedStoreWord16:
   1715       ASSEMBLE_CHECKED_STORE_INTEGER(StoreHalfWord);
   1716       break;
   1717     case kCheckedStoreWord32:
   1718       ASSEMBLE_CHECKED_STORE_INTEGER(StoreW);
   1719       break;
   1720     case kCheckedStoreWord64:
   1721 #if V8_TARGET_ARCH_S390X
   1722       ASSEMBLE_CHECKED_STORE_INTEGER(StoreP);
   1723 #else
   1724       UNREACHABLE();
   1725 #endif
   1726       break;
   1727     case kCheckedStoreFloat32:
   1728       ASSEMBLE_CHECKED_STORE_FLOAT32();
   1729       break;
   1730     case kCheckedStoreFloat64:
   1731       ASSEMBLE_CHECKED_STORE_DOUBLE();
   1732       break;
   1733     case kAtomicLoadInt8:
   1734       __ LoadB(i.OutputRegister(), i.MemoryOperand());
   1735       break;
   1736     case kAtomicLoadUint8:
   1737       __ LoadlB(i.OutputRegister(), i.MemoryOperand());
   1738       break;
   1739     case kAtomicLoadInt16:
   1740       __ LoadHalfWordP(i.OutputRegister(), i.MemoryOperand());
   1741       break;
   1742     case kAtomicLoadUint16:
   1743       __ LoadLogicalHalfWordP(i.OutputRegister(), i.MemoryOperand());
   1744       break;
   1745     case kAtomicLoadWord32:
   1746       __ LoadlW(i.OutputRegister(), i.MemoryOperand());
   1747       break;
   1748     case kAtomicStoreWord8:
   1749       __ StoreByte(i.InputRegister(0), i.MemoryOperand(NULL, 1));
   1750       break;
   1751     case kAtomicStoreWord16:
   1752       __ StoreHalfWord(i.InputRegister(0), i.MemoryOperand(NULL, 1));
   1753       break;
   1754     case kAtomicStoreWord32:
   1755       __ StoreW(i.InputRegister(0), i.MemoryOperand(NULL, 1));
   1756       break;
   1757     default:
   1758       UNREACHABLE();
   1759       break;
   1760   }
   1761   return kSuccess;
   1762 }  // NOLINT(readability/fn_size)
   1763 
   1764 // Assembles branches after an instruction.
   1765 void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
   1766   S390OperandConverter i(this, instr);
   1767   Label* tlabel = branch->true_label;
   1768   Label* flabel = branch->false_label;
   1769   ArchOpcode op = instr->arch_opcode();
   1770   FlagsCondition condition = branch->condition;
   1771 
   1772   Condition cond = FlagsConditionToCondition(condition, op);
   1773   if (op == kS390_CmpDouble) {
   1774     // check for unordered if necessary
   1775     // Branching to flabel/tlabel according to what's expected by tests
   1776     if (cond == le || cond == eq || cond == lt) {
   1777       __ bunordered(flabel);
   1778     } else if (cond == gt || cond == ne || cond == ge) {
   1779       __ bunordered(tlabel);
   1780     }
   1781   }
   1782   __ b(cond, tlabel);
   1783   if (!branch->fallthru) __ b(flabel);  // no fallthru to flabel.
   1784 }
   1785 
   1786 void CodeGenerator::AssembleArchJump(RpoNumber target) {
   1787   if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
   1788 }
   1789 
   1790 // Assembles boolean materializations after an instruction.
   1791 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
   1792                                         FlagsCondition condition) {
   1793   S390OperandConverter i(this, instr);
   1794   Label done;
   1795   ArchOpcode op = instr->arch_opcode();
   1796   bool check_unordered = (op == kS390_CmpDouble || kS390_CmpFloat);
   1797 
   1798   // Overflow checked for add/sub only.
   1799   DCHECK((condition != kOverflow && condition != kNotOverflow) ||
   1800          (op == kS390_AddWithOverflow32 || op == kS390_SubWithOverflow32) ||
   1801          (op == kS390_Add || op == kS390_Sub));
   1802 
   1803   // Materialize a full 32-bit 1 or 0 value. The result register is always the
   1804   // last output of the instruction.
   1805   DCHECK_NE(0u, instr->OutputCount());
   1806   Register reg = i.OutputRegister(instr->OutputCount() - 1);
   1807   Condition cond = FlagsConditionToCondition(condition, op);
   1808   switch (cond) {
   1809     case ne:
   1810     case ge:
   1811     case gt:
   1812       if (check_unordered) {
   1813         __ LoadImmP(reg, Operand(1));
   1814         __ LoadImmP(kScratchReg, Operand::Zero());
   1815         __ bunordered(&done);
   1816         Label cond_true;
   1817         __ b(cond, &cond_true, Label::kNear);
   1818         __ LoadRR(reg, kScratchReg);
   1819         __ bind(&cond_true);
   1820       } else {
   1821         Label cond_true, done_here;
   1822         __ LoadImmP(reg, Operand(1));
   1823         __ b(cond, &cond_true, Label::kNear);
   1824         __ LoadImmP(reg, Operand::Zero());
   1825         __ bind(&cond_true);
   1826       }
   1827       break;
   1828     case eq:
   1829     case lt:
   1830     case le:
   1831       if (check_unordered) {
   1832         __ LoadImmP(reg, Operand::Zero());
   1833         __ LoadImmP(kScratchReg, Operand(1));
   1834         __ bunordered(&done);
   1835         Label cond_false;
   1836         __ b(NegateCondition(cond), &cond_false, Label::kNear);
   1837         __ LoadRR(reg, kScratchReg);
   1838         __ bind(&cond_false);
   1839       } else {
   1840         __ LoadImmP(reg, Operand::Zero());
   1841         Label cond_false;
   1842         __ b(NegateCondition(cond), &cond_false, Label::kNear);
   1843         __ LoadImmP(reg, Operand(1));
   1844         __ bind(&cond_false);
   1845       }
   1846       break;
   1847     default:
   1848       UNREACHABLE();
   1849       break;
   1850   }
   1851   __ bind(&done);
   1852 }
   1853 
   1854 void CodeGenerator::AssembleArchLookupSwitch(Instruction* instr) {
   1855   S390OperandConverter i(this, instr);
   1856   Register input = i.InputRegister(0);
   1857   for (size_t index = 2; index < instr->InputCount(); index += 2) {
   1858     __ Cmp32(input, Operand(i.InputInt32(index + 0)));
   1859     __ beq(GetLabel(i.InputRpo(index + 1)));
   1860   }
   1861   AssembleArchJump(i.InputRpo(1));
   1862 }
   1863 
// Emits a jump-table dispatch: bounds-check the switch value, then load the
// target address from a PC-relative label table and jump to it.
// Inputs: 0 = switch value, 1 = default block, 2.. = case blocks.
void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
  S390OperandConverter i(this, instr);
  Register input = i.InputRegister(0);
  int32_t const case_count = static_cast<int32_t>(instr->InputCount() - 2);
  // Collect one label per case; the table itself is emitted by AddJumpTable.
  Label** cases = zone()->NewArray<Label*>(case_count);
  for (int32_t index = 0; index < case_count; ++index) {
    cases[index] = GetLabel(i.InputRpo(index + 2));
  }
  Label* const table = AddJumpTable(cases, case_count);
  // Unsigned compare so negative values also route to the default block.
  __ CmpLogicalP(input, Operand(case_count));
  __ bge(GetLabel(i.InputRpo(1)));
  // kScratchReg = address of the table; r1 = input scaled by pointer size.
  __ larl(kScratchReg, table);
  __ ShiftLeftP(r1, input, Operand(kPointerSizeLog2));
  __ LoadP(kScratchReg, MemOperand(kScratchReg, r1));
  __ Jump(kScratchReg);
}
   1880 
   1881 CodeGenerator::CodeGenResult CodeGenerator::AssembleDeoptimizerCall(
   1882     int deoptimization_id, Deoptimizer::BailoutType bailout_type) {
   1883   Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
   1884       isolate(), deoptimization_id, bailout_type);
   1885   // TODO(turbofan): We should be able to generate better code by sharing the
   1886   // actual final call site and just bl'ing to it here, similar to what we do
   1887   // in the lithium backend.
   1888   if (deopt_entry == nullptr) return kTooManyDeoptimizationBailouts;
   1889   __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
   1890   return kSuccess;
   1891 }
   1892 
   1893 void CodeGenerator::FinishFrame(Frame* frame) {
   1894   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
   1895   const RegList double_saves = descriptor->CalleeSavedFPRegisters();
   1896 
   1897   // Save callee-saved Double registers.
   1898   if (double_saves != 0) {
   1899     frame->AlignSavedCalleeRegisterSlots();
   1900     DCHECK(kNumCalleeSavedDoubles ==
   1901            base::bits::CountPopulation32(double_saves));
   1902     frame->AllocateSavedCalleeRegisterSlots(kNumCalleeSavedDoubles *
   1903                                             (kDoubleSize / kPointerSize));
   1904   }
   1905   // Save callee-saved registers.
   1906   const RegList saves = descriptor->CalleeSavedRegisters();
   1907   if (saves != 0) {
   1908     // register save area does not include the fp or constant pool pointer.
   1909     const int num_saves = kNumCalleeSaved - 1;
   1910     DCHECK(num_saves == base::bits::CountPopulation32(saves));
   1911     frame->AllocateSavedCalleeRegisterSlots(num_saves);
   1912   }
   1913 }
   1914 
// Emits the stack-frame prologue: frame setup (C, JS, or stub style),
// spill-slot allocation, then callee-saved double and general register saves.
// Emission order must match the teardown in AssembleReturn.
void CodeGenerator::AssembleConstructFrame() {
  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();

  if (frame_access_state()->has_frame()) {
    if (descriptor->IsCFunctionCall()) {
      // C frame: push return address (r14) and caller fp, then fp = sp.
      __ Push(r14, fp);
      __ LoadRR(fp, sp);
    } else if (descriptor->IsJSFunctionCall()) {
      __ Prologue(this->info()->GeneratePreagedPrologue(), ip);
    } else {
      StackFrame::Type type = info()->GetOutputStackFrameType();
      // TODO(mbrandy): Detect cases where ip is the entrypoint (for
      // efficient intialization of the constant pool pointer register).
      __ StubPrologue(type);
    }
  }

  int shrink_slots = frame()->GetSpillSlotCount();
  if (info()->is_osr()) {
    // TurboFan OSR-compiled functions cannot be entered directly.
    __ Abort(kShouldNotDirectlyEnterOsrFunction);

    // Unoptimized code jumps directly to this entrypoint while the unoptimized
    // frame is still on the stack. Optimized code uses OSR values directly from
    // the unoptimized frame. Thus, all that needs to be done is to allocate the
    // remaining stack slots.
    if (FLAG_code_comments) __ RecordComment("-- OSR entrypoint --");
    osr_pc_offset_ = __ pc_offset();
    shrink_slots -= OsrHelper(info()).UnoptimizedFrameSlots();
  }

  const RegList double_saves = descriptor->CalleeSavedFPRegisters();
  if (shrink_slots > 0) {
    // Reserve all spill slots with a single sp adjustment.
    __ lay(sp, MemOperand(sp, -shrink_slots * kPointerSize));
  }

  // Save callee-saved Double registers.
  if (double_saves != 0) {
    __ MultiPushDoubles(double_saves);
    DCHECK(kNumCalleeSavedDoubles ==
           base::bits::CountPopulation32(double_saves));
  }

  // Save callee-saved registers.
  const RegList saves = descriptor->CalleeSavedRegisters();
  if (saves != 0) {
    __ MultiPush(saves);
    // register save area does not include the fp or constant pool pointer.
  }
}
   1965 
   1966 void CodeGenerator::AssembleReturn() {
   1967   CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
   1968   int pop_count = static_cast<int>(descriptor->StackParameterCount());
   1969 
   1970   // Restore registers.
   1971   const RegList saves = descriptor->CalleeSavedRegisters();
   1972   if (saves != 0) {
   1973     __ MultiPop(saves);
   1974   }
   1975 
   1976   // Restore double registers.
   1977   const RegList double_saves = descriptor->CalleeSavedFPRegisters();
   1978   if (double_saves != 0) {
   1979     __ MultiPopDoubles(double_saves);
   1980   }
   1981 
   1982   if (descriptor->IsCFunctionCall()) {
   1983     AssembleDeconstructFrame();
   1984   } else if (frame_access_state()->has_frame()) {
   1985     // Canonicalize JSFunction return sites for now.
   1986     if (return_label_.is_bound()) {
   1987       __ b(&return_label_);
   1988       return;
   1989     } else {
   1990       __ bind(&return_label_);
   1991       AssembleDeconstructFrame();
   1992     }
   1993   }
   1994   __ Ret(pop_count);
   1995 }
   1996 
   1997 void CodeGenerator::AssembleMove(InstructionOperand* source,
   1998                                  InstructionOperand* destination) {
   1999   S390OperandConverter g(this, nullptr);
   2000   // Dispatch on the source and destination operand kinds.  Not all
   2001   // combinations are possible.
   2002   if (source->IsRegister()) {
   2003     DCHECK(destination->IsRegister() || destination->IsStackSlot());
   2004     Register src = g.ToRegister(source);
   2005     if (destination->IsRegister()) {
   2006       __ Move(g.ToRegister(destination), src);
   2007     } else {
   2008       __ StoreP(src, g.ToMemOperand(destination));
   2009     }
   2010   } else if (source->IsStackSlot()) {
   2011     DCHECK(destination->IsRegister() || destination->IsStackSlot());
   2012     MemOperand src = g.ToMemOperand(source);
   2013     if (destination->IsRegister()) {
   2014       __ LoadP(g.ToRegister(destination), src);
   2015     } else {
   2016       Register temp = kScratchReg;
   2017       __ LoadP(temp, src, r0);
   2018       __ StoreP(temp, g.ToMemOperand(destination));
   2019     }
   2020   } else if (source->IsConstant()) {
   2021     Constant src = g.ToConstant(source);
   2022     if (destination->IsRegister() || destination->IsStackSlot()) {
   2023       Register dst =
   2024           destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
   2025       switch (src.type()) {
   2026         case Constant::kInt32:
   2027 #if V8_TARGET_ARCH_S390X
   2028           if (src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
   2029 #else
   2030           if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
   2031               src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE ||
   2032               src.rmode() == RelocInfo::WASM_MEMORY_SIZE_REFERENCE) {
   2033 #endif
   2034             __ mov(dst, Operand(src.ToInt32(), src.rmode()));
   2035           } else {
   2036             __ mov(dst, Operand(src.ToInt32()));
   2037           }
   2038           break;
   2039         case Constant::kInt64:
   2040 #if V8_TARGET_ARCH_S390X
   2041           if (src.rmode() == RelocInfo::WASM_MEMORY_REFERENCE ||
   2042               src.rmode() == RelocInfo::WASM_GLOBAL_REFERENCE) {
   2043             __ mov(dst, Operand(src.ToInt64(), src.rmode()));
   2044           } else {
   2045             DCHECK(src.rmode() != RelocInfo::WASM_MEMORY_SIZE_REFERENCE);
   2046             __ mov(dst, Operand(src.ToInt64()));
   2047           }
   2048 #else
   2049           __ mov(dst, Operand(src.ToInt64()));
   2050 #endif  // V8_TARGET_ARCH_S390X
   2051           break;
   2052         case Constant::kFloat32:
   2053           __ Move(dst,
   2054                   isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
   2055           break;
   2056         case Constant::kFloat64:
   2057           __ Move(dst,
   2058                   isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
   2059           break;
   2060         case Constant::kExternalReference:
   2061           __ mov(dst, Operand(src.ToExternalReference()));
   2062           break;
   2063         case Constant::kHeapObject: {
   2064           Handle<HeapObject> src_object = src.ToHeapObject();
   2065           Heap::RootListIndex index;
   2066           int slot;
   2067           if (IsMaterializableFromFrame(src_object, &slot)) {
   2068             __ LoadP(dst, g.SlotToMemOperand(slot));
   2069           } else if (IsMaterializableFromRoot(src_object, &index)) {
   2070             __ LoadRoot(dst, index);
   2071           } else {
   2072             __ Move(dst, src_object);
   2073           }
   2074           break;
   2075         }
   2076         case Constant::kRpoNumber:
   2077           UNREACHABLE();  // TODO(dcarney): loading RPO constants on S390.
   2078           break;
   2079       }
   2080       if (destination->IsStackSlot()) {
   2081         __ StoreP(dst, g.ToMemOperand(destination), r0);
   2082       }
   2083     } else {
   2084       DoubleRegister dst = destination->IsFPRegister()
   2085                                ? g.ToDoubleRegister(destination)
   2086                                : kScratchDoubleReg;
   2087       double value = (src.type() == Constant::kFloat32) ? src.ToFloat32()
   2088                                                         : src.ToFloat64();
   2089       if (src.type() == Constant::kFloat32) {
   2090         __ LoadFloat32Literal(dst, src.ToFloat32(), kScratchReg);
   2091       } else {
   2092         __ LoadDoubleLiteral(dst, value, kScratchReg);
   2093       }
   2094 
   2095       if (destination->IsFPStackSlot()) {
   2096         __ StoreDouble(dst, g.ToMemOperand(destination));
   2097       }
   2098     }
   2099   } else if (source->IsFPRegister()) {
   2100     DoubleRegister src = g.ToDoubleRegister(source);
   2101     if (destination->IsFPRegister()) {
   2102       DoubleRegister dst = g.ToDoubleRegister(destination);
   2103       __ Move(dst, src);
   2104     } else {
   2105       DCHECK(destination->IsFPStackSlot());
   2106       LocationOperand* op = LocationOperand::cast(source);
   2107       if (op->representation() == MachineRepresentation::kFloat64) {
   2108         __ StoreDouble(src, g.ToMemOperand(destination));
   2109       } else {
   2110         __ StoreFloat32(src, g.ToMemOperand(destination));
   2111       }
   2112     }
   2113   } else if (source->IsFPStackSlot()) {
   2114     DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
   2115     MemOperand src = g.ToMemOperand(source);
   2116     if (destination->IsFPRegister()) {
   2117       LocationOperand* op = LocationOperand::cast(source);
   2118       if (op->representation() == MachineRepresentation::kFloat64) {
   2119         __ LoadDouble(g.ToDoubleRegister(destination), src);
   2120       } else {
   2121         __ LoadFloat32(g.ToDoubleRegister(destination), src);
   2122       }
   2123     } else {
   2124       LocationOperand* op = LocationOperand::cast(source);
   2125       DoubleRegister temp = kScratchDoubleReg;
   2126       if (op->representation() == MachineRepresentation::kFloat64) {
   2127         __ LoadDouble(temp, src);
   2128         __ StoreDouble(temp, g.ToMemOperand(destination));
   2129       } else {
   2130         __ LoadFloat32(temp, src);
   2131         __ StoreFloat32(temp, g.ToMemOperand(destination));
   2132       }
   2133     }
   2134   } else {
   2135     UNREACHABLE();
   2136   }
   2137 }
   2138 
// Exchanges the contents of two instruction operands (register/stack-slot,
// in both the general-purpose and floating-point domains) using the scratch
// registers as temporaries. Invoked by the gap resolver when a parallel move
// requires a swap.
void CodeGenerator::AssembleSwap(InstructionOperand* source,
                                 InstructionOperand* destination) {
  S390OperandConverter g(this, nullptr);
  // Dispatch on the source and destination operand kinds.  Not all
  // combinations are possible.
  if (source->IsRegister()) {
    // Register-register.
    Register temp = kScratchReg;
    Register src = g.ToRegister(source);
    if (destination->IsRegister()) {
      // Three-move rotation through the scratch register.
      Register dst = g.ToRegister(destination);
      __ LoadRR(temp, src);
      __ LoadRR(src, dst);
      __ LoadRR(dst, temp);
    } else {
      // Register <-> stack slot: stash src, load the slot into src,
      // then write the stashed value back to the slot.
      DCHECK(destination->IsStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ LoadRR(temp, src);
      __ LoadP(src, dst);
      __ StoreP(temp, dst);
    }
#if V8_TARGET_ARCH_S390X
  // On 64-bit, FP stack slots are also swapped here via pointer-width
  // GPR loads/stores (LoadP/StoreP move 64 bits, enough for a double).
  } else if (source->IsStackSlot() || source->IsFPStackSlot()) {
#else
  } else if (source->IsStackSlot()) {
    DCHECK(destination->IsStackSlot());
#endif
    // Stack slot <-> stack slot: swap through two GPR temporaries.
    Register temp_0 = kScratchReg;
    Register temp_1 = r0;
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    __ LoadP(temp_0, src);
    __ LoadP(temp_1, dst);
    __ StoreP(temp_0, dst);
    __ StoreP(temp_1, src);
  } else if (source->IsFPRegister()) {
    DoubleRegister temp = kScratchDoubleReg;
    DoubleRegister src = g.ToDoubleRegister(source);
    if (destination->IsFPRegister()) {
      // FP register <-> FP register: three-move rotation via the FP scratch.
      DoubleRegister dst = g.ToDoubleRegister(destination);
      __ ldr(temp, src);
      __ ldr(src, dst);
      __ ldr(dst, temp);
    } else {
      // FP register <-> FP stack slot.
      DCHECK(destination->IsFPStackSlot());
      MemOperand dst = g.ToMemOperand(destination);
      __ ldr(temp, src);
      __ LoadDouble(src, dst);
      __ StoreDouble(temp, dst);
    }
#if !V8_TARGET_ARCH_S390X
  // On 31/32-bit, FP slot <-> FP slot swaps cannot go through GPRs
  // (LoadP/StoreP are only 32 bits wide there), so use FP temporaries.
  } else if (source->IsFPStackSlot()) {
    DCHECK(destination->IsFPStackSlot());
    DoubleRegister temp_0 = kScratchDoubleReg;
    DoubleRegister temp_1 = d0;
    MemOperand src = g.ToMemOperand(source);
    MemOperand dst = g.ToMemOperand(destination);
    // TODO(joransiu): MVC opportunity
    __ LoadDouble(temp_0, src);
    __ LoadDouble(temp_1, dst);
    __ StoreDouble(temp_0, dst);
    __ StoreDouble(temp_1, src);
#endif
  } else {
    // No other combinations are possible.
    UNREACHABLE();
  }
}
   2207 
   2208 void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
   2209   for (size_t index = 0; index < target_count; ++index) {
   2210     __ emit_label_addr(targets[index]);
   2211   }
   2212 }
   2213 
   2214 void CodeGenerator::EnsureSpaceForLazyDeopt() {
   2215   if (!info()->ShouldEnsureSpaceForLazyDeopt()) {
   2216     return;
   2217   }
   2218 
   2219   int space_needed = Deoptimizer::patch_size();
   2220   // Ensure that we have enough space after the previous lazy-bailout
   2221   // instruction for patching the code here.
   2222   int current_pc = masm()->pc_offset();
   2223   if (current_pc < last_lazy_deopt_pc_ + space_needed) {
   2224     int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
   2225     DCHECK_EQ(0, padding_size % 2);
   2226     while (padding_size > 0) {
   2227       __ nop();
   2228       padding_size -= 2;
   2229     }
   2230   }
   2231 }
   2232 
   2233 #undef __
   2234 
   2235 }  // namespace compiler
   2236 }  // namespace internal
   2237 }  // namespace v8
   2238