      1 // Copyright 2014 the V8 project authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #ifndef V8_S390_MACRO_ASSEMBLER_S390_H_
      6 #define V8_S390_MACRO_ASSEMBLER_S390_H_
      7 
      8 #include "src/assembler.h"
      9 #include "src/bailout-reason.h"
     10 #include "src/globals.h"
     11 #include "src/s390/assembler-s390.h"
     12 #include "src/turbo-assembler.h"
     13 
     14 namespace v8 {
     15 namespace internal {
     16 
     17 // Give alias names to registers for calling conventions.
     18 constexpr Register kReturnRegister0 = r2;
     19 constexpr Register kReturnRegister1 = r3;
     20 constexpr Register kReturnRegister2 = r4;
     21 constexpr Register kJSFunctionRegister = r3;
     22 constexpr Register kContextRegister = r13;
     23 constexpr Register kAllocateSizeRegister = r3;
     24 constexpr Register kSpeculationPoisonRegister = r9;
     25 constexpr Register kInterpreterAccumulatorRegister = r2;
     26 constexpr Register kInterpreterBytecodeOffsetRegister = r6;
     27 constexpr Register kInterpreterBytecodeArrayRegister = r7;
     28 constexpr Register kInterpreterDispatchTableRegister = r8;
     29 
     30 constexpr Register kJavaScriptCallArgCountRegister = r2;
     31 constexpr Register kJavaScriptCallCodeStartRegister = r4;
     32 constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
     33 constexpr Register kJavaScriptCallNewTargetRegister = r5;
     34 constexpr Register kJavaScriptCallExtraArg1Register = r4;
     35 
     36 constexpr Register kOffHeapTrampolineRegister = ip;
     37 constexpr Register kRuntimeCallFunctionRegister = r3;
     38 constexpr Register kRuntimeCallArgCountRegister = r2;
     39 constexpr Register kRuntimeCallArgvRegister = r4;
     40 constexpr Register kWasmInstanceRegister = r6;
     41 
     42 // ----------------------------------------------------------------------------
     43 // Static helper functions
     44 
     45 // Generate a MemOperand for loading a field from an object.
     46 inline MemOperand FieldMemOperand(Register object, int offset) {
     47   return MemOperand(object, offset - kHeapObjectTag);
     48 }
     49 
     50 // Generate a MemOperand for loading a field from an object.
     51 inline MemOperand FieldMemOperand(Register object, Register index, int offset) {
     52   return MemOperand(object, index, offset - kHeapObjectTag);
     53 }
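
        // A minimal usage sketch for FieldMemOperand, assuming a TurboAssembler in
        // scope and a tagged heap object in r3: load the object's map field through
        // the tag-adjusted offset.
        //   LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));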
     54 
     55 // Generate a MemOperand for loading an entry from the root list, relative to the root register.
     56 inline MemOperand RootMemOperand(Heap::RootListIndex index) {
     57   return MemOperand(kRootRegister, index << kPointerSizeLog2);
     58 }
     59 
     60 enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
     61 enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
     62 enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
     63 
     64 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
     65                                    Register reg3 = no_reg,
     66                                    Register reg4 = no_reg,
     67                                    Register reg5 = no_reg,
     68                                    Register reg6 = no_reg);
     69 
     70 // These exist to provide portability between 32-bit and 64-bit builds.
     71 #if V8_TARGET_ARCH_S390X
     72 
     73 // The length of the arithmetic operation is the length
     74 // of the register.
     75 
     76 // Length:
     77 // H = halfword
     78 // W = word
     79 
     80 // Arithmetic and bitwise operations
     81 #define AddMI agsi
     82 #define AddRR agr
     83 #define SubRR sgr
     84 #define AndRR ngr
     85 #define OrRR ogr
     86 #define XorRR xgr
     87 #define LoadComplementRR lcgr
     88 #define LoadNegativeRR lngr
     89 
     90 // Distinct Operands
     91 #define AddP_RRR agrk
     92 #define AddPImm_RRI aghik
     93 #define AddLogicalP_RRR algrk
     94 #define SubP_RRR sgrk
     95 #define SubLogicalP_RRR slgrk
     96 #define AndP_RRR ngrk
     97 #define OrP_RRR ogrk
     98 #define XorP_RRR xgrk
     99 
    100 // Load / Store
    101 #define LoadRR lgr
    102 #define LoadAndTestRR ltgr
    103 #define LoadImmP lghi
    104 
    105 // Compare
    106 #define CmpPH cghi
    107 #define CmpLogicalPW clgfi
    108 
    109 // Shifts
    110 #define ShiftLeftP sllg
    111 #define ShiftRightP srlg
    112 #define ShiftLeftArithP slag
    113 #define ShiftRightArithP srag
    114 #else
    115 
    116 // Arithmetic and bitwise operations
    117 // Reg2Reg
    118 #define AddMI asi
    119 #define AddRR ar
    120 #define SubRR sr
    121 #define AndRR nr
    122 #define OrRR or_z
    123 #define XorRR xr
    124 #define LoadComplementRR lcr
    125 #define LoadNegativeRR lnr
    126 
    127 // Distinct Operands
    128 #define AddP_RRR ark
    129 #define AddPImm_RRI ahik
    130 #define AddLogicalP_RRR alrk
    131 #define SubP_RRR srk
    132 #define SubLogicalP_RRR slrk
    133 #define AndP_RRR nrk
    134 #define OrP_RRR ork
    135 #define XorP_RRR xrk
    136 
    137 // Load / Store
    138 #define LoadRR lr
    139 #define LoadAndTestRR ltr
    140 #define LoadImmP lhi
    141 
    142 // Compare
    143 #define CmpPH chi
    144 #define CmpLogicalPW clfi
    145 
    146 // Shifts
    147 #define ShiftLeftP ShiftLeft
    148 #define ShiftRightP ShiftRight
    149 #define ShiftLeftArithP ShiftLeftArith
    150 #define ShiftRightArithP ShiftRightArith
    151 
    152 #endif
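
        // Sketch of how the width-portable aliases above are used: one source line
        // picks the correct opcode for the build.
        //   LoadRR(r3, r4);  // assembles to 'lgr' on S390X, 'lr' on 31-bit builds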
    153 
    154 class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
    155  public:
    156   TurboAssembler(Isolate* isolate, const AssemblerOptions& options,
    157                  void* buffer, int buffer_size,
    158                  CodeObjectRequired create_code_object)
    159       : TurboAssemblerBase(isolate, options, buffer, buffer_size,
    160                            create_code_object) {}
    161 
    162   void LoadFromConstantsTable(Register destination,
    163                               int constant_index) override;
    164   void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
    165   void LoadRootRelative(Register destination, int32_t offset) override;
    166 
    167   // Jump, Call, and Ret pseudo instructions implementing inter-working.
    168   void Jump(Register target, Condition cond = al);
    169   void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
    170   void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
    171   // Jump if the register contains a smi.
    172   inline void JumpIfSmi(Register value, Label* smi_label) {
    173     TestIfSmi(value);
    174     beq(smi_label /*, cr0*/);  // branch if SMI
    175   }
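
          // A usage sketch for JumpIfSmi above, assuming a bound label 'is_smi':
          //   JumpIfSmi(r3, &is_smi);  // falls through when r3 holds a heap object
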
    176   void JumpIfEqual(Register x, int32_t y, Label* dest);
    177   void JumpIfLessThan(Register x, int32_t y, Label* dest);
    178 
    179   void Call(Register target);
    180   void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
    181   void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
    182             Condition cond = al);
    183   void Ret() { b(r14); }
    184   void Ret(Condition cond) { b(cond, r14); }
    185 
    186   void CallForDeoptimization(Address target, int deopt_id,
    187                              RelocInfo::Mode rmode) {
    188     USE(deopt_id);
    189     Call(target, rmode);
    190   }
    191 
    192   // Emit code to discard a non-negative number of pointer-sized elements
    193   // from the stack, clobbering only the sp register.
    194   void Drop(int count);
    195   void Drop(Register count, Register scratch = r0);
    196 
    197   void Ret(int drop) {
    198     Drop(drop);
    199     Ret();
    200   }
    201 
    202   void Call(Label* target);
    203 
    204   // Register move. May do nothing if the registers are identical.
    205   void Move(Register dst, Smi* smi) { LoadSmiLiteral(dst, smi); }
    206   void Move(Register dst, Handle<HeapObject> value);
    207   void Move(Register dst, ExternalReference reference);
    208   void Move(Register dst, Register src, Condition cond = al);
    209   void Move(DoubleRegister dst, DoubleRegister src);
    210 
    211   void MoveChar(const MemOperand& opnd1, const MemOperand& opnd2,
    212                    const Operand& length);
    213 
    214   void CompareLogicalChar(const MemOperand& opnd1, const MemOperand& opnd2,
    215                    const Operand& length);
    216 
    217   void ExclusiveOrChar(const MemOperand& opnd1, const MemOperand& opnd2,
    218                    const Operand& length);
    219 
    220   void RotateInsertSelectBits(Register dst, Register src,
    221                      const Operand& startBit, const Operand& endBit,
    222                      const Operand& shiftAmt, bool zeroBits);
    223 
    224   void BranchRelativeOnIdxHighP(Register dst, Register inc, Label* L);
    225 
    226   void SaveRegisters(RegList registers);
    227   void RestoreRegisters(RegList registers);
    228 
    229   void CallRecordWriteStub(Register object, Register address,
    230                            RememberedSetAction remembered_set_action,
    231                            SaveFPRegsMode fp_mode);
    232 
    233   void MultiPush(RegList regs, Register location = sp);
    234   void MultiPop(RegList regs, Register location = sp);
    235 
    236   void MultiPushDoubles(RegList dregs, Register location = sp);
    237   void MultiPopDoubles(RegList dregs, Register location = sp);
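
          // A minimal RegList sketch (the register choice is illustrative only):
          //   RegList regs = r6.bit() | r7.bit() | r8.bit();
          //   MultiPush(regs);   // stores the set contiguously below sp
          //   ...
          //   MultiPop(regs);    // reloads the registers and restores sp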
    238 
    239   // Calculate how much stack space (in bytes) is required to store caller-saved
    240   // registers, excluding those specified in the arguments.
    241   int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
    242                                       Register exclusion1 = no_reg,
    243                                       Register exclusion2 = no_reg,
    244                                       Register exclusion3 = no_reg) const;
    245 
    246   // Push caller-saved registers on the stack, and return the number of bytes
    247   // by which the stack pointer is adjusted.
    248   int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
    249                       Register exclusion2 = no_reg,
    250                       Register exclusion3 = no_reg);
    251   // Restore caller-saved registers from the stack, and return the number of
    252   // bytes by which the stack pointer is adjusted.
    253   int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
    254                      Register exclusion2 = no_reg,
    255                      Register exclusion3 = no_reg);
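
          // Typical pairing (a sketch; the exclusion and fp_mode depend on the call
          // site):
          //   PushCallerSaved(kDontSaveFPRegs, r2);  // save caller-saved regs except r2
          //   ...                                    // code that clobbers caller-saved regs
          //   PopCallerSaved(kDontSaveFPRegs, r2);   // restore them, leaving r2 untouched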
    256 
    257   // Load an object from the root table.
    258   void LoadRoot(Register destination, Heap::RootListIndex index) override {
    259     LoadRoot(destination, index, al);
    260   }
    261   void LoadRoot(Register destination, Heap::RootListIndex index,
    262                 Condition cond);
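
          // Example (sketch): materialize the undefined value from the root list.
          //   LoadRoot(r4, Heap::kUndefinedValueRootIndex);
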
    263   //--------------------------------------------------------------------------
    264   // S390 Macro Assemblers for Instructions
    265   //--------------------------------------------------------------------------
    266 
    267   // Arithmetic Operations
    268 
    269   // Add (Register - Immediate)
    270   void Add32(Register dst, const Operand& imm);
    271   void Add32_RI(Register dst, const Operand& imm);
    272   void AddP(Register dst, const Operand& imm);
    273   void Add32(Register dst, Register src, const Operand& imm);
    274   void Add32_RRI(Register dst, Register src, const Operand& imm);
    275   void AddP(Register dst, Register src, const Operand& imm);
    276 
    277   // Add (Register - Register)
    278   void Add32(Register dst, Register src);
    279   void AddP(Register dst, Register src);
    280   void AddP_ExtendSrc(Register dst, Register src);
    281   void Add32(Register dst, Register src1, Register src2);
    282   void AddP(Register dst, Register src1, Register src2);
    283   void AddP_ExtendSrc(Register dst, Register src1, Register src2);
    284 
    285   // Add (Register - Mem)
    286   void Add32(Register dst, const MemOperand& opnd);
    287   void AddP(Register dst, const MemOperand& opnd);
    288   void AddP_ExtendSrc(Register dst, const MemOperand& opnd);
    289 
    290   // Add (Mem - Immediate)
    291   void Add32(const MemOperand& opnd, const Operand& imm);
    292   void AddP(const MemOperand& opnd, const Operand& imm);
    293 
    294   // Add Logical (Register - Register)
    295   void AddLogical32(Register dst, Register src1, Register src2);
    296 
    297   // Add Logical With Carry (Register - Register)
    298   void AddLogicalWithCarry32(Register dst, Register src1, Register src2);
    299 
    300   // Add Logical (Register - Immediate)
    301   void AddLogical(Register dst, const Operand& imm);
    302   void AddLogicalP(Register dst, const Operand& imm);
    303 
    304   // Add Logical (Register - Mem)
    305   void AddLogical(Register dst, const MemOperand& opnd);
    306   void AddLogicalP(Register dst, const MemOperand& opnd);
    307 
    308   // Subtract (Register - Immediate)
    309   void Sub32(Register dst, const Operand& imm);
    310   void Sub32_RI(Register dst, const Operand& imm) { Sub32(dst, imm); }
    311   void SubP(Register dst, const Operand& imm);
    312   void Sub32(Register dst, Register src, const Operand& imm);
    313   void Sub32_RRI(Register dst, Register src, const Operand& imm) {
    314     Sub32(dst, src, imm);
    315   }
    316   void SubP(Register dst, Register src, const Operand& imm);
    317 
    318   // Subtract (Register - Register)
    319   void Sub32(Register dst, Register src);
    320   void SubP(Register dst, Register src);
    321   void SubP_ExtendSrc(Register dst, Register src);
    322   void Sub32(Register dst, Register src1, Register src2);
    323   void SubP(Register dst, Register src1, Register src2);
    324   void SubP_ExtendSrc(Register dst, Register src1, Register src2);
    325 
    326   // Subtract (Register - Mem)
    327   void Sub32(Register dst, const MemOperand& opnd);
    328   void SubP(Register dst, const MemOperand& opnd);
    329   void SubP_ExtendSrc(Register dst, const MemOperand& opnd);
    330   void LoadAndSub32(Register dst, Register src, const MemOperand& opnd);
    331 
    332   // Subtract Logical (Register - Mem)
    333   void SubLogical(Register dst, const MemOperand& opnd);
    334   void SubLogicalP(Register dst, const MemOperand& opnd);
    335   void SubLogicalP_ExtendSrc(Register dst, const MemOperand& opnd);
    336   // Subtract Logical 32-bit
    337   void SubLogical32(Register dst, Register src1, Register src2);
    338   // Subtract Logical With Borrow 32-bit
    339   void SubLogicalWithBorrow32(Register dst, Register src1, Register src2);
    340 
    341   // Multiply
    342   void MulP(Register dst, const Operand& opnd);
    343   void MulP(Register dst, Register src);
    344   void MulP(Register dst, const MemOperand& opnd);
    345   void Mul(Register dst, Register src1, Register src2);
    346   void Mul32(Register dst, const MemOperand& src1);
    347   void Mul32(Register dst, Register src1);
    348   void Mul32(Register dst, const Operand& src1);
    349   void MulHigh32(Register dst, Register src1, const MemOperand& src2);
    350   void MulHigh32(Register dst, Register src1, Register src2);
    351   void MulHigh32(Register dst, Register src1, const Operand& src2);
    352   void MulHighU32(Register dst, Register src1, const MemOperand& src2);
    353   void MulHighU32(Register dst, Register src1, Register src2);
    354   void MulHighU32(Register dst, Register src1, const Operand& src2);
    355   void Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
    356                                     const MemOperand& src2);
    357   void Mul32WithOverflowIfCCUnequal(Register dst, Register src1, Register src2);
    358   void Mul32WithOverflowIfCCUnequal(Register dst, Register src1,
    359                                     const Operand& src2);
    360   void Mul64(Register dst, const MemOperand& src1);
    361   void Mul64(Register dst, Register src1);
    362   void Mul64(Register dst, const Operand& src1);
    363   void MulPWithCondition(Register dst, Register src1, Register src2);
    364 
    365   // Divide
    366   void DivP(Register dividend, Register divider);
    367   void Div32(Register dst, Register src1, const MemOperand& src2);
    368   void Div32(Register dst, Register src1, Register src2);
    369   void DivU32(Register dst, Register src1, const MemOperand& src2);
    370   void DivU32(Register dst, Register src1, Register src2);
    371   void Div64(Register dst, Register src1, const MemOperand& src2);
    372   void Div64(Register dst, Register src1, Register src2);
    373   void DivU64(Register dst, Register src1, const MemOperand& src2);
    374   void DivU64(Register dst, Register src1, Register src2);
    375 
    376   // Mod
    377   void Mod32(Register dst, Register src1, const MemOperand& src2);
    378   void Mod32(Register dst, Register src1, Register src2);
    379   void ModU32(Register dst, Register src1, const MemOperand& src2);
    380   void ModU32(Register dst, Register src1, Register src2);
    381   void Mod64(Register dst, Register src1, const MemOperand& src2);
    382   void Mod64(Register dst, Register src1, Register src2);
    383   void ModU64(Register dst, Register src1, const MemOperand& src2);
    384   void ModU64(Register dst, Register src1, Register src2);
    385 
    386   // Square root
    387   void Sqrt(DoubleRegister result, DoubleRegister input);
    388   void Sqrt(DoubleRegister result, const MemOperand& input);
    389 
    390   // Compare
    391   void Cmp32(Register src1, Register src2);
    392   void CmpP(Register src1, Register src2);
    393   void Cmp32(Register dst, const Operand& opnd);
    394   void CmpP(Register dst, const Operand& opnd);
    395   void Cmp32(Register dst, const MemOperand& opnd);
    396   void CmpP(Register dst, const MemOperand& opnd);
    397   void CmpAndSwap(Register old_val, Register new_val, const MemOperand& opnd);
    398 
    399   // Compare Logical
    400   void CmpLogical32(Register src1, Register src2);
    401   void CmpLogicalP(Register src1, Register src2);
    402   void CmpLogical32(Register src1, const Operand& opnd);
    403   void CmpLogicalP(Register src1, const Operand& opnd);
    404   void CmpLogical32(Register dst, const MemOperand& opnd);
    405   void CmpLogicalP(Register dst, const MemOperand& opnd);
    406 
    407   // Compare Logical Byte (CLI/CLIY)
    408   void CmpLogicalByte(const MemOperand& mem, const Operand& imm);
    409 
    410   // Load 32-bit
    411   void Load(Register dst, const MemOperand& opnd);
    412   void Load(Register dst, const Operand& opnd);
    413   void LoadW(Register dst, const MemOperand& opnd, Register scratch = no_reg);
    414   void LoadW(Register dst, Register src);
    415   void LoadlW(Register dst, const MemOperand& opnd, Register scratch = no_reg);
    416   void LoadlW(Register dst, Register src);
    417   void LoadLogicalHalfWordP(Register dst, const MemOperand& opnd);
    418   void LoadLogicalHalfWordP(Register dst, Register src);
    419   void LoadB(Register dst, const MemOperand& opnd);
    420   void LoadB(Register dst, Register src);
    421   void LoadlB(Register dst, const MemOperand& opnd);
    422   void LoadlB(Register dst, Register src);
    423 
    424   void LoadLogicalReversedWordP(Register dst, const MemOperand& opnd);
    425   void LoadLogicalReversedHalfWordP(Register dst, const MemOperand& opnd);
    426 
    427   // Load And Test
    428   void LoadAndTest32(Register dst, Register src);
    429   void LoadAndTestP_ExtendSrc(Register dst, Register src);
    430   void LoadAndTestP(Register dst, Register src);
    431 
    432   void LoadAndTest32(Register dst, const MemOperand& opnd);
    433   void LoadAndTestP(Register dst, const MemOperand& opnd);
    434 
    435   // Load Floating Point
    436   void LoadDouble(DoubleRegister dst, const MemOperand& opnd);
    437   void LoadFloat32(DoubleRegister dst, const MemOperand& opnd);
    438   void LoadFloat32ConvertToDouble(DoubleRegister dst, const MemOperand& mem);
    439 
    440   void AddFloat32(DoubleRegister dst, const MemOperand& opnd,
    441                   DoubleRegister scratch);
    442   void AddFloat64(DoubleRegister dst, const MemOperand& opnd,
    443                   DoubleRegister scratch);
    444   void SubFloat32(DoubleRegister dst, const MemOperand& opnd,
    445                   DoubleRegister scratch);
    446   void SubFloat64(DoubleRegister dst, const MemOperand& opnd,
    447                   DoubleRegister scratch);
    448   void MulFloat32(DoubleRegister dst, const MemOperand& opnd,
    449                   DoubleRegister scratch);
    450   void MulFloat64(DoubleRegister dst, const MemOperand& opnd,
    451                   DoubleRegister scratch);
    452   void DivFloat32(DoubleRegister dst, const MemOperand& opnd,
    453                   DoubleRegister scratch);
    454   void DivFloat64(DoubleRegister dst, const MemOperand& opnd,
    455                   DoubleRegister scratch);
    456   void LoadFloat32ToDouble(DoubleRegister dst, const MemOperand& opnd,
    457                            DoubleRegister scratch);
    458 
    459   // Load On Condition
    460   void LoadOnConditionP(Condition cond, Register dst, Register src);
    461 
    462   void LoadPositiveP(Register result, Register input);
    463   void LoadPositive32(Register result, Register input);
    464 
    465   // Store Floating Point
    466   void StoreDouble(DoubleRegister dst, const MemOperand& opnd);
    467   void StoreFloat32(DoubleRegister dst, const MemOperand& opnd);
    468   void StoreDoubleAsFloat32(DoubleRegister src, const MemOperand& mem,
    469                             DoubleRegister scratch);
    470 
    471   void Branch(Condition c, const Operand& opnd);
    472   void BranchOnCount(Register r1, Label* l);
    473 
    474   // Shifts
    475   void ShiftLeft(Register dst, Register src, Register val);
    476   void ShiftLeft(Register dst, Register src, const Operand& val);
    477   void ShiftRight(Register dst, Register src, Register val);
    478   void ShiftRight(Register dst, Register src, const Operand& val);
    479   void ShiftLeftArith(Register dst, Register src, Register shift);
    480   void ShiftLeftArith(Register dst, Register src, const Operand& val);
    481   void ShiftRightArith(Register dst, Register src, Register shift);
    482   void ShiftRightArith(Register dst, Register src, const Operand& val);
    483 
    484   void ClearRightImm(Register dst, Register src, const Operand& val);
    485 
    486   // Bitwise operations
    487   void And(Register dst, Register src);
    488   void AndP(Register dst, Register src);
    489   void And(Register dst, Register src1, Register src2);
    490   void AndP(Register dst, Register src1, Register src2);
    491   void And(Register dst, const MemOperand& opnd);
    492   void AndP(Register dst, const MemOperand& opnd);
    493   void And(Register dst, const Operand& opnd);
    494   void AndP(Register dst, const Operand& opnd);
    495   void And(Register dst, Register src, const Operand& opnd);
    496   void AndP(Register dst, Register src, const Operand& opnd);
    497   void Or(Register dst, Register src);
    498   void OrP(Register dst, Register src);
    499   void Or(Register dst, Register src1, Register src2);
    500   void OrP(Register dst, Register src1, Register src2);
    501   void Or(Register dst, const MemOperand& opnd);
    502   void OrP(Register dst, const MemOperand& opnd);
    503   void Or(Register dst, const Operand& opnd);
    504   void OrP(Register dst, const Operand& opnd);
    505   void Or(Register dst, Register src, const Operand& opnd);
    506   void OrP(Register dst, Register src, const Operand& opnd);
    507   void Xor(Register dst, Register src);
    508   void XorP(Register dst, Register src);
    509   void Xor(Register dst, Register src1, Register src2);
    510   void XorP(Register dst, Register src1, Register src2);
    511   void Xor(Register dst, const MemOperand& opnd);
    512   void XorP(Register dst, const MemOperand& opnd);
    513   void Xor(Register dst, const Operand& opnd);
    514   void XorP(Register dst, const Operand& opnd);
    515   void Xor(Register dst, Register src, const Operand& opnd);
    516   void XorP(Register dst, Register src, const Operand& opnd);
    517   void Popcnt32(Register dst, Register src);
    518   void Not32(Register dst, Register src = no_reg);
    519   void Not64(Register dst, Register src = no_reg);
    520   void NotP(Register dst, Register src = no_reg);
    521 
    522 #ifdef V8_TARGET_ARCH_S390X
    523   void Popcnt64(Register dst, Register src);
    524 #endif
    525 
    526   void mov(Register dst, const Operand& src);
    527 
    528   void CleanUInt32(Register x) {
    529 #ifdef V8_TARGET_ARCH_S390X
    530     llgfr(x, x);
    531 #endif
    532   }
    533 
    534 
    535   void push(Register src) {
    536     lay(sp, MemOperand(sp, -kPointerSize));
    537     StoreP(src, MemOperand(sp));
    538   }
    539 
    540   void pop(Register dst) {
    541     LoadP(dst, MemOperand(sp));
    542     la(sp, MemOperand(sp, kPointerSize));
    543   }
    544 
    545   void pop() { la(sp, MemOperand(sp, kPointerSize)); }
    546 
    547   void Push(Register src) { push(src); }
    548 
    549   // Push a handle.
    550   void Push(Handle<HeapObject> handle);
    551   void Push(Smi* smi);
    552 
    553   // Push two registers.  Pushes leftmost register first (to highest address).
    554   void Push(Register src1, Register src2) {
    555     lay(sp, MemOperand(sp, -kPointerSize * 2));
    556     StoreP(src1, MemOperand(sp, kPointerSize));
    557     StoreP(src2, MemOperand(sp, 0));
    558   }
    559 
    560   // Push three registers.  Pushes leftmost register first (to highest address).
    561   void Push(Register src1, Register src2, Register src3) {
    562     lay(sp, MemOperand(sp, -kPointerSize * 3));
    563     StoreP(src1, MemOperand(sp, kPointerSize * 2));
    564     StoreP(src2, MemOperand(sp, kPointerSize));
    565     StoreP(src3, MemOperand(sp, 0));
    566   }
    567 
    568   // Push four registers.  Pushes leftmost register first (to highest address).
    569   void Push(Register src1, Register src2, Register src3, Register src4) {
    570     lay(sp, MemOperand(sp, -kPointerSize * 4));
    571     StoreP(src1, MemOperand(sp, kPointerSize * 3));
    572     StoreP(src2, MemOperand(sp, kPointerSize * 2));
    573     StoreP(src3, MemOperand(sp, kPointerSize));
    574     StoreP(src4, MemOperand(sp, 0));
    575   }
    576 
    577   // Push five registers.  Pushes leftmost register first (to highest address).
    578   void Push(Register src1, Register src2, Register src3, Register src4,
    579             Register src5) {
    580     DCHECK(src1 != src2);
    581     DCHECK(src1 != src3);
    582     DCHECK(src2 != src3);
    583     DCHECK(src1 != src4);
    584     DCHECK(src2 != src4);
    585     DCHECK(src3 != src4);
    586     DCHECK(src1 != src5);
    587     DCHECK(src2 != src5);
    588     DCHECK(src3 != src5);
    589     DCHECK(src4 != src5);
    590 
    591     lay(sp, MemOperand(sp, -kPointerSize * 5));
    592     StoreP(src1, MemOperand(sp, kPointerSize * 4));
    593     StoreP(src2, MemOperand(sp, kPointerSize * 3));
    594     StoreP(src3, MemOperand(sp, kPointerSize * 2));
    595     StoreP(src4, MemOperand(sp, kPointerSize));
    596     StoreP(src5, MemOperand(sp, 0));
    597   }
    598 
    599   void Pop(Register dst) { pop(dst); }
    600 
    601   // Pop two registers. Pops rightmost register first (from lower address).
    602   void Pop(Register src1, Register src2) {
    603     LoadP(src2, MemOperand(sp, 0));
    604     LoadP(src1, MemOperand(sp, kPointerSize));
    605     la(sp, MemOperand(sp, 2 * kPointerSize));
    606   }
    607 
    608   // Pop three registers.  Pops rightmost register first (from lower address).
    609   void Pop(Register src1, Register src2, Register src3) {
    610     LoadP(src3, MemOperand(sp, 0));
    611     LoadP(src2, MemOperand(sp, kPointerSize));
    612     LoadP(src1, MemOperand(sp, 2 * kPointerSize));
    613     la(sp, MemOperand(sp, 3 * kPointerSize));
    614   }
    615 
    616   // Pop four registers.  Pops rightmost register first (from lower address).
    617   void Pop(Register src1, Register src2, Register src3, Register src4) {
    618     LoadP(src4, MemOperand(sp, 0));
    619     LoadP(src3, MemOperand(sp, kPointerSize));
    620     LoadP(src2, MemOperand(sp, 2 * kPointerSize));
    621     LoadP(src1, MemOperand(sp, 3 * kPointerSize));
    622     la(sp, MemOperand(sp, 4 * kPointerSize));
    623   }
    624 
    625   // Pop five registers.  Pops rightmost register first (from lower address).
    626   void Pop(Register src1, Register src2, Register src3, Register src4,
    627            Register src5) {
    628     LoadP(src5, MemOperand(sp, 0));
    629     LoadP(src4, MemOperand(sp, kPointerSize));
    630     LoadP(src3, MemOperand(sp, 2 * kPointerSize));
    631     LoadP(src2, MemOperand(sp, 3 * kPointerSize));
    632     LoadP(src1, MemOperand(sp, 4 * kPointerSize));
    633     la(sp, MemOperand(sp, 5 * kPointerSize));
    634   }
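
          // Push/Pop sketch: the same argument order on both sides restores each
          // register (src1 is pushed first, to the highest address).
          //   Push(r3, r4, r5);
          //   ...
          //   Pop(r3, r4, r5);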
    635 
    636   // Push a fixed frame, consisting of lr, fp, constant pool.
    637   void PushCommonFrame(Register marker_reg = no_reg);
    638 
    639   // Push a standard frame, consisting of lr, fp, constant pool,
    640   // context and JS function
    641   void PushStandardFrame(Register function_reg);
    642 
    643   void PopCommonFrame(Register marker_reg = no_reg);
    644 
    645   // Restore caller's frame pointer and return address prior to being
    646   // overwritten by tail call stack preparation.
    647   void RestoreFrameStateForTailCall();
    648 
    649   void InitializeRootRegister() {
    650     ExternalReference roots_array_start =
    651         ExternalReference::roots_array_start(isolate());
    652     mov(kRootRegister, Operand(roots_array_start));
    653     AddP(kRootRegister, kRootRegister, Operand(kRootRegisterBias));
    654   }
    655 
    656   // If the value is a NaN, canonicalize the value; otherwise, do nothing.
    657   void CanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
    658   void CanonicalizeNaN(const DoubleRegister value) {
    659     CanonicalizeNaN(value, value);
    660   }
    661 
    662   // Converts the integer (untagged smi) in |src| to a double, storing
    663   // the result in |dst|.
    664   void ConvertIntToDouble(DoubleRegister dst, Register src);
    665 
    666   // Converts the unsigned integer (untagged smi) in |src| to
    667   // a double, storing the result in |dst|.
    668   void ConvertUnsignedIntToDouble(DoubleRegister dst, Register src);
    669 
    670   // Converts the integer (untagged smi) in |src| to
    671   // a float, storing the result in |dst|
    672   void ConvertIntToFloat(DoubleRegister dst, Register src);
    673 
    674   // Converts the unsigned integer (untagged smi) in |src| to
    675   // a float, storing the result in |dst|
    676   void ConvertUnsignedIntToFloat(DoubleRegister dst, Register src);
    677 
    678   void ConvertInt64ToFloat(DoubleRegister double_dst, Register src);
    679   void ConvertInt64ToDouble(DoubleRegister double_dst, Register src);
    680   void ConvertUnsignedInt64ToFloat(DoubleRegister double_dst, Register src);
    681   void ConvertUnsignedInt64ToDouble(DoubleRegister double_dst, Register src);
    682 
    683   void MovIntToFloat(DoubleRegister dst, Register src);
    684   void MovFloatToInt(Register dst, DoubleRegister src);
    685   void MovDoubleToInt64(Register dst, DoubleRegister src);
    686   void MovInt64ToDouble(DoubleRegister dst, Register src);
    687   // Converts the double_input to an integer.  Note that, upon return,
    688   // the contents of double_dst will also hold the fixed point representation.
    689   void ConvertFloat32ToInt64(const Register dst,
    690                              const DoubleRegister double_input,
    691                              FPRoundingMode rounding_mode = kRoundToZero);
    692 
    693   // Converts the double_input to an integer.  Note that, upon return,
    694   // the contents of double_dst will also hold the fixed point representation.
    695   void ConvertDoubleToInt64(const Register dst,
    696                             const DoubleRegister double_input,
    697                             FPRoundingMode rounding_mode = kRoundToZero);
    698   void ConvertDoubleToInt32(const Register dst,
    699                             const DoubleRegister double_input,
    700                             FPRoundingMode rounding_mode = kRoundToZero);
    701 
    702   void ConvertFloat32ToInt32(const Register result,
    703                              const DoubleRegister double_input,
    704                              FPRoundingMode rounding_mode);
    705   void ConvertFloat32ToUnsignedInt32(
    706       const Register result, const DoubleRegister double_input,
    707       FPRoundingMode rounding_mode = kRoundToZero);
    708   // Converts the double_input to an unsigned integer.  Note that, upon return,
    709   // the contents of double_dst will also hold the fixed point representation.
    710   void ConvertDoubleToUnsignedInt64(
    711       const Register dst, const DoubleRegister double_input,
    712       FPRoundingMode rounding_mode = kRoundToZero);
    713   void ConvertDoubleToUnsignedInt32(
    714       const Register dst, const DoubleRegister double_input,
    715       FPRoundingMode rounding_mode = kRoundToZero);
    716   void ConvertFloat32ToUnsignedInt64(
    717       const Register result, const DoubleRegister double_input,
    718       FPRoundingMode rounding_mode = kRoundToZero);
    719 
    720 #if !V8_TARGET_ARCH_S390X
    721   void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
    722                      Register src_high, Register scratch, Register shift);
    723   void ShiftLeftPair(Register dst_low, Register dst_high, Register src_low,
    724                      Register src_high, uint32_t shift);
    725   void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
    726                       Register src_high, Register scratch, Register shift);
    727   void ShiftRightPair(Register dst_low, Register dst_high, Register src_low,
    728                       Register src_high, uint32_t shift);
    729   void ShiftRightArithPair(Register dst_low, Register dst_high,
    730                            Register src_low, Register src_high,
    731                            Register scratch, Register shift);
    732   void ShiftRightArithPair(Register dst_low, Register dst_high,
    733                            Register src_low, Register src_high, uint32_t shift);
    734 #endif
    735 
    736   // Generates function and stub prologue code.
    737   void StubPrologue(StackFrame::Type type, Register base = no_reg,
    738                     int prologue_offset = 0);
    739   void Prologue(Register base, int prologue_offset = 0);
    740 
    741   // Get the actual activation frame alignment for target environment.
    742   static int ActivationFrameAlignment();
    743   // ----------------------------------------------------------------
    744   // New S390 macro-assembler interfaces that are slightly higher level
    745   // than assembler-s390 and may generate variable-length sequences.
    746 
    747   // Load a literal signed int value <value> into GPR <dst>.
    748   void LoadIntLiteral(Register dst, int value);
    749 
    750   // Load a Smi value <value> into GPR <dst>.
    751   void LoadSmiLiteral(Register dst, Smi* smi);
    752 
    753   // Load a literal double value <value> into FPR <result>.
    754   void LoadDoubleLiteral(DoubleRegister result, double value, Register scratch);
    755   void LoadDoubleLiteral(DoubleRegister result, uint64_t value,
    756                          Register scratch);
    757 
    758   void LoadFloat32Literal(DoubleRegister result, float value, Register scratch);
    759 
    760   void StoreW(Register src, const MemOperand& mem, Register scratch = no_reg);
    761 
    762   void LoadHalfWordP(Register dst, Register src);
    763 
    764   void LoadHalfWordP(Register dst, const MemOperand& mem,
    765                      Register scratch = no_reg);
    766 
    767   void StoreHalfWord(Register src, const MemOperand& mem,
    768                      Register scratch = r0);
    769   void StoreByte(Register src, const MemOperand& mem, Register scratch = r0);
    770 
    771   void AddSmiLiteral(Register dst, Register src, Smi* smi,
    772                      Register scratch = r0);
    773   void SubSmiLiteral(Register dst, Register src, Smi* smi,
    774                      Register scratch = r0);
    775   void CmpSmiLiteral(Register src1, Smi* smi, Register scratch);
    776   void CmpLogicalSmiLiteral(Register src1, Smi* smi, Register scratch);
    777   void AndSmiLiteral(Register dst, Register src, Smi* smi);
    778 
    779   // Set the new rounding mode RN in the floating point control register.
    780   void SetRoundingMode(FPRoundingMode RN);
    781 
    782   // Reset the rounding mode to the default (kRoundToNearest).
    783   void ResetRoundingMode();
    784 
    785   // These exist to provide portability between 32-bit and 64-bit builds.
    786   void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg);
    787   void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
    788   void StoreP(const MemOperand& mem, const Operand& opnd,
    789               Register scratch = no_reg);
    790   void LoadMultipleP(Register dst1, Register dst2, const MemOperand& mem);
    791   void StoreMultipleP(Register dst1, Register dst2, const MemOperand& mem);
    792   void LoadMultipleW(Register dst1, Register dst2, const MemOperand& mem);
    793   void StoreMultipleW(Register dst1, Register dst2, const MemOperand& mem);
    794 
    795   void SwapP(Register src, Register dst, Register scratch);
    796   void SwapP(Register src, MemOperand dst, Register scratch);
    797   void SwapP(MemOperand src, MemOperand dst, Register scratch_0,
    798              Register scratch_1);
    799   void SwapFloat32(DoubleRegister src, DoubleRegister dst,
    800                    DoubleRegister scratch);
    801   void SwapFloat32(DoubleRegister src, MemOperand dst, DoubleRegister scratch);
    802   void SwapFloat32(MemOperand src, MemOperand dst, DoubleRegister scratch_0,
    803                    DoubleRegister scratch_1);
    804   void SwapDouble(DoubleRegister src, DoubleRegister dst,
    805                   DoubleRegister scratch);
    806   void SwapDouble(DoubleRegister src, MemOperand dst, DoubleRegister scratch);
    807   void SwapDouble(MemOperand src, MemOperand dst, DoubleRegister scratch_0,
    808                   DoubleRegister scratch_1);
    809 
    810   // Cleanse a pointer address on 31-bit by zeroing out the top bit.
    811   // This is a no-op on 64-bit.
    812   void CleanseP(Register src) {
    813 #if (V8_HOST_ARCH_S390 && !(V8_TARGET_ARCH_S390X))
    814     nilh(src, Operand(0x7FFF));
    815 #endif
    816   }
    817 
    818   void PrepareForTailCall(const ParameterCount& callee_args_count,
    819                           Register caller_args_count_reg, Register scratch0,
    820                           Register scratch1);
    821 
    822   // ---------------------------------------------------------------------------
    823   // Runtime calls
    824 
    825   // Call a code stub.
    826   void CallStubDelayed(CodeStub* stub);
    827 
    828   // Call a runtime routine. This expects {centry} to contain a fitting CEntry
    829   // builtin for the target runtime function and uses an indirect call.
    830   void CallRuntimeWithCEntry(Runtime::FunctionId fid, Register centry);
    831 
    832   // Before calling a C-function from generated code, align arguments on stack.
    833   // After aligning the frame, non-register arguments must be stored in
    834   // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
    835   // are word sized. If double arguments are used, this function assumes that
    836   // all double arguments are stored before core registers; otherwise the
    837   // correct alignment of the double values is not guaranteed.
    838   // Some compilers/platforms require the stack to be aligned when calling
    839   // C++ code.
    840   // Needs a scratch register to do some arithmetic. This register will be
    841   // trashed.
    842   void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
    843                             Register scratch);
    844   void PrepareCallCFunction(int num_reg_arguments, Register scratch);
    845 
    846   // These functions abstract how floating point parameters are passed to C
    847   // functions called from generated code; the details depend on whether a
    848   // soft or hard floating point ABI is in use.
    850   void MovToFloatParameter(DoubleRegister src);
    851   void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
    852   void MovToFloatResult(DoubleRegister src);
    853 
    854   // Calls a C function and cleans up the space for arguments allocated
    855   // by PrepareCallCFunction. The called function is not allowed to trigger a
    856   // garbage collection, since that might move the code and invalidate the
    857   // return address (unless this is somehow accounted for by the called
    858   // function).
    859   void CallCFunction(ExternalReference function, int num_arguments);
    860   void CallCFunction(Register function, int num_arguments);
    861   void CallCFunction(ExternalReference function, int num_reg_arguments,
    862                      int num_double_arguments);
    863   void CallCFunction(Register function, int num_reg_arguments,
    864                      int num_double_arguments);
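
          // A call sketch, assuming the integer arguments are already in the first C
          // argument registers and 'ref' names an ExternalReference to the target C
          // function (illustrative, not a real symbol):
          //   PrepareCallCFunction(2, r1);  // r1 is used as a scratch register
          //   CallCFunction(ref, 2);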
    865 
    866   void MovFromFloatParameter(DoubleRegister dst);
    867   void MovFromFloatResult(DoubleRegister dst);
    868 
    869   // Emit code for a truncating division by a constant. The dividend register is
    870   // unchanged and ip gets clobbered. Dividend and result must be different.
    871   void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
    872                          DoubleRegister double_input, StubCallMode stub_mode);
    873   void TryInlineTruncateDoubleToI(Register result, DoubleRegister double_input,
    874                                   Label* done);
    875 
    876   // ---------------------------------------------------------------------------
    877   // Debugging
    878 
    879   // Calls Abort(msg) if the condition cond is not satisfied.
    880   // Use --debug-code to enable.
    881   void Assert(Condition cond, AbortReason reason, CRegister cr = cr7);
    882 
    883   // Like Assert(), but always enabled.
    884   void Check(Condition cond, AbortReason reason, CRegister cr = cr7);
    885 
    886   // Print a message to stdout and abort execution.
    887   void Abort(AbortReason reason);
    888 
    889   inline bool AllowThisStubCall(CodeStub* stub);
    890 
    891   // ---------------------------------------------------------------------------
    892   // Bit testing/extraction
    893   //
    894   // Bit numbering is such that the least significant bit is bit 0
    895   // (for consistency between 32/64-bit).
    896 
    897   // Extract consecutive bits (defined by rangeStart - rangeEnd) from src
    898   // and place them into the least significant bits of dst.
    899   inline void ExtractBitRange(Register dst, Register src, int rangeStart,
    900                               int rangeEnd) {
    901     DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
    902 
    903     // Try to use RISBG if possible.
    904     if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT)) {
    905       int shiftAmount = (64 - rangeEnd) % 64;  // Convert to shift left.
    906       int endBit = 63;  // End is always LSB after shifting.
    907       int startBit = 63 - rangeStart + rangeEnd;
    908       RotateInsertSelectBits(dst, src, Operand(startBit), Operand(endBit),
    909             Operand(shiftAmount), true);
    910     } else {
    911       if (rangeEnd > 0)  // Don't need to shift if rangeEnd is zero.
    912         ShiftRightP(dst, src, Operand(rangeEnd));
    913       else if (dst != src)  // If we didn't shift, we might need to copy
    914         LoadRR(dst, src);
    915       int width = rangeStart - rangeEnd + 1;
    916 #if V8_TARGET_ARCH_S390X
    917       uint64_t mask = (static_cast<uint64_t>(1) << width) - 1;
    918       nihf(dst, Operand(mask >> 32));
    919       nilf(dst, Operand(mask & 0xFFFFFFFF));
    920       ltgr(dst, dst);
    921 #else
    922       uint32_t mask = (1 << width) - 1;
    923       AndP(dst, Operand(mask));
    924 #endif
    925     }
    926   }
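
          // Example for ExtractBitRange: extract bits 7..4 of r3 into the low bits of
          // r4, so a source value of 0xAB yields 0xA (bit 0 is least significant).
          //   ExtractBitRange(r4, r3, 7, 4);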
    927 
    928   inline void ExtractBit(Register dst, Register src, uint32_t bitNumber) {
    929     ExtractBitRange(dst, src, bitNumber, bitNumber);
    930   }
    931 
    932   // Extract consecutive bits (defined by mask) from src and place them
    933   // into the least significant bits of dst.
    934   inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
    935                              RCBit rc = LeaveRC) {
    936     int start = kBitsPerPointer - 1;
    937     int end;
    938     uintptr_t bit = (1L << start);
    939 
    940     while (bit && (mask & bit) == 0) {
    941       start--;
    942       bit >>= 1;
    943     }
    944     end = start;
    945     bit >>= 1;
    946 
    947     while (bit && (mask & bit)) {
    948       end--;
    949       bit >>= 1;
    950     }
    951 
    952     // 1-bits in mask must be contiguous
    953     DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);
    954 
    955     ExtractBitRange(dst, src, start, end);
    956   }
    957 
    958   // Test single bit in value.
    959   inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
    960     ExtractBitRange(scratch, value, bitNumber, bitNumber);
    961   }
    962 
    963   // Test consecutive bit range in value.  Range is defined by
    964   // rangeStart - rangeEnd.
    965   inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
    966                            Register scratch = r0) {
    967     ExtractBitRange(scratch, value, rangeStart, rangeEnd);
    968   }
    969 
    970   // Test consecutive bit range in value.  Range is defined by mask.
    971   inline void TestBitMask(Register value, uintptr_t mask,
    972                           Register scratch = r0) {
    973     ExtractBitMask(scratch, value, mask, SetRC);
    974   }
    975   inline void TestIfSmi(Register value) { tmll(value, Operand(1)); }
    976 
    977   inline void TestIfSmi(MemOperand value) {
    978     if (is_uint12(value.offset())) {
    979       tm(value, Operand(1));
    980     } else if (is_int20(value.offset())) {
    981       tmy(value, Operand(1));
    982     } else {
    983       LoadB(r0, value);
    984       tmll(r0, Operand(1));
    985     }
    986   }
    987 
    988   inline void TestIfInt32(Register value) {
    989   // High bits must be identical to fit into a 32-bit integer.
    990     cgfr(value, value);
    991   }
    992   void SmiUntag(Register reg, int scale = 0) { SmiUntag(reg, reg, scale); }
    993 
    994   void SmiUntag(Register dst, Register src, int scale = 0) {
    995     if (scale > kSmiShift) {
    996       ShiftLeftP(dst, src, Operand(scale - kSmiShift));
    997     } else if (scale < kSmiShift) {
    998       ShiftRightArithP(dst, src, Operand(kSmiShift - scale));
    999     } else {
   1000       // do nothing
   1001     }
   1002   }
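
          // Untagging sketch: with a non-zero scale the untagged value comes out
          // pre-multiplied, which is handy for indexing.
          //   SmiUntag(r4, r3);                    // r4 = untagged value of the Smi in r3
          //   SmiUntag(r4, r3, kPointerSizeLog2);  // r4 = value * kPointerSize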
   1003 
   1004   // Activation support.
   1005   void EnterFrame(StackFrame::Type type,
   1006                   bool load_constant_pool_pointer_reg = false);
   1007   // Returns the pc offset at which the frame ends.
   1008   int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);
   1009 
   1010   void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
   1011                      Label* condition_met);
   1012 
   1013   void ResetSpeculationPoisonRegister();
   1014   void ComputeCodeStartAddress(Register dst);
   1015 
   1016  private:
   1017   static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
   1018 
   1019   void CallCFunctionHelper(Register function, int num_reg_arguments,
   1020                            int num_double_arguments);
   1021 
   1022   void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
   1023   int CalculateStackPassedWords(int num_reg_arguments,
   1024                                 int num_double_arguments);
   1025 };
   1026 
   1027 // MacroAssembler implements a collection of frequently used macros.
   1028 class MacroAssembler : public TurboAssembler {
   1029  public:
   1030   MacroAssembler(Isolate* isolate, void* buffer, int size,
   1031                  CodeObjectRequired create_code_object)
   1032       : MacroAssembler(isolate, AssemblerOptions::Default(isolate), buffer,
   1033                        size, create_code_object) {}
   1034   MacroAssembler(Isolate* isolate, const AssemblerOptions& options,
   1035                  void* buffer, int size, CodeObjectRequired create_code_object);
   1036 
   1037   // Call a code stub.
   1038   void TailCallStub(CodeStub* stub, Condition cond = al);
   1039 
   1040   void CallStub(CodeStub* stub, Condition cond = al);
   1041   void CallRuntime(const Runtime::Function* f, int num_arguments,
   1042                    SaveFPRegsMode save_doubles = kDontSaveFPRegs);
   1043   void CallRuntimeSaveDoubles(Runtime::FunctionId fid) {
   1044     const Runtime::Function* function = Runtime::FunctionForId(fid);
   1045     CallRuntime(function, function->nargs, kSaveFPRegs);
   1046   }
   1047 
   1048   // Convenience function: Same as above, but takes the fid instead.
   1049   void CallRuntime(Runtime::FunctionId fid,
   1050                    SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
   1051     const Runtime::Function* function = Runtime::FunctionForId(fid);
   1052     CallRuntime(function, function->nargs, save_doubles);
   1053   }
   1054 
   1055   // Convenience function: Same as above, but takes the fid instead.
   1056   void CallRuntime(Runtime::FunctionId fid, int num_arguments,
   1057                    SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
   1058     CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
   1059   }
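
          // A runtime-call sketch (any arguments must already be pushed as the callee
          // expects; the function id is illustrative):
          //   CallRuntime(Runtime::kStackGuard);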
   1060 
   1061   // Convenience function: tail call a runtime routine (jump).
   1062   void TailCallRuntime(Runtime::FunctionId fid);
   1063 
   1064   // ---------------------------------------------------------------------------
   1065   // Support functions.
   1066 
   1067   // Compare object type for heap object.  heap_object contains a non-Smi
   1068   // whose object type should be compared with the given type.  This both
   1069   // sets the flags and leaves the object type in the type_reg register.
   1070   // It leaves the map in the map register (unless the type_reg and map register
   1071   // are the same register).  It leaves the heap object in the heap_object
   1072   // register unless the heap_object register is the same register as one of the
   1073   // other registers.
   1074   // Type_reg can be no_reg. In that case ip is used.
   1075   void CompareObjectType(Register heap_object, Register map, Register type_reg,
   1076                          InstanceType type);
   1077 
   1078   // Compare instance type in a map.  map contains a valid map object whose
   1079   // object type should be compared with the given type.  This both
   1080   // sets the flags and leaves the object type in the type_reg register.
   1081   void CompareInstanceType(Register map, Register type_reg, InstanceType type);
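
          // A type-check sketch, assuming a heap object in r3 and a bound label
          // 'not_a_function':
          //   CompareObjectType(r3, r4, r5, JS_FUNCTION_TYPE);  // r4 = map, r5 = instance type
          //   bne(&not_a_function);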
   1082 
   1083   // Compare the object in a register to a value from the root list.
   1084   // Uses the ip register as scratch.
   1085   void CompareRoot(Register obj, Heap::RootListIndex index);
   1086   void PushRoot(Heap::RootListIndex index) {
   1087     LoadRoot(r0, index);
   1088     Push(r0);
   1089   }
   1090 
   1091   // Jump to a runtime routine.
   1092   void JumpToExternalReference(const ExternalReference& builtin,
   1093                                bool builtin_exit_frame = false);
   1094 
   1095   // Generates a trampoline to jump to the off-heap instruction stream.
   1096   void JumpToInstructionStream(Address entry);
   1097 
   1098   // Compare the object in a register to a value and jump if they are equal.
   1099   void JumpIfRoot(Register with, Heap::RootListIndex index, Label* if_equal) {
   1100     CompareRoot(with, index);
   1101     beq(if_equal);
   1102   }
   1103 
   1104   // Compare the object in a register to a value and jump if they are not equal.
   1105   void JumpIfNotRoot(Register with, Heap::RootListIndex index,
   1106                      Label* if_not_equal) {
   1107     CompareRoot(with, index);
   1108     bne(if_not_equal);
   1109   }
   1110 
   1111   // Try to convert a double to a signed 32-bit integer.
   1112   // CR_EQ in cr7 is set and result assigned if the conversion is exact.
   1113   void TryDoubleToInt32Exact(Register result, DoubleRegister double_input,
   1114                              Register scratch, DoubleRegister double_scratch);
   1115 
   1116   // ---------------------------------------------------------------------------
   1117   // In-place weak references.
   1118   void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
   1119 
   1120   // ---------------------------------------------------------------------------
   1121   // StatsCounter support
   1122 
   1123   void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
   1124                         Register scratch2);
   1125   void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
   1126                         Register scratch2);
   1127   // ---------------------------------------------------------------------------
   1128   // JavaScript invokes
   1129 
   1135   // PrepareForTailCall (declared in TurboAssembler above) removes the current
   1136   // frame and its arguments from the stack, preserving the arguments and a
   1137   // return address pushed to the stack for the next call. Both
   1138   // |callee_args_count| and |caller_args_count_reg| exclude the receiver;
   1139   // |callee_args_count| is not modified, |caller_args_count_reg| is trashed.
   1140 
  // Invoke the JavaScript function code by either calling or jumping.
  void InvokeFunctionCode(Register function, Register new_target,
                          const ParameterCount& expected,
                          const ParameterCount& actual, InvokeFlag flag);

  // On function call, call into the debugger if necessary.
  void CheckDebugHook(Register fun, Register new_target,
                      const ParameterCount& expected,
                      const ParameterCount& actual);

  // Invoke the JavaScript function in the given register. Changes the
  // current context to the context in the function before invoking.
  void InvokeFunction(Register function, Register new_target,
                      const ParameterCount& actual, InvokeFlag flag);

  void InvokeFunction(Register function, const ParameterCount& expected,
                      const ParameterCount& actual, InvokeFlag flag);

  // Frame restart support
  void MaybeDropFrames();

  // Exception handling

  // Push a new stack handler and link into stack handler chain.
  void PushStackHandler();

  // Unlink the stack handler on top of the stack from the stack handler chain.
  // Must preserve the result register.
  void PopStackHandler();

  // Enter exit frame.
  // stack_space - extra stack space, used for parameters before call to C.
  // At least one slot (for the return address) should be provided.
  void EnterExitFrame(bool save_doubles, int stack_space = 1,
                      StackFrame::Type frame_type = StackFrame::EXIT);

  // Leave the current exit frame. Expects the return value in r2
  // (kReturnRegister0). Expects the number of values pushed prior to the
  // exit frame that should be removed, in a register (or no_reg if there is
  // nothing to remove).
  void LeaveExitFrame(bool save_doubles, Register argument_count,
                      bool argument_count_is_length = false);
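  // Illustrative pairing only (hypothetical; real callers also marshal the
  // outgoing C arguments): EnterExitFrame and LeaveExitFrame must be used
  // together, with matching save_doubles values.
  //
  //   EnterExitFrame(false, 3);       // reserve 3 slots for outgoing C args
  //   // ... store arguments, call out to C ...
  //   LeaveExitFrame(false, no_reg);  // nothing extra to drop from the stack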

  // Load the global proxy from the current context.
  void LoadGlobalProxy(Register dst) {
    LoadNativeContextSlot(Context::GLOBAL_PROXY_INDEX, dst);
  }

  void LoadNativeContextSlot(int index, Register dst);

  // ---------------------------------------------------------------------------
  // Smi utilities

  // Shift left by kSmiShift
  void SmiTag(Register reg) { SmiTag(reg, reg); }
  void SmiTag(Register dst, Register src) {
    ShiftLeftP(dst, src, Operand(kSmiShift));
  }
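  // Worked example (illustrative): on S390X the STATIC_ASSERTs below give
  // kSmiShift == kSmiTagSize + kSmiShiftSize == 32, so SmiTag turns the
  // integer 5 into 5 << 32 == 0x0000000500000000 -- payload in the upper
  // word, tag bits zero. With 31-bit Smis the shift is only 1.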

  void SmiToPtrArrayOffset(Register dst, Register src) {
#if V8_TARGET_ARCH_S390X
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
    ShiftRightArithP(dst, src, Operand(kSmiShift - kPointerSizeLog2));
#else
    STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2);
    ShiftLeftP(dst, src, Operand(kPointerSizeLog2 - kSmiShift));
#endif
  }
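  // Worked example of the shift amounts above (illustrative): on S390X a Smi
  // index i is encoded as i << 32 and pointer-sized elements are 8 bytes
  // (kPointerSizeLog2 == 3), so the byte offset i << 3 is produced by an
  // arithmetic right shift of 32 - 3 == 29. With 31-bit Smis the encoding is
  // i << 1 and elements are 4 bytes, hence a left shift by 2 - 1 == 1.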

  // Untag the source value into destination and jump if source is a smi.
  // Source and destination can be the same register.
  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);

  // Jump if the register contains a non-smi.
  inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
    TestIfSmi(value);
    bne(not_smi_label /*, cr0*/);
  }
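  // Illustrative usage (hypothetical): skip a slow path when the input is
  // already a Smi. TestIfSmi checks the low tag bit (kSmiTag == 0), so the
  // branch is taken for heap objects.
  //
  //   Label not_smi;
  //   JumpIfNotSmi(r4, &not_smi);
  //   // ... fast path for Smi input ...
  //   bind(&not_smi);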
  // Jump if either of the registers contains a smi.
  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);

  // Abort execution if argument is a smi, enabled via --debug-code.
  void AssertNotSmi(Register object);
  void AssertSmi(Register object);

#if V8_TARGET_ARCH_S390X
  // Ensure it is permissible to read/write int value directly from
  // upper half of the smi.
  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
#endif
#if V8_TARGET_LITTLE_ENDIAN
#define SmiWordOffset(offset) (offset + kPointerSize / 2)
#else
#define SmiWordOffset(offset) offset
#endif
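  // Worked example (illustrative): with 8-byte pointers the 32-bit Smi
  // payload occupies the upper half of the tagged word. On a little-endian
  // target that half sits at byte offset +4, so SmiWordOffset(off) == off + 4
  // and a 32-bit load from that address reads the untagged value directly;
  // on big-endian S390 the payload is already at offset 0.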

  // Abort execution if argument is not a Constructor, enabled via --debug-code.
  void AssertConstructor(Register object, Register scratch);

  // Abort execution if argument is not a JSFunction, enabled via --debug-code.
  void AssertFunction(Register object);

  // Abort execution if argument is not a JSBoundFunction,
  // enabled via --debug-code.
  void AssertBoundFunction(Register object);

  // Abort execution if argument is not a JSGeneratorObject (or subclass),
  // enabled via --debug-code.
  void AssertGeneratorObject(Register object);

  // Abort execution if argument is not undefined or an AllocationSite, enabled
  // via --debug-code.
  void AssertUndefinedOrAllocationSite(Register object, Register scratch);

  template <typename Field>
  void DecodeField(Register dst, Register src) {
    ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift);
  }

  template <typename Field>
  void DecodeField(Register reg) {
    DecodeField<Field>(reg, reg);
  }
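  // Illustrative usage with a made-up field (hypothetical FooBits): given
  //
  //   class FooBits : public BitField<int, /* shift */ 3, /* size */ 4> {};
  //
  // DecodeField<FooBits>(r3, r4) extracts bits [6:3] of r4 into r3, i.e.
  // (r4 >> 3) & 0xF.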

  // ---------------------------------------------------------------------------
  // GC Support

  void IncrementalMarkingRecordWriteHelper(Register object, Register value,
                                           Register address);

  void CallJSEntry(Register target);
  static int CallSizeNotPredictableCodeSize(Address target,
                                            RelocInfo::Mode rmode,
                                            Condition cond = al);
  void JumpToJSEntry(Register target);

  // Notify the garbage collector that we wrote a pointer into an object.
  // |object| is the object being stored into, |value| is the object being
  // stored. The value and scratch registers are clobbered by the operation.
  // The offset is the offset from the start of the object, not the offset from
  // the tagged HeapObject pointer. For use with FieldMemOperand(reg, off).
  void RecordWriteField(
      Register object, int offset, Register value, Register scratch,
      LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK);

  // For a given |object| notify the garbage collector that the slot |address|
  // has been written. |value| is the object being stored. The value and
  // address registers are clobbered by the operation.
  void RecordWrite(
      Register object, Register address, Register value,
      LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
      SmiCheck smi_check = INLINE_SMI_CHECK);
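  // Illustrative write-barrier pattern (hypothetical registers and offset;
  // JSObject::kFooOffset is made up for the example): after storing a tagged
  // pointer into an object field, tell the GC about it so incremental marking
  // and the remembered set stay consistent.
  //
  //   StoreP(value_reg, FieldMemOperand(object_reg, JSObject::kFooOffset));
  //   RecordWriteField(object_reg, JSObject::kFooOffset, value_reg,
  //                    scratch_reg, kLRHasNotBeenSaved, kDontSaveFPRegs);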

  // Push and pop the registers that can hold pointers, as defined by the
  // RegList constant kSafepointSavedRegisters.
  void PushSafepointRegisters();
  void PopSafepointRegisters();

  void LoadRepresentation(Register dst, const MemOperand& mem, Representation r,
                          Register scratch = no_reg);
  void StoreRepresentation(Register src, const MemOperand& mem,
                           Representation r, Register scratch = no_reg);

 private:
  static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
  // Helper functions for generating invokes.
  void InvokePrologue(const ParameterCount& expected,
                      const ParameterCount& actual, Label* done,
                      bool* definitely_mismatches, InvokeFlag flag);

  // Compute memory operands for safepoint stack slots.
  static int SafepointRegisterStackIndex(int reg_code);

  // Needs access to SafepointRegisterStackIndex for compiled frame
  // traversal.
  friend class StandardFrame;
};

// -----------------------------------------------------------------------------
// Static helper functions.

inline MemOperand ContextMemOperand(Register context, int index = 0) {
  return MemOperand(context, Context::SlotOffset(index));
}

inline MemOperand NativeContextMemOperand() {
  return ContextMemOperand(cp, Context::NATIVE_CONTEXT_INDEX);
}
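// Illustrative usage (hypothetical): loading context slots inside a code
// generator, assuming cp holds the current context as in the helpers above
// and using the ACCESS_MASM shorthand defined below.
//
//   __ LoadP(r4, ContextMemOperand(cp, Context::GLOBAL_PROXY_INDEX));
//   __ LoadP(r5, NativeContextMemOperand());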

#define ACCESS_MASM(masm) masm->

}  // namespace internal
}  // namespace v8

#endif  // V8_S390_MACRO_ASSEMBLER_S390_H_