/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_COMMON_ARM64_H_
#define ART_COMPILER_OPTIMIZING_COMMON_ARM64_H_

#include "code_generator.h"
#include "instruction_simplifier_shared.h"
#include "locations.h"
#include "nodes.h"
#include "utils/arm64/assembler_arm64.h"

// TODO(VIXL): Make VIXL compile with -Wshadow.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
#include "aarch64/disasm-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"
#include "aarch64/simulator-aarch64.h"
#pragma GCC diagnostic pop

namespace art {

using helpers::CanFitInShifterOperand;
using helpers::HasShifterOperand;

namespace arm64 {
namespace helpers {

// Convenience helpers to ease conversion to and from VIXL operands.
static_assert((SP == 31) && (WSP == 31) && (XZR == 32) && (WZR == 32),
              "Unexpected values for register codes.");

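// ART encodes the stack pointer as code 31 and the zero register as code 32
// (see the static_assert above), whereas VIXL uses kSPRegInternalCode and
// kZeroRegCode for those registers. The two helpers below translate register
// codes between the two conventions; all other codes are identical.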
inline int VIXLRegCodeFromART(int code) {
  if (code == SP) {
    return vixl::aarch64::kSPRegInternalCode;
  }
  if (code == XZR) {
    return vixl::aarch64::kZeroRegCode;
  }
  return code;
}

inline int ARTRegCodeFromVIXL(int code) {
  if (code == vixl::aarch64::kSPRegInternalCode) {
    return SP;
  }
  if (code == vixl::aarch64::kZeroRegCode) {
    return XZR;
  }
  return code;
}

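// Location-to-register conversions. A Location stores an ART register code;
// the helpers below translate it and return the VIXL register of the
// requested size (X/W for core registers, D/S/Q/V for FP and SIMD registers).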
inline vixl::aarch64::Register XRegisterFrom(Location location) {
  DCHECK(location.IsRegister()) << location;
  return vixl::aarch64::Register::GetXRegFromCode(VIXLRegCodeFromART(location.reg()));
}

inline vixl::aarch64::Register WRegisterFrom(Location location) {
  DCHECK(location.IsRegister()) << location;
  return vixl::aarch64::Register::GetWRegFromCode(VIXLRegCodeFromART(location.reg()));
}

inline vixl::aarch64::Register RegisterFrom(Location location, DataType::Type type) {
  DCHECK(type != DataType::Type::kVoid && !DataType::IsFloatingPointType(type)) << type;
  return type == DataType::Type::kInt64 ? XRegisterFrom(location) : WRegisterFrom(location);
}

inline vixl::aarch64::Register OutputRegister(HInstruction* instr) {
  return RegisterFrom(instr->GetLocations()->Out(), instr->GetType());
}

inline vixl::aarch64::Register InputRegisterAt(HInstruction* instr, int input_index) {
  return RegisterFrom(instr->GetLocations()->InAt(input_index),
                      instr->InputAt(input_index)->GetType());
}

inline vixl::aarch64::FPRegister DRegisterFrom(Location location) {
  DCHECK(location.IsFpuRegister()) << location;
  return vixl::aarch64::FPRegister::GetDRegFromCode(location.reg());
}

inline vixl::aarch64::FPRegister QRegisterFrom(Location location) {
  DCHECK(location.IsFpuRegister()) << location;
  return vixl::aarch64::FPRegister::GetQRegFromCode(location.reg());
}

inline vixl::aarch64::FPRegister VRegisterFrom(Location location) {
  DCHECK(location.IsFpuRegister()) << location;
  return vixl::aarch64::FPRegister::GetVRegFromCode(location.reg());
}

inline vixl::aarch64::FPRegister SRegisterFrom(Location location) {
  DCHECK(location.IsFpuRegister()) << location;
  return vixl::aarch64::FPRegister::GetSRegFromCode(location.reg());
}

inline vixl::aarch64::FPRegister FPRegisterFrom(Location location, DataType::Type type) {
  DCHECK(DataType::IsFloatingPointType(type)) << type;
  return type == DataType::Type::kFloat64 ? DRegisterFrom(location) : SRegisterFrom(location);
}

inline vixl::aarch64::FPRegister OutputFPRegister(HInstruction* instr) {
  return FPRegisterFrom(instr->GetLocations()->Out(), instr->GetType());
}

inline vixl::aarch64::FPRegister InputFPRegisterAt(HInstruction* instr, int input_index) {
  return FPRegisterFrom(instr->GetLocations()->InAt(input_index),
                        instr->InputAt(input_index)->GetType());
}

inline vixl::aarch64::CPURegister CPURegisterFrom(Location location, DataType::Type type) {
  return DataType::IsFloatingPointType(type)
      ? vixl::aarch64::CPURegister(FPRegisterFrom(location, type))
      : vixl::aarch64::CPURegister(RegisterFrom(location, type));
}

inline vixl::aarch64::CPURegister OutputCPURegister(HInstruction* instr) {
  return DataType::IsFloatingPointType(instr->GetType())
      ? static_cast<vixl::aarch64::CPURegister>(OutputFPRegister(instr))
      : static_cast<vixl::aarch64::CPURegister>(OutputRegister(instr));
}

inline vixl::aarch64::CPURegister InputCPURegisterAt(HInstruction* instr, int index) {
  return DataType::IsFloatingPointType(instr->InputAt(index)->GetType())
      ? static_cast<vixl::aarch64::CPURegister>(InputFPRegisterAt(instr, index))
      : static_cast<vixl::aarch64::CPURegister>(InputRegisterAt(instr, index));
}

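// Like InputCPURegisterAt, but returns the zero register (xzr or wzr,
// depending on the input's size) when the input is a constant whose bit
// pattern is zero, so no register needs to be materialized for it.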
inline vixl::aarch64::CPURegister InputCPURegisterOrZeroRegAt(HInstruction* instr, int index) {
  HInstruction* input = instr->InputAt(index);
  DataType::Type input_type = input->GetType();
  if (input->IsConstant() && input->AsConstant()->IsZeroBitPattern()) {
    return (DataType::Size(input_type) >= vixl::aarch64::kXRegSizeInBytes)
        ? vixl::aarch64::Register(vixl::aarch64::xzr)
        : vixl::aarch64::Register(vixl::aarch64::wzr);
  }
  return InputCPURegisterAt(instr, index);
}

inline int64_t Int64FromLocation(Location location) {
  return Int64FromConstant(location.GetConstant());
}

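// Builds a VIXL Operand from a Location: the register it holds if it is a
// register location, otherwise the 64-bit value of the constant it holds.
// Illustrative use from a code generator visitor (a sketch only, where `__`
// stands for the VIXL macro assembler):
//   __ Add(OutputRegister(instr), InputRegisterAt(instr, 0), InputOperandAt(instr, 1));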
inline vixl::aarch64::Operand OperandFrom(Location location, DataType::Type type) {
  if (location.IsRegister()) {
    return vixl::aarch64::Operand(RegisterFrom(location, type));
  } else {
    return vixl::aarch64::Operand(Int64FromLocation(location));
  }
}

inline vixl::aarch64::Operand InputOperandAt(HInstruction* instr, int input_index) {
  return OperandFrom(instr->GetLocations()->InAt(input_index),
                     instr->InputAt(input_index)->GetType());
}

inline vixl::aarch64::MemOperand StackOperandFrom(Location location) {
  return vixl::aarch64::MemOperand(vixl::aarch64::sp, location.GetStackIndex());
}

inline vixl::aarch64::MemOperand HeapOperand(const vixl::aarch64::Register& base,
                                             size_t offset = 0) {
  // A heap reference must be 32-bit, so it fits in a W register.
  DCHECK(base.IsW());
  return vixl::aarch64::MemOperand(base.X(), offset);
}

inline vixl::aarch64::MemOperand HeapOperand(const vixl::aarch64::Register& base,
                                             const vixl::aarch64::Register& regoffset,
                                             vixl::aarch64::Shift shift = vixl::aarch64::LSL,
                                             unsigned shift_amount = 0) {
  // A heap reference must be 32-bit, so it fits in a W register.
  DCHECK(base.IsW());
  return vixl::aarch64::MemOperand(base.X(), regoffset, shift, shift_amount);
}

inline vixl::aarch64::MemOperand HeapOperand(const vixl::aarch64::Register& base,
                                             Offset offset) {
  return HeapOperand(base, offset.SizeValue());
}

inline vixl::aarch64::MemOperand HeapOperandFrom(Location location, Offset offset) {
  return HeapOperand(RegisterFrom(location, DataType::Type::kReference), offset);
}

inline Location LocationFrom(const vixl::aarch64::Register& reg) {
  return Location::RegisterLocation(ARTRegCodeFromVIXL(reg.GetCode()));
}

inline Location LocationFrom(const vixl::aarch64::FPRegister& fpreg) {
  return Location::FpuRegisterLocation(fpreg.GetCode());
}

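// Extracts the offset part of a MemOperand as a plain Operand: either the
// immediate offset, or the register offset together with its extend or shift.
// Any other addressing form is unexpected and aborts.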
inline vixl::aarch64::Operand OperandFromMemOperand(
    const vixl::aarch64::MemOperand& mem_op) {
  if (mem_op.IsImmediateOffset()) {
    return vixl::aarch64::Operand(mem_op.GetOffset());
  } else {
    DCHECK(mem_op.IsRegisterOffset());
    if (mem_op.GetExtend() != vixl::aarch64::NO_EXTEND) {
      return vixl::aarch64::Operand(mem_op.GetRegisterOffset(),
                                    mem_op.GetExtend(),
                                    mem_op.GetShiftAmount());
    } else if (mem_op.GetShift() != vixl::aarch64::NO_SHIFT) {
      return vixl::aarch64::Operand(mem_op.GetRegisterOffset(),
                                    mem_op.GetShift(),
                                    mem_op.GetShiftAmount());
    } else {
      LOG(FATAL) << "Should not reach here";
      UNREACHABLE();
    }
  }
}

inline bool AddSubCanEncodeAsImmediate(int64_t value) {
  // If `value` does not fit but `-value` does, VIXL will automatically use
  // the 'opposite' instruction.
  return vixl::aarch64::Assembler::IsImmAddSub(value)
      || vixl::aarch64::Assembler::IsImmAddSub(-value);
}

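// Returns true if `constant`, used as an input of `instr`, can be encoded
// directly in the instruction(s) emitted for `instr`, so the register
// allocator does not need to keep it in a register. The checks below mirror
// the encodings used when generating code for each kind of instruction.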
inline bool Arm64CanEncodeConstantAsImmediate(HConstant* constant, HInstruction* instr) {
  int64_t value = CodeGenerator::GetInt64ValueOf(constant);

  // TODO: Improve this when the IsSIMDConstantEncodable method is implemented in VIXL.
  if (instr->IsVecReplicateScalar()) {
    if (constant->IsLongConstant()) {
      return false;
    } else if (constant->IsFloatConstant()) {
      return vixl::aarch64::Assembler::IsImmFP32(constant->AsFloatConstant()->GetValue());
    } else if (constant->IsDoubleConstant()) {
      return vixl::aarch64::Assembler::IsImmFP64(constant->AsDoubleConstant()->GetValue());
    }
    return IsUint<8>(value);
  }

  // Code generation for Min/Max:
  //    Cmp left_op, right_op
  //    Csel dst, left_op, right_op, cond
  if (instr->IsMin() || instr->IsMax()) {
    if (constant->GetUses().HasExactlyOneElement()) {
      // If the value can be encoded as an immediate for the Cmp, then let VIXL
      // handle the constant generation for the Csel.
      return AddSubCanEncodeAsImmediate(value);
    }
    // These values are encodable as immediates for Cmp, and VIXL will use csinc and csinv
    // with the zr register as right_op, hence no constant generation is required.
    return constant->IsZeroBitPattern() || constant->IsOne() || constant->IsMinusOne();
  }

  // For single uses we let VIXL handle the constant generation since it will
  // use registers that are not managed by the register allocator (wip0, wip1).
  if (constant->GetUses().HasExactlyOneElement()) {
    return true;
  }

  // Our code generator ensures shift distances are within an encodable range.
  if (instr->IsRor()) {
    return true;
  }

  if (instr->IsAnd() || instr->IsOr() || instr->IsXor()) {
    // Uses logical operations.
    return vixl::aarch64::Assembler::IsImmLogical(value, vixl::aarch64::kXRegSize);
  } else if (instr->IsNeg()) {
    // Uses mov -immediate.
    return vixl::aarch64::Assembler::IsImmMovn(value, vixl::aarch64::kXRegSize);
  } else {
    DCHECK(instr->IsAdd() ||
           instr->IsIntermediateAddress() ||
           instr->IsBoundsCheck() ||
           instr->IsCompare() ||
           instr->IsCondition() ||
           instr->IsSub())
        << instr->DebugName();
    // Uses aliases of ADD/SUB instructions.
    return AddSubCanEncodeAsImmediate(value);
  }
}

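// Returns a constant Location for `constant` if it can be encoded as an
// immediate in `instr`; otherwise requests a register, so that encodable
// constants do not consume one.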
inline Location ARM64EncodableConstantOrRegister(HInstruction* constant, HInstruction* instr) {
  if (constant->IsConstant()
      && Arm64CanEncodeConstantAsImmediate(constant->AsConstant(), instr)) {
    return Location::ConstantLocation(constant->AsConstant());
  }

  return Location::RequiresRegister();
}

// Checks whether the registers in an ART register set have the same register
// codes in VIXL. If the codes match, a VIXL register list can be initialized
// directly from the ART register masks. Currently, only the SP/WSP and
// XZR/WZR codes differ between ART and VIXL.
// Note: This function is only used for debug checks.
inline bool ArtVixlRegCodeCoherentForRegSet(uint32_t art_core_registers,
                                            size_t num_core,
                                            uint32_t art_fpu_registers,
                                            size_t num_fpu) {
  // The register masks won't work if the number of registers is larger than 32.
  DCHECK_GE(sizeof(art_core_registers) * 8, num_core);
  DCHECK_GE(sizeof(art_fpu_registers) * 8, num_fpu);
  for (size_t art_reg_code = 0; art_reg_code < num_core; ++art_reg_code) {
    if (RegisterSet::Contains(art_core_registers, art_reg_code)) {
      if (art_reg_code != static_cast<size_t>(VIXLRegCodeFromART(art_reg_code))) {
        return false;
      }
    }
  }
  // There is no register code translation for float registers.
  return true;
}

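// Maps an HDataProcWithShifterOp shift kind to the corresponding VIXL shift
// operator. Aborts for extension kinds and other values; those are handled by
// ExtendFromOpKind below.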
inline vixl::aarch64::Shift ShiftFromOpKind(HDataProcWithShifterOp::OpKind op_kind) {
  switch (op_kind) {
    case HDataProcWithShifterOp::kASR: return vixl::aarch64::ASR;
    case HDataProcWithShifterOp::kLSL: return vixl::aarch64::LSL;
    case HDataProcWithShifterOp::kLSR: return vixl::aarch64::LSR;
    default:
      LOG(FATAL) << "Unexpected op kind " << op_kind;
      UNREACHABLE();
      return vixl::aarch64::NO_SHIFT;
  }
}

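// Maps an HDataProcWithShifterOp extension kind to the corresponding VIXL
// extend operator. Aborts for shift kinds and other values.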
inline vixl::aarch64::Extend ExtendFromOpKind(HDataProcWithShifterOp::OpKind op_kind) {
  switch (op_kind) {
    case HDataProcWithShifterOp::kUXTB: return vixl::aarch64::UXTB;
    case HDataProcWithShifterOp::kUXTH: return vixl::aarch64::UXTH;
    case HDataProcWithShifterOp::kUXTW: return vixl::aarch64::UXTW;
    case HDataProcWithShifterOp::kSXTB: return vixl::aarch64::SXTB;
    case HDataProcWithShifterOp::kSXTH: return vixl::aarch64::SXTH;
    case HDataProcWithShifterOp::kSXTW: return vixl::aarch64::SXTW;
    default:
      LOG(FATAL) << "Unexpected op kind " << op_kind;
      UNREACHABLE();
      return vixl::aarch64::NO_EXTEND;
  }
}

inline bool ShifterOperandSupportsExtension(HInstruction* instruction) {
  DCHECK(HasShifterOperand(instruction, InstructionSet::kArm64));
  // Although the `neg` instruction is an alias of the `sub` instruction, `HNeg`
  // does *not* support extension. This is because the `extended register` form
  // of the `sub` instruction interprets the left register with code 31 as the
  // stack pointer and not the zero register. (So does the `immediate` form.) In
  // the other form, `shifted register`, the register with code 31 is interpreted
  // as the zero register.
  return instruction->IsAdd() || instruction->IsSub();
}

inline bool IsConstantZeroBitPattern(const HInstruction* instruction) {
  return instruction->IsConstant() && instruction->AsConstant()->IsZeroBitPattern();
}

}  // namespace helpers
}  // namespace arm64
}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_COMMON_ARM64_H_