/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_COMMON_ARM64_H_
#define ART_COMPILER_OPTIMIZING_COMMON_ARM64_H_

#include "code_generator.h"
#include "locations.h"
#include "nodes.h"
#include "utils/arm64/assembler_arm64.h"
#include "vixl/a64/disasm-a64.h"
#include "vixl/a64/macro-assembler-a64.h"

namespace art {
namespace arm64 {
namespace helpers {

// Convenience helpers to ease conversion to and from VIXL operands.
static_assert((SP == 31) && (WSP == 31) && (XZR == 32) && (WZR == 32),
              "Unexpected values for register codes.");

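// ART gives SP/WSP and XZR/WZR their own register codes (31 and 32, as
// asserted above), while VIXL identifies these registers through
// kSPRegInternalCode and kZeroRegCode. The two helpers below translate
// between the schemes; all other core register codes are identical.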
static inline int VIXLRegCodeFromART(int code) {
  if (code == SP) {
    return vixl::kSPRegInternalCode;
  }
  if (code == XZR) {
    return vixl::kZeroRegCode;
  }
  return code;
}

static inline int ARTRegCodeFromVIXL(int code) {
  if (code == vixl::kSPRegInternalCode) {
    return SP;
  }
  if (code == vixl::kZeroRegCode) {
    return XZR;
  }
  return code;
}

static inline vixl::Register XRegisterFrom(Location location) {
  DCHECK(location.IsRegister()) << location;
  return vixl::Register::XRegFromCode(VIXLRegCodeFromART(location.reg()));
}

static inline vixl::Register WRegisterFrom(Location location) {
  DCHECK(location.IsRegister()) << location;
  return vixl::Register::WRegFromCode(VIXLRegCodeFromART(location.reg()));
}

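// Core registers are used as 64-bit X registers for longs and as 32-bit W
// registers for every other non-floating-point type.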
static inline vixl::Register RegisterFrom(Location location, Primitive::Type type) {
  DCHECK(type != Primitive::kPrimVoid && !Primitive::IsFloatingPointType(type)) << type;
  return type == Primitive::kPrimLong ? XRegisterFrom(location) : WRegisterFrom(location);
}

static inline vixl::Register OutputRegister(HInstruction* instr) {
  return RegisterFrom(instr->GetLocations()->Out(), instr->GetType());
}

static inline vixl::Register InputRegisterAt(HInstruction* instr, int input_index) {
  return RegisterFrom(instr->GetLocations()->InAt(input_index),
                      instr->InputAt(input_index)->GetType());
}

static inline vixl::FPRegister DRegisterFrom(Location location) {
  DCHECK(location.IsFpuRegister()) << location;
  return vixl::FPRegister::DRegFromCode(location.reg());
}

static inline vixl::FPRegister SRegisterFrom(Location location) {
  DCHECK(location.IsFpuRegister()) << location;
  return vixl::FPRegister::SRegFromCode(location.reg());
}

static inline vixl::FPRegister FPRegisterFrom(Location location, Primitive::Type type) {
  DCHECK(Primitive::IsFloatingPointType(type)) << type;
  return type == Primitive::kPrimDouble ? DRegisterFrom(location) : SRegisterFrom(location);
}

static inline vixl::FPRegister OutputFPRegister(HInstruction* instr) {
  return FPRegisterFrom(instr->GetLocations()->Out(), instr->GetType());
}

static inline vixl::FPRegister InputFPRegisterAt(HInstruction* instr, int input_index) {
  return FPRegisterFrom(instr->GetLocations()->InAt(input_index),
                        instr->InputAt(input_index)->GetType());
}

static inline vixl::CPURegister CPURegisterFrom(Location location, Primitive::Type type) {
  return Primitive::IsFloatingPointType(type) ? vixl::CPURegister(FPRegisterFrom(location, type))
                                              : vixl::CPURegister(RegisterFrom(location, type));
}

static inline vixl::CPURegister OutputCPURegister(HInstruction* instr) {
  return Primitive::IsFloatingPointType(instr->GetType())
      ? static_cast<vixl::CPURegister>(OutputFPRegister(instr))
      : static_cast<vixl::CPURegister>(OutputRegister(instr));
}

static inline vixl::CPURegister InputCPURegisterAt(HInstruction* instr, int index) {
  return Primitive::IsFloatingPointType(instr->InputAt(index)->GetType())
      ? static_cast<vixl::CPURegister>(InputFPRegisterAt(instr, index))
      : static_cast<vixl::CPURegister>(InputRegisterAt(instr, index));
}

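// Widens an int, long or null constant location to a plain int64_t so it can
// be used directly as an immediate operand.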
static inline int64_t Int64ConstantFrom(Location location) {
  HConstant* instr = location.GetConstant();
  if (instr->IsIntConstant()) {
    return instr->AsIntConstant()->GetValue();
  } else if (instr->IsNullConstant()) {
    return 0;
  } else {
    DCHECK(instr->IsLongConstant()) << instr->DebugName();
    return instr->AsLongConstant()->GetValue();
  }
}

static inline vixl::Operand OperandFrom(Location location, Primitive::Type type) {
  if (location.IsRegister()) {
    return vixl::Operand(RegisterFrom(location, type));
  } else {
    return vixl::Operand(Int64ConstantFrom(location));
  }
}

static inline vixl::Operand InputOperandAt(HInstruction* instr, int input_index) {
  return OperandFrom(instr->GetLocations()->InAt(input_index),
                     instr->InputAt(input_index)->GetType());
}
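
// Illustrative use only (the visitor context and the `__` macro-assembler
// shorthand below are assumptions about the surrounding code generator, not
// something defined in this header): a typical binary-operation visitor
// fetches its operands through these helpers, e.g.
//   vixl::Register dst = OutputRegister(instr);
//   vixl::Register lhs = InputRegisterAt(instr, 0);
//   vixl::Operand rhs = InputOperandAt(instr, 1);
//   __ Add(dst, lhs, rhs);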

static inline vixl::MemOperand StackOperandFrom(Location location) {
  return vixl::MemOperand(vixl::sp, location.GetStackIndex());
}

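// Memory operands for heap accesses. The base is the W view of the register
// holding the 32-bit object reference; the address computation itself needs
// the full register width, hence the widening with base.X() below.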
static inline vixl::MemOperand HeapOperand(const vixl::Register& base, size_t offset = 0) {
  // A heap reference must be 32 bits, so it fits in a W register.
  DCHECK(base.IsW());
  return vixl::MemOperand(base.X(), offset);
}

static inline vixl::MemOperand HeapOperand(const vixl::Register& base,
                                           const vixl::Register& regoffset,
                                           vixl::Shift shift = vixl::LSL,
                                           unsigned shift_amount = 0) {
  // A heap reference must be 32 bits, so it fits in a W register.
  DCHECK(base.IsW());
  return vixl::MemOperand(base.X(), regoffset, shift, shift_amount);
}

static inline vixl::MemOperand HeapOperand(const vixl::Register& base, Offset offset) {
  return HeapOperand(base, offset.SizeValue());
}

static inline vixl::MemOperand HeapOperandFrom(Location location, Offset offset) {
  return HeapOperand(RegisterFrom(location, Primitive::kPrimNot), offset);
}

static inline Location LocationFrom(const vixl::Register& reg) {
  return Location::RegisterLocation(ARTRegCodeFromVIXL(reg.code()));
}

static inline Location LocationFrom(const vixl::FPRegister& fpreg) {
  return Location::FpuRegisterLocation(fpreg.code());
}

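// Extracts the offset component of a MemOperand (either an immediate, or a
// register offset together with its shift or extend) as a plain Operand.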
static inline vixl::Operand OperandFromMemOperand(const vixl::MemOperand& mem_op) {
  if (mem_op.IsImmediateOffset()) {
    return vixl::Operand(mem_op.offset());
  } else {
    DCHECK(mem_op.IsRegisterOffset());
    if (mem_op.extend() != vixl::NO_EXTEND) {
      return vixl::Operand(mem_op.regoffset(), mem_op.extend(), mem_op.shift_amount());
    } else if (mem_op.shift() != vixl::NO_SHIFT) {
      return vixl::Operand(mem_op.regoffset(), mem_op.shift(), mem_op.shift_amount());
    } else {
      LOG(FATAL) << "Should not reach here";
      UNREACHABLE();
    }
  }
}

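// Returns whether `constant` can be encoded in the immediate field of the
// instruction(s) that `instr` will be lowered to.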
static bool CanEncodeConstantAsImmediate(HConstant* constant, HInstruction* instr) {
  DCHECK(constant->IsIntConstant() || constant->IsLongConstant() || constant->IsNullConstant())
      << constant->DebugName();

  // For single uses we let VIXL handle the constant generation since it will
  // use registers that are not managed by the register allocator (wip0, wip1).
  if (constant->GetUses().HasExactlyOneElement()) {
    return true;
  }

  // Our code generator ensures shift distances are within an encodable range.
  if (instr->IsRor()) {
    return true;
  }

  int64_t value = CodeGenerator::GetInt64ValueOf(constant);

  if (instr->IsAnd() || instr->IsOr() || instr->IsXor()) {
    // Uses logical operations.
    return vixl::Assembler::IsImmLogical(value, vixl::kXRegSize);
  } else if (instr->IsNeg()) {
    // Uses mov -immediate.
    return vixl::Assembler::IsImmMovn(value, vixl::kXRegSize);
  } else {
    DCHECK(instr->IsAdd() ||
           instr->IsArm64IntermediateAddress() ||
           instr->IsBoundsCheck() ||
           instr->IsCompare() ||
           instr->IsCondition() ||
           instr->IsSub())
        << instr->DebugName();
    // Uses aliases of ADD/SUB instructions.
    // If `value` does not fit but `-value` does, VIXL will automatically use
    // the 'opposite' instruction.
    return vixl::Assembler::IsImmAddSub(value) || vixl::Assembler::IsImmAddSub(-value);
  }
}

static inline Location ARM64EncodableConstantOrRegister(HInstruction* constant,
                                                        HInstruction* instr) {
  if (constant->IsConstant()
      && CanEncodeConstantAsImmediate(constant->AsConstant(), instr)) {
    return Location::ConstantLocation(constant->AsConstant());
  }

  return Location::RequiresRegister();
}
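
// Illustrative use only (an assumption about callers in the code generator,
// not something defined in this header): when building a LocationSummary, the
// right-hand side of an arithmetic instruction can stay a constant if it is
// encodable, otherwise it is forced into a register:
//   locations->SetInAt(1, ARM64EncodableConstantOrRegister(instr->InputAt(1), instr));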

// Checks whether the registers in an ART register set have the same register
// codes in VIXL. If the codes match, a VIXL register list can be initialized
// directly from the register masks. Currently only the SP/WSP and XZR/WZR
// codes differ between ART and VIXL.
// Note: This function is only used for debug checks.
static inline bool ArtVixlRegCodeCoherentForRegSet(uint32_t art_core_registers,
                                                   size_t num_core,
                                                   uint32_t art_fpu_registers,
                                                   size_t num_fpu) {
  // The register masks won't work if the number of registers is larger than 32.
  DCHECK_GE(sizeof(art_core_registers) * 8, num_core);
  DCHECK_GE(sizeof(art_fpu_registers) * 8, num_fpu);
  for (size_t art_reg_code = 0; art_reg_code < num_core; ++art_reg_code) {
    if (RegisterSet::Contains(art_core_registers, art_reg_code)) {
      if (art_reg_code != static_cast<size_t>(VIXLRegCodeFromART(art_reg_code))) {
        return false;
      }
    }
  }
  // There is no register code translation for float registers.
  return true;
}

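// Translate the shift and extend kinds recorded on HArm64DataProcWithShifterOp
// nodes into the corresponding VIXL shift/extend operands.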
static inline vixl::Shift ShiftFromOpKind(HArm64DataProcWithShifterOp::OpKind op_kind) {
  switch (op_kind) {
    case HArm64DataProcWithShifterOp::kASR: return vixl::ASR;
    case HArm64DataProcWithShifterOp::kLSL: return vixl::LSL;
    case HArm64DataProcWithShifterOp::kLSR: return vixl::LSR;
    default:
      LOG(FATAL) << "Unexpected op kind " << op_kind;
      UNREACHABLE();
      return vixl::NO_SHIFT;
  }
}

static inline vixl::Extend ExtendFromOpKind(HArm64DataProcWithShifterOp::OpKind op_kind) {
  switch (op_kind) {
    case HArm64DataProcWithShifterOp::kUXTB: return vixl::UXTB;
    case HArm64DataProcWithShifterOp::kUXTH: return vixl::UXTH;
    case HArm64DataProcWithShifterOp::kUXTW: return vixl::UXTW;
    case HArm64DataProcWithShifterOp::kSXTB: return vixl::SXTB;
    case HArm64DataProcWithShifterOp::kSXTH: return vixl::SXTH;
    case HArm64DataProcWithShifterOp::kSXTW: return vixl::SXTW;
    default:
      LOG(FATAL) << "Unexpected op kind " << op_kind;
      UNREACHABLE();
      return vixl::NO_EXTEND;
  }
}

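// Returns whether `instruction` can be folded into the shifter operand of a
// data-processing instruction: either an integral type conversion (which maps
// to an extend) or a shift by a constant amount.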
static inline bool CanFitInShifterOperand(HInstruction* instruction) {
  if (instruction->IsTypeConversion()) {
    HTypeConversion* conversion = instruction->AsTypeConversion();
    Primitive::Type result_type = conversion->GetResultType();
    Primitive::Type input_type = conversion->GetInputType();
    // We don't expect the input and result to have the same type.
    return Primitive::IsIntegralType(result_type) && Primitive::IsIntegralType(input_type) &&
        (result_type != input_type);
  } else {
    return (instruction->IsShl() && instruction->AsShl()->InputAt(1)->IsIntConstant()) ||
        (instruction->IsShr() && instruction->AsShr()->InputAt(1)->IsIntConstant()) ||
        (instruction->IsUShr() && instruction->AsUShr()->InputAt(1)->IsIntConstant());
  }
}

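// Returns whether `instr` maps to an A64 data-processing instruction that can
// take a shifter operand (a shifted or extended register) as its last input.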
static inline bool HasShifterOperand(HInstruction* instr) {
  // `neg` instructions are an alias of `sub` using the zero register as the
  // first register input.
  bool res = instr->IsAdd() || instr->IsAnd() || instr->IsNeg() ||
      instr->IsOr() || instr->IsSub() || instr->IsXor();
  return res;
}

static inline bool ShifterOperandSupportsExtension(HInstruction* instruction) {
  DCHECK(HasShifterOperand(instruction));
  // Although the `neg` instruction is an alias of the `sub` instruction, `HNeg`
  // does *not* support extension. This is because the `extended register` form
  // of the `sub` instruction interprets the left register with code 31 as the
  // stack pointer and not the zero register. (So does the `immediate` form.) In
  // the other form, `shifted register`, the register with code 31 is interpreted
  // as the zero register.
  return instruction->IsAdd() || instruction->IsSub();
}

}  // namespace helpers
}  // namespace arm64
}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_COMMON_ARM64_H_