/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_COMPILER_OPTIMIZING_COMMON_ARM64_H_
#define ART_COMPILER_OPTIMIZING_COMMON_ARM64_H_

#include "code_generator.h"
#include "instruction_simplifier_shared.h"
#include "locations.h"
#include "nodes.h"
#include "utils/arm64/assembler_arm64.h"

// TODO(VIXL): Make VIXL compile with -Wshadow.
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wshadow"
#include "aarch64/disasm-aarch64.h"
#include "aarch64/macro-assembler-aarch64.h"
#include "aarch64/simulator-aarch64.h"
#pragma GCC diagnostic pop

namespace art {

using helpers::CanFitInShifterOperand;
using helpers::HasShifterOperand;

namespace arm64 {
namespace helpers {

// Convenience helpers to ease conversion to and from VIXL operands.
static_assert((SP == 31) && (WSP == 31) && (XZR == 32) && (WZR == 32),
              "Unexpected values for register codes.");

inline int VIXLRegCodeFromART(int code) {
  if (code == SP) {
    return vixl::aarch64::kSPRegInternalCode;
  }
  if (code == XZR) {
    return vixl::aarch64::kZeroRegCode;
  }
  return code;
}

inline int ARTRegCodeFromVIXL(int code) {
  if (code == vixl::aarch64::kSPRegInternalCode) {
    return SP;
  }
  if (code == vixl::aarch64::kZeroRegCode) {
    return XZR;
  }
  return code;
}

inline vixl::aarch64::Register XRegisterFrom(Location location) {
  DCHECK(location.IsRegister()) << location;
  return vixl::aarch64::Register::GetXRegFromCode(VIXLRegCodeFromART(location.reg()));
}

inline vixl::aarch64::Register WRegisterFrom(Location location) {
  DCHECK(location.IsRegister()) << location;
  return vixl::aarch64::Register::GetWRegFromCode(VIXLRegCodeFromART(location.reg()));
}
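// For illustration, a sketch of the intended invariant (not a check performed
// by this file): the two translations above are inverses of each other, so a
// round trip through VIXL codes preserves the ART code, including the remapped
// SP and XZR:
//
//   DCHECK_EQ(SP, ARTRegCodeFromVIXL(VIXLRegCodeFromART(SP)));
//   DCHECK_EQ(XZR, ARTRegCodeFromVIXL(VIXLRegCodeFromART(XZR)));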
inline vixl::aarch64::Register RegisterFrom(Location location, DataType::Type type) {
  DCHECK(type != DataType::Type::kVoid && !DataType::IsFloatingPointType(type)) << type;
  return type == DataType::Type::kInt64 ? XRegisterFrom(location) : WRegisterFrom(location);
}

inline vixl::aarch64::Register OutputRegister(HInstruction* instr) {
  return RegisterFrom(instr->GetLocations()->Out(), instr->GetType());
}

inline vixl::aarch64::Register InputRegisterAt(HInstruction* instr, int input_index) {
  return RegisterFrom(instr->GetLocations()->InAt(input_index),
                      instr->InputAt(input_index)->GetType());
}

inline vixl::aarch64::FPRegister DRegisterFrom(Location location) {
  DCHECK(location.IsFpuRegister()) << location;
  return vixl::aarch64::FPRegister::GetDRegFromCode(location.reg());
}

inline vixl::aarch64::FPRegister QRegisterFrom(Location location) {
  DCHECK(location.IsFpuRegister()) << location;
  return vixl::aarch64::FPRegister::GetQRegFromCode(location.reg());
}

inline vixl::aarch64::FPRegister VRegisterFrom(Location location) {
  DCHECK(location.IsFpuRegister()) << location;
  return vixl::aarch64::FPRegister::GetVRegFromCode(location.reg());
}

inline vixl::aarch64::FPRegister SRegisterFrom(Location location) {
  DCHECK(location.IsFpuRegister()) << location;
  return vixl::aarch64::FPRegister::GetSRegFromCode(location.reg());
}

inline vixl::aarch64::FPRegister FPRegisterFrom(Location location, DataType::Type type) {
  DCHECK(DataType::IsFloatingPointType(type)) << type;
  return type == DataType::Type::kFloat64 ? DRegisterFrom(location) : SRegisterFrom(location);
}

inline vixl::aarch64::FPRegister OutputFPRegister(HInstruction* instr) {
  return FPRegisterFrom(instr->GetLocations()->Out(), instr->GetType());
}

inline vixl::aarch64::FPRegister InputFPRegisterAt(HInstruction* instr, int input_index) {
  return FPRegisterFrom(instr->GetLocations()->InAt(input_index),
                        instr->InputAt(input_index)->GetType());
}

inline vixl::aarch64::CPURegister CPURegisterFrom(Location location, DataType::Type type) {
  return DataType::IsFloatingPointType(type)
      ? vixl::aarch64::CPURegister(FPRegisterFrom(location, type))
      : vixl::aarch64::CPURegister(RegisterFrom(location, type));
}

inline vixl::aarch64::CPURegister OutputCPURegister(HInstruction* instr) {
  return DataType::IsFloatingPointType(instr->GetType())
      ? static_cast<vixl::aarch64::CPURegister>(OutputFPRegister(instr))
      : static_cast<vixl::aarch64::CPURegister>(OutputRegister(instr));
}

inline vixl::aarch64::CPURegister InputCPURegisterAt(HInstruction* instr, int index) {
  return DataType::IsFloatingPointType(instr->InputAt(index)->GetType())
      ? static_cast<vixl::aarch64::CPURegister>(InputFPRegisterAt(instr, index))
      : static_cast<vixl::aarch64::CPURegister>(InputRegisterAt(instr, index));
}
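// Usage sketch (hypothetical visitor code, assuming the usual `__` macro for
// the code generator's VIXL macro-assembler): the helpers above select the
// W/X or S/D register view from the HIR types, so a visitor can emit an
// instruction without spelling out register widths:
//
//   void VisitAdd(HAdd* add) {
//     __ Add(OutputRegister(add), InputRegisterAt(add, 0), InputRegisterAt(add, 1));
//   }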
inline vixl::aarch64::CPURegister InputCPURegisterOrZeroRegAt(HInstruction* instr,
                                                              int index) {
  HInstruction* input = instr->InputAt(index);
  DataType::Type input_type = input->GetType();
  if (input->IsConstant() && input->AsConstant()->IsZeroBitPattern()) {
    return (DataType::Size(input_type) >= vixl::aarch64::kXRegSizeInBytes)
        ? vixl::aarch64::Register(vixl::aarch64::xzr)
        : vixl::aarch64::Register(vixl::aarch64::wzr);
  }
  return InputCPURegisterAt(instr, index);
}

inline int64_t Int64ConstantFrom(Location location) {
  HConstant* instr = location.GetConstant();
  if (instr->IsIntConstant()) {
    return instr->AsIntConstant()->GetValue();
  } else if (instr->IsNullConstant()) {
    return 0;
  } else {
    DCHECK(instr->IsLongConstant()) << instr->DebugName();
    return instr->AsLongConstant()->GetValue();
  }
}

inline vixl::aarch64::Operand OperandFrom(Location location, DataType::Type type) {
  if (location.IsRegister()) {
    return vixl::aarch64::Operand(RegisterFrom(location, type));
  } else {
    return vixl::aarch64::Operand(Int64ConstantFrom(location));
  }
}

inline vixl::aarch64::Operand InputOperandAt(HInstruction* instr, int input_index) {
  return OperandFrom(instr->GetLocations()->InAt(input_index),
                     instr->InputAt(input_index)->GetType());
}

inline vixl::aarch64::MemOperand StackOperandFrom(Location location) {
  return vixl::aarch64::MemOperand(vixl::aarch64::sp, location.GetStackIndex());
}

inline vixl::aarch64::MemOperand HeapOperand(const vixl::aarch64::Register& base,
                                             size_t offset = 0) {
  // A heap reference must be 32 bits, so it fits in a W register.
  DCHECK(base.IsW());
  return vixl::aarch64::MemOperand(base.X(), offset);
}

inline vixl::aarch64::MemOperand HeapOperand(const vixl::aarch64::Register& base,
                                             const vixl::aarch64::Register& regoffset,
                                             vixl::aarch64::Shift shift = vixl::aarch64::LSL,
                                             unsigned shift_amount = 0) {
  // A heap reference must be 32 bits, so it fits in a W register.
  DCHECK(base.IsW());
  return vixl::aarch64::MemOperand(base.X(), regoffset, shift, shift_amount);
}

inline vixl::aarch64::MemOperand HeapOperand(const vixl::aarch64::Register& base,
                                             Offset offset) {
  return HeapOperand(base, offset.SizeValue());
}

inline vixl::aarch64::MemOperand HeapOperandFrom(Location location, Offset offset) {
  return HeapOperand(RegisterFrom(location, DataType::Type::kReference), offset);
}

inline Location LocationFrom(const vixl::aarch64::Register& reg) {
  return Location::RegisterLocation(ARTRegCodeFromVIXL(reg.GetCode()));
}

inline Location LocationFrom(const vixl::aarch64::FPRegister& fpreg) {
  return Location::FpuRegisterLocation(fpreg.GetCode());
}

inline vixl::aarch64::Operand OperandFromMemOperand(
    const vixl::aarch64::MemOperand& mem_op) {
  if (mem_op.IsImmediateOffset()) {
    return vixl::aarch64::Operand(mem_op.GetOffset());
  } else {
    DCHECK(mem_op.IsRegisterOffset());
    if (mem_op.GetExtend() != vixl::aarch64::NO_EXTEND) {
      return vixl::aarch64::Operand(mem_op.GetRegisterOffset(),
                                    mem_op.GetExtend(),
                                    mem_op.GetShiftAmount());
    } else if (mem_op.GetShift() != vixl::aarch64::NO_SHIFT) {
      return vixl::aarch64::Operand(mem_op.GetRegisterOffset(),
                                    mem_op.GetShift(),
                                    mem_op.GetShiftAmount());
    } else {
      LOG(FATAL) << "Should not reach here";
      UNREACHABLE();
    }
  }
}
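// Usage sketch (hypothetical load, with an assumed field offset of 8): heap
// references live in W registers, and HeapOperand widens the base to X for
// addressing, as enforced by the DCHECKs above:
//
//   vixl::aarch64::Register obj = WRegisterFrom(locations->InAt(0));
//   __ Ldr(OutputCPURegister(instr), HeapOperand(obj, /* offset= */ 8));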
inline bool Arm64CanEncodeConstantAsImmediate(HConstant* constant, HInstruction* instr) {
  int64_t value = CodeGenerator::GetInt64ValueOf(constant);

  // TODO: Improve this when IsSIMDConstantEncodable method is implemented in VIXL.
  if (instr->IsVecReplicateScalar()) {
    if (constant->IsLongConstant()) {
      return false;
    } else if (constant->IsFloatConstant()) {
      return vixl::aarch64::Assembler::IsImmFP32(constant->AsFloatConstant()->GetValue());
    } else if (constant->IsDoubleConstant()) {
      return vixl::aarch64::Assembler::IsImmFP64(constant->AsDoubleConstant()->GetValue());
    }
    return IsUint<8>(value);
  }

  // For single uses we let VIXL handle the constant generation since it will
  // use registers that are not managed by the register allocator (wip0, wip1).
  if (constant->GetUses().HasExactlyOneElement()) {
    return true;
  }

  // Our code generator ensures shift distances are within an encodable range.
  if (instr->IsRor()) {
    return true;
  }

  if (instr->IsAnd() || instr->IsOr() || instr->IsXor()) {
    // Uses logical operations.
    return vixl::aarch64::Assembler::IsImmLogical(value, vixl::aarch64::kXRegSize);
  } else if (instr->IsNeg()) {
    // Uses mov -immediate.
    return vixl::aarch64::Assembler::IsImmMovn(value, vixl::aarch64::kXRegSize);
  } else {
    DCHECK(instr->IsAdd() ||
           instr->IsIntermediateAddress() ||
           instr->IsBoundsCheck() ||
           instr->IsCompare() ||
           instr->IsCondition() ||
           instr->IsSub())
        << instr->DebugName();
    // Uses aliases of ADD/SUB instructions.
    // If `value` does not fit but `-value` does, VIXL will automatically use
    // the 'opposite' instruction.
    return vixl::aarch64::Assembler::IsImmAddSub(value)
        || vixl::aarch64::Assembler::IsImmAddSub(-value);
  }
}

inline Location ARM64EncodableConstantOrRegister(HInstruction* constant,
                                                 HInstruction* instr) {
  if (constant->IsConstant()
      && Arm64CanEncodeConstantAsImmediate(constant->AsConstant(), instr)) {
    return Location::ConstantLocation(constant->AsConstant());
  }

  return Location::RequiresRegister();
}
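// Usage sketch (hypothetical locations setup, not from this header): operands
// that encode as immediates stay as constant locations, sparing a register;
// anything else requests a register from the allocator:
//
//   locations->SetInAt(0, Location::RequiresRegister());
//   locations->SetInAt(1, ARM64EncodableConstantOrRegister(instr->InputAt(1), instr));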
// Check if registers in art register set have the same register code in vixl. If the register
// codes are the same, we can initialize the vixl register list simply by the register masks.
// Currently, only the SP/WSP and XZR/WZR codes are different between art and vixl.
// Note: This function is only used for debug checks.
inline bool ArtVixlRegCodeCoherentForRegSet(uint32_t art_core_registers,
                                            size_t num_core,
                                            uint32_t art_fpu_registers,
                                            size_t num_fpu) {
  // The register masks won't work if the number of registers is larger than 32.
  DCHECK_GE(sizeof(art_core_registers) * 8, num_core);
  DCHECK_GE(sizeof(art_fpu_registers) * 8, num_fpu);
  for (size_t art_reg_code = 0; art_reg_code < num_core; ++art_reg_code) {
    if (RegisterSet::Contains(art_core_registers, art_reg_code)) {
      if (art_reg_code != static_cast<size_t>(VIXLRegCodeFromART(art_reg_code))) {
        return false;
      }
    }
  }
  // There is no register code translation for float registers.
  return true;
}

inline vixl::aarch64::Shift ShiftFromOpKind(HDataProcWithShifterOp::OpKind op_kind) {
  switch (op_kind) {
    case HDataProcWithShifterOp::kASR: return vixl::aarch64::ASR;
    case HDataProcWithShifterOp::kLSL: return vixl::aarch64::LSL;
    case HDataProcWithShifterOp::kLSR: return vixl::aarch64::LSR;
    default:
      LOG(FATAL) << "Unexpected op kind " << op_kind;
      UNREACHABLE();
      return vixl::aarch64::NO_SHIFT;
  }
}

inline vixl::aarch64::Extend ExtendFromOpKind(HDataProcWithShifterOp::OpKind op_kind) {
  switch (op_kind) {
    case HDataProcWithShifterOp::kUXTB: return vixl::aarch64::UXTB;
    case HDataProcWithShifterOp::kUXTH: return vixl::aarch64::UXTH;
    case HDataProcWithShifterOp::kUXTW: return vixl::aarch64::UXTW;
    case HDataProcWithShifterOp::kSXTB: return vixl::aarch64::SXTB;
    case HDataProcWithShifterOp::kSXTH: return vixl::aarch64::SXTH;
    case HDataProcWithShifterOp::kSXTW: return vixl::aarch64::SXTW;
    default:
      LOG(FATAL) << "Unexpected op kind " << op_kind;
      UNREACHABLE();
      return vixl::aarch64::NO_EXTEND;
  }
}

inline bool ShifterOperandSupportsExtension(HInstruction* instruction) {
  DCHECK(HasShifterOperand(instruction, InstructionSet::kArm64));
  // Although the `neg` instruction is an alias of the `sub` instruction, `HNeg`
  // does *not* support extension. This is because the `extended register` form
  // of the `sub` instruction interprets the left register with code 31 as the
  // stack pointer and not the zero register. (So does the `immediate` form.) In
  // the other form, `shifted register`, the register with code 31 is interpreted
  // as the zero register.
  return instruction->IsAdd() || instruction->IsSub();
}

inline bool IsConstantZeroBitPattern(const HInstruction* instruction) {
  return instruction->IsConstant() && instruction->AsConstant()->IsZeroBitPattern();
}

}  // namespace helpers
}  // namespace arm64
}  // namespace art

#endif  // ART_COMPILER_OPTIMIZING_COMMON_ARM64_H_