//===-- AArch64ISelLowering.cpp - AArch64 DAG Lowering Implementation ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the AArch64TargetLowering class.
//
//===----------------------------------------------------------------------===//

#include "AArch64ISelLowering.h"
#include "AArch64CallingConvention.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64PerfectShuffle.h"
#include "AArch64RegisterInfo.h"
#include "AArch64Subtarget.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "Utils/AArch64BaseInfo.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetCallingConv.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <bitset>
#include <cassert>
#include <cctype>
#include <cstdint>
#include <cstdlib>
#include <iterator>
#include <limits>
#include <tuple>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "aarch64-lower"

STATISTIC(NumTailCalls, "Number of tail calls");
STATISTIC(NumShiftInserts, "Number of vector shift inserts");
STATISTIC(NumOptimizedImms, "Number of times immediates were optimized");

static cl::opt<bool>
EnableAArch64SlrGeneration("aarch64-shift-insert-generation", cl::Hidden,
                           cl::desc("Allow AArch64 SLI/SRI formation"),
                           cl::init(false));
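// Background on the instructions behind this flag: SLI ("shift left and
// insert") and SRI ("shift right and insert") shift one vector operand and
// insert it into the destination, leaving the destination bits outside the
// shifted-in range unchanged. Roughly speaking, this lets an OR of a shifted
// value with a masked value, e.g. (or (and X, mask), (shl Y, N)), become a
// single instruction; the exact patterns matched are the ones guarded by this
// flag further down in this file.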

// FIXME: The necessary dtprel relocations don't seem to be supported
// well in the GNU bfd and gold linkers at the moment. Therefore, by
// default, for now, fall back to GeneralDynamic code generation.
cl::opt<bool> EnableAArch64ELFLocalDynamicTLSGeneration(
    "aarch64-elf-ldtls-generation", cl::Hidden,
    cl::desc("Allow AArch64 Local Dynamic TLS code generation"),
    cl::init(false));

static cl::opt<bool>
EnableOptimizeLogicalImm("aarch64-enable-logical-imm", cl::Hidden,
                         cl::desc("Enable AArch64 logical imm instruction "
                                  "optimization"),
                         cl::init(true));

/// Value type used for condition codes.
static const MVT MVT_CC = MVT::i32;

AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
                                             const AArch64Subtarget &STI)
    : TargetLowering(TM), Subtarget(&STI) {
  // AArch64 doesn't have comparisons that set GPRs, nor setcc instructions, so
  // we have to make something up. Arbitrarily, choose ZeroOrOne.
  setBooleanContents(ZeroOrOneBooleanContent);
  // When comparing vectors, the result sets each element of the result vector
  // to all-ones or all-zeros.
  setBooleanVectorContents(ZeroOrNegativeOneBooleanContent);
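  // A minimal illustration of the two boolean conventions above (assumed
  // semantics, not code from this file): a scalar (i32 (setcc ...)) yields 0
  // or 1 in a GPR, whereas a vector (v4i32 (setcc ...)) yields 0 or -1
  // (0xFFFFFFFF) per lane, matching what CMEQ/CMGT and friends produce.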

  // Set up the register classes.
  addRegisterClass(MVT::i32, &AArch64::GPR32allRegClass);
  addRegisterClass(MVT::i64, &AArch64::GPR64allRegClass);

  if (Subtarget->hasFPARMv8()) {
    addRegisterClass(MVT::f16, &AArch64::FPR16RegClass);
    addRegisterClass(MVT::f32, &AArch64::FPR32RegClass);
    addRegisterClass(MVT::f64, &AArch64::FPR64RegClass);
    addRegisterClass(MVT::f128, &AArch64::FPR128RegClass);
  }

  if (Subtarget->hasNEON()) {
    addRegisterClass(MVT::v16i8, &AArch64::FPR8RegClass);
    addRegisterClass(MVT::v8i16, &AArch64::FPR16RegClass);
    // Someone set us up the NEON.
    addDRTypeForNEON(MVT::v2f32);
    addDRTypeForNEON(MVT::v8i8);
    addDRTypeForNEON(MVT::v4i16);
    addDRTypeForNEON(MVT::v2i32);
    addDRTypeForNEON(MVT::v1i64);
    addDRTypeForNEON(MVT::v1f64);
    addDRTypeForNEON(MVT::v4f16);

    addQRTypeForNEON(MVT::v4f32);
    addQRTypeForNEON(MVT::v2f64);
    addQRTypeForNEON(MVT::v16i8);
    addQRTypeForNEON(MVT::v8i16);
    addQRTypeForNEON(MVT::v4i32);
    addQRTypeForNEON(MVT::v2i64);
    addQRTypeForNEON(MVT::v8f16);
  }

  // Compute derived properties from the register classes.
  computeRegisterProperties(Subtarget->getRegisterInfo());

  // Provide all sorts of operation actions.
  setOperationAction(ISD::GlobalAddress, MVT::i64, Custom);
  setOperationAction(ISD::GlobalTLSAddress, MVT::i64, Custom);
  setOperationAction(ISD::SETCC, MVT::i32, Custom);
  setOperationAction(ISD::SETCC, MVT::i64, Custom);
  setOperationAction(ISD::SETCC, MVT::f16, Custom);
  setOperationAction(ISD::SETCC, MVT::f32, Custom);
  setOperationAction(ISD::SETCC, MVT::f64, Custom);
  setOperationAction(ISD::BITREVERSE, MVT::i32, Legal);
  setOperationAction(ISD::BITREVERSE, MVT::i64, Legal);
  setOperationAction(ISD::BRCOND, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, MVT::i32, Custom);
  setOperationAction(ISD::BR_CC, MVT::i64, Custom);
  setOperationAction(ISD::BR_CC, MVT::f16, Custom);
  setOperationAction(ISD::BR_CC, MVT::f32, Custom);
  setOperationAction(ISD::BR_CC, MVT::f64, Custom);
  setOperationAction(ISD::SELECT, MVT::i32, Custom);
  setOperationAction(ISD::SELECT, MVT::i64, Custom);
  setOperationAction(ISD::SELECT, MVT::f16, Custom);
  setOperationAction(ISD::SELECT, MVT::f32, Custom);
  setOperationAction(ISD::SELECT, MVT::f64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::i64, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f16, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f32, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f64, Custom);
  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::JumpTable, MVT::i64, Custom);

  setOperationAction(ISD::SHL_PARTS, MVT::i64, Custom);
  setOperationAction(ISD::SRA_PARTS, MVT::i64, Custom);
  setOperationAction(ISD::SRL_PARTS, MVT::i64, Custom);

  setOperationAction(ISD::FREM, MVT::f32, Expand);
  setOperationAction(ISD::FREM, MVT::f64, Expand);
  setOperationAction(ISD::FREM, MVT::f80, Expand);

  setOperationAction(ISD::BUILD_PAIR, MVT::i64, Expand);

  // Custom lowering hooks are needed for XOR
  // to fold it into CSINC/CSINV.
  setOperationAction(ISD::XOR, MVT::i32, Custom);
  setOperationAction(ISD::XOR, MVT::i64, Custom);

  // Virtually no operations on f128 are legal, but LLVM can't expand them when
  // there's a valid register class, so we need custom operations in most
  // cases.
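  // A sketch of what these choices mean here (assuming the usual soft-float
  // libcall names): the Custom f128 arithmetic cases below are emitted as
  // library calls further down in this file, e.g. an f128 FADD becomes a call
  // to __addtf3, while the Expand cases are left to the generic legalizer.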
  setOperationAction(ISD::FABS, MVT::f128, Expand);
  setOperationAction(ISD::FADD, MVT::f128, Custom);
  setOperationAction(ISD::FCOPYSIGN, MVT::f128, Expand);
  setOperationAction(ISD::FCOS, MVT::f128, Expand);
  setOperationAction(ISD::FDIV, MVT::f128, Custom);
  setOperationAction(ISD::FMA, MVT::f128, Expand);
  setOperationAction(ISD::FMUL, MVT::f128, Custom);
  setOperationAction(ISD::FNEG, MVT::f128, Expand);
  setOperationAction(ISD::FPOW, MVT::f128, Expand);
  setOperationAction(ISD::FREM, MVT::f128, Expand);
  setOperationAction(ISD::FRINT, MVT::f128, Expand);
  setOperationAction(ISD::FSIN, MVT::f128, Expand);
  setOperationAction(ISD::FSINCOS, MVT::f128, Expand);
  setOperationAction(ISD::FSQRT, MVT::f128, Expand);
  setOperationAction(ISD::FSUB, MVT::f128, Custom);
  setOperationAction(ISD::FTRUNC, MVT::f128, Expand);
  setOperationAction(ISD::SETCC, MVT::f128, Custom);
  setOperationAction(ISD::BR_CC, MVT::f128, Custom);
  setOperationAction(ISD::SELECT, MVT::f128, Custom);
  setOperationAction(ISD::SELECT_CC, MVT::f128, Custom);
  setOperationAction(ISD::FP_EXTEND, MVT::f128, Custom);

  // Lowering for many of the conversions is actually specified by the non-f128
  // type. The LowerXXX function will be trivial when f128 isn't involved.
  setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_SINT, MVT::i128, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i64, Custom);
  setOperationAction(ISD::FP_TO_UINT, MVT::i128, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::SINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i32, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i64, Custom);
  setOperationAction(ISD::UINT_TO_FP, MVT::i128, Custom);
  setOperationAction(ISD::FP_ROUND, MVT::f32, Custom);
  setOperationAction(ISD::FP_ROUND, MVT::f64, Custom);

  // Variable arguments.
  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Custom);
  setOperationAction(ISD::VACOPY, MVT::Other, Custom);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  // Variable-sized objects.
  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  if (Subtarget->isTargetWindows())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Custom);
  else
    setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i64, Expand);

  // Constant pool entries.
  setOperationAction(ISD::ConstantPool, MVT::i64, Custom);

  // BlockAddress.
  setOperationAction(ISD::BlockAddress, MVT::i64, Custom);

  // Add/Sub overflow ops with MVT::Glue are lowered to NZCV dependencies.
  setOperationAction(ISD::ADDC, MVT::i32, Custom);
  setOperationAction(ISD::ADDE, MVT::i32, Custom);
  setOperationAction(ISD::SUBC, MVT::i32, Custom);
  setOperationAction(ISD::SUBE, MVT::i32, Custom);
  setOperationAction(ISD::ADDC, MVT::i64, Custom);
  setOperationAction(ISD::ADDE, MVT::i64, Custom);
  setOperationAction(ISD::SUBC, MVT::i64, Custom);
  setOperationAction(ISD::SUBE, MVT::i64, Custom);
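  // Illustration (assumed lowering via the AArch64ISD::ADDS/ADCS nodes named
  // elsewhere in this file): an i128 add is split into a flag-setting add of
  // the low halves plus a carry-consuming add of the high halves, i.e.
  //   adds x0, x0, x2
  //   adcs x1, x1, x3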

  // AArch64 lacks both left-rotate and popcount instructions.
  setOperationAction(ISD::ROTL, MVT::i32, Expand);
  setOperationAction(ISD::ROTL, MVT::i64, Expand);
  for (MVT VT : MVT::vector_valuetypes()) {
    setOperationAction(ISD::ROTL, VT, Expand);
    setOperationAction(ISD::ROTR, VT, Expand);
  }

  // AArch64 doesn't have {U|S}MUL_LOHI.
  setOperationAction(ISD::UMUL_LOHI, MVT::i64, Expand);
  setOperationAction(ISD::SMUL_LOHI, MVT::i64, Expand);

  setOperationAction(ISD::CTPOP, MVT::i32, Custom);
  setOperationAction(ISD::CTPOP, MVT::i64, Custom);

  setOperationAction(ISD::SDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::SDIVREM, MVT::i64, Expand);
  for (MVT VT : MVT::vector_valuetypes()) {
    setOperationAction(ISD::SDIVREM, VT, Expand);
    setOperationAction(ISD::UDIVREM, VT, Expand);
  }
  setOperationAction(ISD::SREM, MVT::i32, Expand);
  setOperationAction(ISD::SREM, MVT::i64, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i32, Expand);
  setOperationAction(ISD::UDIVREM, MVT::i64, Expand);
  setOperationAction(ISD::UREM, MVT::i32, Expand);
  setOperationAction(ISD::UREM, MVT::i64, Expand);

  // Custom lower Add/Sub/Mul with overflow.
  setOperationAction(ISD::SADDO, MVT::i32, Custom);
  setOperationAction(ISD::SADDO, MVT::i64, Custom);
  setOperationAction(ISD::UADDO, MVT::i32, Custom);
  setOperationAction(ISD::UADDO, MVT::i64, Custom);
  setOperationAction(ISD::SSUBO, MVT::i32, Custom);
  setOperationAction(ISD::SSUBO, MVT::i64, Custom);
  setOperationAction(ISD::USUBO, MVT::i32, Custom);
  setOperationAction(ISD::USUBO, MVT::i64, Custom);
  setOperationAction(ISD::SMULO, MVT::i32, Custom);
  setOperationAction(ISD::SMULO, MVT::i64, Custom);
  setOperationAction(ISD::UMULO, MVT::i32, Custom);
  setOperationAction(ISD::UMULO, MVT::i64, Custom);

  setOperationAction(ISD::FSIN, MVT::f32, Expand);
  setOperationAction(ISD::FSIN, MVT::f64, Expand);
  setOperationAction(ISD::FCOS, MVT::f32, Expand);
  setOperationAction(ISD::FCOS, MVT::f64, Expand);
  setOperationAction(ISD::FPOW, MVT::f32, Expand);
  setOperationAction(ISD::FPOW, MVT::f64, Expand);
  setOperationAction(ISD::FCOPYSIGN, MVT::f64, Custom);
  setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
  if (Subtarget->hasFullFP16())
    setOperationAction(ISD::FCOPYSIGN, MVT::f16, Custom);
  else
    setOperationAction(ISD::FCOPYSIGN, MVT::f16, Promote);

  setOperationAction(ISD::FREM, MVT::f16, Promote);
  setOperationAction(ISD::FREM, MVT::v4f16, Promote);
  setOperationAction(ISD::FREM, MVT::v8f16, Promote);
  setOperationAction(ISD::FPOW, MVT::f16, Promote);
  setOperationAction(ISD::FPOW, MVT::v4f16, Promote);
  setOperationAction(ISD::FPOW, MVT::v8f16, Promote);
  setOperationAction(ISD::FPOWI, MVT::f16, Promote);
  setOperationAction(ISD::FCOS, MVT::f16, Promote);
  setOperationAction(ISD::FCOS, MVT::v4f16, Promote);
  setOperationAction(ISD::FCOS, MVT::v8f16, Promote);
  setOperationAction(ISD::FSIN, MVT::f16, Promote);
  setOperationAction(ISD::FSIN, MVT::v4f16, Promote);
  setOperationAction(ISD::FSIN, MVT::v8f16, Promote);
  setOperationAction(ISD::FSINCOS, MVT::f16, Promote);
  setOperationAction(ISD::FSINCOS, MVT::v4f16, Promote);
  setOperationAction(ISD::FSINCOS, MVT::v8f16, Promote);
  setOperationAction(ISD::FEXP, MVT::f16, Promote);
  setOperationAction(ISD::FEXP, MVT::v4f16, Promote);
  setOperationAction(ISD::FEXP, MVT::v8f16, Promote);
  setOperationAction(ISD::FEXP2, MVT::f16, Promote);
  setOperationAction(ISD::FEXP2, MVT::v4f16, Promote);
  setOperationAction(ISD::FEXP2, MVT::v8f16, Promote);
  setOperationAction(ISD::FLOG, MVT::f16, Promote);
  setOperationAction(ISD::FLOG, MVT::v4f16, Promote);
  setOperationAction(ISD::FLOG, MVT::v8f16, Promote);
  setOperationAction(ISD::FLOG2, MVT::f16, Promote);
  setOperationAction(ISD::FLOG2, MVT::v4f16, Promote);
  setOperationAction(ISD::FLOG2, MVT::v8f16, Promote);
  setOperationAction(ISD::FLOG10, MVT::f16, Promote);
  setOperationAction(ISD::FLOG10, MVT::v4f16, Promote);
  setOperationAction(ISD::FLOG10, MVT::v8f16, Promote);
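  // What Promote means for the f16 libm-style nodes above (a sketch of the
  // generic legalization path, not target-specific code): the operands are
  // extended to f32 and the f32 action then applies, so e.g. an f16 FREM ends
  // up as fptrunc(fmodf(fpext a, fpext b)) via the f32 Expand-to-libcall path.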

  if (!Subtarget->hasFullFP16()) {
    setOperationAction(ISD::SELECT, MVT::f16, Promote);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Promote);
    setOperationAction(ISD::SETCC, MVT::f16, Promote);
    setOperationAction(ISD::BR_CC, MVT::f16, Promote);
    setOperationAction(ISD::FADD, MVT::f16, Promote);
    setOperationAction(ISD::FSUB, MVT::f16, Promote);
    setOperationAction(ISD::FMUL, MVT::f16, Promote);
    setOperationAction(ISD::FDIV, MVT::f16, Promote);
    setOperationAction(ISD::FMA, MVT::f16, Promote);
    setOperationAction(ISD::FNEG, MVT::f16, Promote);
    setOperationAction(ISD::FABS, MVT::f16, Promote);
    setOperationAction(ISD::FCEIL, MVT::f16, Promote);
    setOperationAction(ISD::FSQRT, MVT::f16, Promote);
    setOperationAction(ISD::FFLOOR, MVT::f16, Promote);
    setOperationAction(ISD::FNEARBYINT, MVT::f16, Promote);
    setOperationAction(ISD::FRINT, MVT::f16, Promote);
    setOperationAction(ISD::FROUND, MVT::f16, Promote);
    setOperationAction(ISD::FTRUNC, MVT::f16, Promote);
    setOperationAction(ISD::FMINNUM, MVT::f16, Promote);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Promote);
    setOperationAction(ISD::FMINNAN, MVT::f16, Promote);
    setOperationAction(ISD::FMAXNAN, MVT::f16, Promote);

    // Promote v4f16 to v4f32 when that is known to be safe.
    setOperationAction(ISD::FADD, MVT::v4f16, Promote);
    setOperationAction(ISD::FSUB, MVT::v4f16, Promote);
    setOperationAction(ISD::FMUL, MVT::v4f16, Promote);
    setOperationAction(ISD::FDIV, MVT::v4f16, Promote);
    setOperationAction(ISD::FP_EXTEND, MVT::v4f16, Promote);
    setOperationAction(ISD::FP_ROUND, MVT::v4f16, Promote);
    AddPromotedToType(ISD::FADD, MVT::v4f16, MVT::v4f32);
    AddPromotedToType(ISD::FSUB, MVT::v4f16, MVT::v4f32);
    AddPromotedToType(ISD::FMUL, MVT::v4f16, MVT::v4f32);
    AddPromotedToType(ISD::FDIV, MVT::v4f16, MVT::v4f32);
    AddPromotedToType(ISD::FP_EXTEND, MVT::v4f16, MVT::v4f32);
    AddPromotedToType(ISD::FP_ROUND, MVT::v4f16, MVT::v4f32);

    setOperationAction(ISD::FABS, MVT::v4f16, Expand);
    setOperationAction(ISD::FNEG, MVT::v4f16, Expand);
    setOperationAction(ISD::FROUND, MVT::v4f16, Expand);
    setOperationAction(ISD::FMA, MVT::v4f16, Expand);
    setOperationAction(ISD::SETCC, MVT::v4f16, Expand);
    setOperationAction(ISD::BR_CC, MVT::v4f16, Expand);
    setOperationAction(ISD::SELECT, MVT::v4f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::v4f16, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v4f16, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::v4f16, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v4f16, Expand);
    setOperationAction(ISD::FCEIL, MVT::v4f16, Expand);
    setOperationAction(ISD::FRINT, MVT::v4f16, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v4f16, Expand);
    setOperationAction(ISD::FSQRT, MVT::v4f16, Expand);

    setOperationAction(ISD::FABS, MVT::v8f16, Expand);
    setOperationAction(ISD::FADD, MVT::v8f16, Expand);
    setOperationAction(ISD::FCEIL, MVT::v8f16, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::v8f16, Expand);
    setOperationAction(ISD::FDIV, MVT::v8f16, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v8f16, Expand);
    setOperationAction(ISD::FMA, MVT::v8f16, Expand);
    setOperationAction(ISD::FMUL, MVT::v8f16, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v8f16, Expand);
    setOperationAction(ISD::FNEG, MVT::v8f16, Expand);
    setOperationAction(ISD::FROUND, MVT::v8f16, Expand);
    setOperationAction(ISD::FRINT, MVT::v8f16, Expand);
    setOperationAction(ISD::FSQRT, MVT::v8f16, Expand);
    setOperationAction(ISD::FSUB, MVT::v8f16, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v8f16, Expand);
    setOperationAction(ISD::SETCC, MVT::v8f16, Expand);
    setOperationAction(ISD::BR_CC, MVT::v8f16, Expand);
    setOperationAction(ISD::SELECT, MVT::v8f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::v8f16, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v8f16, Expand);
  }
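  // In IR terms, the scalar f16 promotion above behaves like (standard
  // LegalizeDAG behavior, shown for illustration):
  //   (f16 (fadd a, b)) -> (fptrunc (f32 (fadd (fpext a), (fpext b))))
  // with FCVT instructions moving between half and single precision.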

  // AArch64 has implementations of a lot of rounding-like FP operations.
  for (MVT Ty : {MVT::f32, MVT::f64}) {
    setOperationAction(ISD::FFLOOR, Ty, Legal);
    setOperationAction(ISD::FNEARBYINT, Ty, Legal);
    setOperationAction(ISD::FCEIL, Ty, Legal);
    setOperationAction(ISD::FRINT, Ty, Legal);
    setOperationAction(ISD::FTRUNC, Ty, Legal);
    setOperationAction(ISD::FROUND, Ty, Legal);
    setOperationAction(ISD::FMINNUM, Ty, Legal);
    setOperationAction(ISD::FMAXNUM, Ty, Legal);
    setOperationAction(ISD::FMINNAN, Ty, Legal);
    setOperationAction(ISD::FMAXNAN, Ty, Legal);
  }

  if (Subtarget->hasFullFP16()) {
    setOperationAction(ISD::FNEARBYINT, MVT::f16, Legal);
    setOperationAction(ISD::FFLOOR, MVT::f16, Legal);
    setOperationAction(ISD::FCEIL, MVT::f16, Legal);
    setOperationAction(ISD::FRINT, MVT::f16, Legal);
    setOperationAction(ISD::FTRUNC, MVT::f16, Legal);
    setOperationAction(ISD::FROUND, MVT::f16, Legal);
    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMINNAN, MVT::f16, Legal);
    setOperationAction(ISD::FMAXNAN, MVT::f16, Legal);
  }

  setOperationAction(ISD::PREFETCH, MVT::Other, Custom);

  setOperationAction(ISD::FLT_ROUNDS_, MVT::i32, Custom);

  setOperationAction(ISD::ATOMIC_CMP_SWAP, MVT::i128, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_SUB, MVT::i64, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i32, Custom);
  setOperationAction(ISD::ATOMIC_LOAD_AND, MVT::i64, Custom);

  // Lower READCYCLECOUNTER using an mrs from PMCCNTR_EL0.
  // This requires the Performance Monitors extension.
  if (Subtarget->hasPerfMon())
    setOperationAction(ISD::READCYCLECOUNTER, MVT::i64, Legal);

  if (getLibcallName(RTLIB::SINCOS_STRET_F32) != nullptr &&
      getLibcallName(RTLIB::SINCOS_STRET_F64) != nullptr) {
    // Issue __sincos_stret if available.
    setOperationAction(ISD::FSINCOS, MVT::f64, Custom);
    setOperationAction(ISD::FSINCOS, MVT::f32, Custom);
  } else {
    setOperationAction(ISD::FSINCOS, MVT::f64, Expand);
    setOperationAction(ISD::FSINCOS, MVT::f32, Expand);
  }

  // Make floating-point constants legal for the large code model, so they
  // don't become loads from the constant pool.
  if (Subtarget->isTargetMachO() && TM.getCodeModel() == CodeModel::Large) {
    setOperationAction(ISD::ConstantFP, MVT::f32, Legal);
    setOperationAction(ISD::ConstantFP, MVT::f64, Legal);
  }

  // AArch64 does not have floating-point extending loads, i1 sign-extending
  // load, floating-point truncating stores, or v2i32->v2i16 truncating store.
  for (MVT VT : MVT::fp_valuetypes()) {
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f16, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, VT, MVT::f80, Expand);
  }
  for (MVT VT : MVT::integer_valuetypes())
    setLoadExtAction(ISD::SEXTLOAD, VT, MVT::i1, Expand);

  setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  setTruncStoreAction(MVT::f64, MVT::f32, Expand);
  setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  setTruncStoreAction(MVT::f128, MVT::f80, Expand);
  setTruncStoreAction(MVT::f128, MVT::f64, Expand);
  setTruncStoreAction(MVT::f128, MVT::f32, Expand);
  setTruncStoreAction(MVT::f128, MVT::f16, Expand);

  setOperationAction(ISD::BITCAST, MVT::i16, Custom);
  setOperationAction(ISD::BITCAST, MVT::f16, Custom);

  // Indexed loads and stores are supported.
  for (unsigned im = (unsigned)ISD::PRE_INC;
       im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
    setIndexedLoadAction(im, MVT::i8, Legal);
    setIndexedLoadAction(im, MVT::i16, Legal);
    setIndexedLoadAction(im, MVT::i32, Legal);
    setIndexedLoadAction(im, MVT::i64, Legal);
    setIndexedLoadAction(im, MVT::f64, Legal);
    setIndexedLoadAction(im, MVT::f32, Legal);
    setIndexedLoadAction(im, MVT::f16, Legal);
    setIndexedStoreAction(im, MVT::i8, Legal);
    setIndexedStoreAction(im, MVT::i16, Legal);
    setIndexedStoreAction(im, MVT::i32, Legal);
    setIndexedStoreAction(im, MVT::i64, Legal);
    setIndexedStoreAction(im, MVT::f64, Legal);
    setIndexedStoreAction(im, MVT::f32, Legal);
    setIndexedStoreAction(im, MVT::f16, Legal);
  }
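  // The indexed modes marked Legal above correspond to the pre- and
  // post-indexed addressing forms, e.g. (illustrative assembly):
  //   ldr x0, [x1, #8]!   // pre-indexed: x1 += 8, then load
  //   str x0, [x1], #8    // post-indexed: store, then x1 += 8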

  // Trap.
  setOperationAction(ISD::TRAP, MVT::Other, Legal);

  // We combine OR nodes for bitfield operations.
  setTargetDAGCombine(ISD::OR);

  // Vector add and sub nodes may conceal a high-half opportunity.
  // Also, try to fold ADD into CSINC/CSINV.
  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::SRL);
  setTargetDAGCombine(ISD::XOR);
  setTargetDAGCombine(ISD::SINT_TO_FP);
  setTargetDAGCombine(ISD::UINT_TO_FP);

  setTargetDAGCombine(ISD::FP_TO_SINT);
  setTargetDAGCombine(ISD::FP_TO_UINT);
  setTargetDAGCombine(ISD::FDIV);

  setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);

  setTargetDAGCombine(ISD::ANY_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  setTargetDAGCombine(ISD::SIGN_EXTEND);
  setTargetDAGCombine(ISD::BITCAST);
  setTargetDAGCombine(ISD::CONCAT_VECTORS);
  setTargetDAGCombine(ISD::STORE);
  if (Subtarget->supportsAddressTopByteIgnored())
    setTargetDAGCombine(ISD::LOAD);

  setTargetDAGCombine(ISD::MUL);

  setTargetDAGCombine(ISD::SELECT);
  setTargetDAGCombine(ISD::VSELECT);

  setTargetDAGCombine(ISD::INTRINSIC_VOID);
  setTargetDAGCombine(ISD::INTRINSIC_W_CHAIN);
  setTargetDAGCombine(ISD::INSERT_VECTOR_ELT);

  setTargetDAGCombine(ISD::GlobalAddress);

  // In case of strict alignment, avoid an excessive number of byte-wide
  // stores.
  MaxStoresPerMemsetOptSize = 8;
  MaxStoresPerMemset = Subtarget->requiresStrictAlign()
                           ? MaxStoresPerMemsetOptSize : 32;

  MaxGluedStoresPerMemcpy = 4;
  MaxStoresPerMemcpyOptSize = 4;
  MaxStoresPerMemcpy = Subtarget->requiresStrictAlign()
                           ? MaxStoresPerMemcpyOptSize : 16;

  MaxStoresPerMemmoveOptSize = MaxStoresPerMemmove = 4;
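  // Sketch of the effect of these limits (generic SelectionDAG behavior): a
  // memcpy/memset is expanded inline only while the expansion fits within the
  // corresponding MaxStoresPer* budget; anything larger remains a libcall.
  // Strict alignment lowers the budgets because each store may then only be
  // able to cover a few bytes.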

  setStackPointerRegisterToSaveRestore(AArch64::SP);

  setSchedulingPreference(Sched::Hybrid);

  EnableExtLdPromotion = true;

  // Set required alignment.
  setMinFunctionAlignment(2);
  // Set preferred alignments.
  setPrefFunctionAlignment(STI.getPrefFunctionAlignment());
  setPrefLoopAlignment(STI.getPrefLoopAlignment());

  // Only change the limit for entries in a jump table if specified by
  // the subtarget, but not at the command line.
  unsigned MaxJT = STI.getMaximumJumpTableSize();
  if (MaxJT && getMaximumJumpTableSize() == 0)
    setMaximumJumpTableSize(MaxJT);

  setHasExtractBitsInsn(true);

  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  if (Subtarget->hasNEON()) {
    // FIXME: v1f64 shouldn't be legal if we can avoid it, because it leads to
    // silliness like this:
    setOperationAction(ISD::FABS, MVT::v1f64, Expand);
    setOperationAction(ISD::FADD, MVT::v1f64, Expand);
    setOperationAction(ISD::FCEIL, MVT::v1f64, Expand);
    setOperationAction(ISD::FCOPYSIGN, MVT::v1f64, Expand);
    setOperationAction(ISD::FCOS, MVT::v1f64, Expand);
    setOperationAction(ISD::FDIV, MVT::v1f64, Expand);
    setOperationAction(ISD::FFLOOR, MVT::v1f64, Expand);
    setOperationAction(ISD::FMA, MVT::v1f64, Expand);
    setOperationAction(ISD::FMUL, MVT::v1f64, Expand);
    setOperationAction(ISD::FNEARBYINT, MVT::v1f64, Expand);
    setOperationAction(ISD::FNEG, MVT::v1f64, Expand);
    setOperationAction(ISD::FPOW, MVT::v1f64, Expand);
    setOperationAction(ISD::FREM, MVT::v1f64, Expand);
    setOperationAction(ISD::FROUND, MVT::v1f64, Expand);
    setOperationAction(ISD::FRINT, MVT::v1f64, Expand);
    setOperationAction(ISD::FSIN, MVT::v1f64, Expand);
    setOperationAction(ISD::FSINCOS, MVT::v1f64, Expand);
    setOperationAction(ISD::FSQRT, MVT::v1f64, Expand);
    setOperationAction(ISD::FSUB, MVT::v1f64, Expand);
    setOperationAction(ISD::FTRUNC, MVT::v1f64, Expand);
    setOperationAction(ISD::SETCC, MVT::v1f64, Expand);
    setOperationAction(ISD::BR_CC, MVT::v1f64, Expand);
    setOperationAction(ISD::SELECT, MVT::v1f64, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::v1f64, Expand);
    setOperationAction(ISD::FP_EXTEND, MVT::v1f64, Expand);

    setOperationAction(ISD::FP_TO_SINT, MVT::v1i64, Expand);
    setOperationAction(ISD::FP_TO_UINT, MVT::v1i64, Expand);
    setOperationAction(ISD::SINT_TO_FP, MVT::v1i64, Expand);
    setOperationAction(ISD::UINT_TO_FP, MVT::v1i64, Expand);
    setOperationAction(ISD::FP_ROUND, MVT::v1f64, Expand);

    setOperationAction(ISD::MUL, MVT::v1i64, Expand);

    // AArch64 doesn't have direct vector->f32 conversion instructions for
    // elements smaller than i32, so promote the input to i32 first.
    setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v4i8, MVT::v4i32);
    setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v4i8, MVT::v4i32);
    setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v4i16, MVT::v4i32);
    setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v4i16, MVT::v4i32);
    // i8 and i16 vector elements also need promotion to i32 for v8i8 or v8i16
    // -> v8f16 conversions.
    setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v8i8, MVT::v8i32);
    setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v8i8, MVT::v8i32);
    setOperationPromotedToType(ISD::SINT_TO_FP, MVT::v8i16, MVT::v8i32);
    setOperationPromotedToType(ISD::UINT_TO_FP, MVT::v8i16, MVT::v8i32);
    // Similarly, there is no direct i32 -> f64 vector conversion instruction.
    setOperationAction(ISD::SINT_TO_FP, MVT::v2i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2i32, Custom);
    setOperationAction(ISD::SINT_TO_FP, MVT::v2i64, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v2i64, Custom);
    // Nor is there a direct i32 -> f16 vector conversion; set it to Custom so
    // the conversion happens in two steps: v4i32 -> v4f32 -> v4f16.
    setOperationAction(ISD::SINT_TO_FP, MVT::v4i32, Custom);
    setOperationAction(ISD::UINT_TO_FP, MVT::v4i32, Custom);
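    // Illustration of the two-step lowering mentioned above (assumed shape):
    // (v4f16 (uint_to_fp v4i32 X)) first becomes (v4f32 (uint_to_fp X)),
    // i.e. UCVTF.4S, and the result is then narrowed to v4f16 with FCVTN.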

    setOperationAction(ISD::CTLZ, MVT::v1i64, Expand);
    setOperationAction(ISD::CTLZ, MVT::v2i64, Expand);

    setOperationAction(ISD::CTTZ, MVT::v2i8, Expand);
    setOperationAction(ISD::CTTZ, MVT::v4i16, Expand);
    setOperationAction(ISD::CTTZ, MVT::v2i32, Expand);
    setOperationAction(ISD::CTTZ, MVT::v1i64, Expand);
    setOperationAction(ISD::CTTZ, MVT::v16i8, Expand);
    setOperationAction(ISD::CTTZ, MVT::v8i16, Expand);
    setOperationAction(ISD::CTTZ, MVT::v4i32, Expand);
    setOperationAction(ISD::CTTZ, MVT::v2i64, Expand);

    // AArch64 doesn't have MUL.2d:
    setOperationAction(ISD::MUL, MVT::v2i64, Expand);
    // Custom handling for some quad-vector types to detect MULL.
    setOperationAction(ISD::MUL, MVT::v8i16, Custom);
    setOperationAction(ISD::MUL, MVT::v4i32, Custom);
    setOperationAction(ISD::MUL, MVT::v2i64, Custom);
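    // The Custom MUL entries look for widening-multiply shapes, e.g.
    // (mul (sext v2i32 A), (sext v2i32 B)) maps onto SMULL (UMULL for the
    // zext form) instead of scalarizing the missing 64-bit vector multiply.
    // (Illustrative; the actual matching is done in LowerMUL below.)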

    // Vector reductions.
    for (MVT VT : MVT::integer_valuetypes()) {
      setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);
    }
    for (MVT VT : MVT::fp_valuetypes()) {
      setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
    }

    setOperationAction(ISD::ANY_EXTEND, MVT::v4i32, Legal);
    setTruncStoreAction(MVT::v2i32, MVT::v2i16, Expand);
    // Likewise, narrowing and extending vector loads/stores aren't handled
    // directly.
    for (MVT VT : MVT::vector_valuetypes()) {
      setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);

      if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32) {
        setOperationAction(ISD::MULHS, VT, Custom);
        setOperationAction(ISD::MULHU, VT, Custom);
      } else {
        setOperationAction(ISD::MULHS, VT, Expand);
        setOperationAction(ISD::MULHU, VT, Expand);
      }
      setOperationAction(ISD::SMUL_LOHI, VT, Expand);
      setOperationAction(ISD::UMUL_LOHI, VT, Expand);

      setOperationAction(ISD::BSWAP, VT, Expand);

      for (MVT InnerVT : MVT::vector_valuetypes()) {
        setTruncStoreAction(VT, InnerVT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, VT, InnerVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, VT, InnerVT, Expand);
      }
    }

    // AArch64 has implementations of a lot of rounding-like FP operations.
    for (MVT Ty : {MVT::v2f32, MVT::v4f32, MVT::v2f64}) {
      setOperationAction(ISD::FFLOOR, Ty, Legal);
      setOperationAction(ISD::FNEARBYINT, Ty, Legal);
      setOperationAction(ISD::FCEIL, Ty, Legal);
      setOperationAction(ISD::FRINT, Ty, Legal);
      setOperationAction(ISD::FTRUNC, Ty, Legal);
      setOperationAction(ISD::FROUND, Ty, Legal);
    }

    setTruncStoreAction(MVT::v4i16, MVT::v4i8, Custom);
  }

  PredictableSelectIsExpensive = Subtarget->predictableSelectIsExpensive();
}

void AArch64TargetLowering::addTypeForNEON(MVT VT, MVT PromotedBitwiseVT) {
  assert(VT.isVector() && "VT should be a vector type");

  if (VT.isFloatingPoint()) {
    MVT PromoteTo = EVT(VT).changeVectorElementTypeToInteger().getSimpleVT();
    setOperationPromotedToType(ISD::LOAD, VT, PromoteTo);
    setOperationPromotedToType(ISD::STORE, VT, PromoteTo);
  }

  // Mark vector float intrinsics as expand.
  if (VT == MVT::v2f32 || VT == MVT::v4f32 || VT == MVT::v2f64) {
    setOperationAction(ISD::FSIN, VT, Expand);
    setOperationAction(ISD::FCOS, VT, Expand);
    setOperationAction(ISD::FPOW, VT, Expand);
    setOperationAction(ISD::FLOG, VT, Expand);
    setOperationAction(ISD::FLOG2, VT, Expand);
    setOperationAction(ISD::FLOG10, VT, Expand);
    setOperationAction(ISD::FEXP, VT, Expand);
    setOperationAction(ISD::FEXP2, VT, Expand);

    // But we do support custom-lowering for FCOPYSIGN.
    setOperationAction(ISD::FCOPYSIGN, VT, Custom);
  }

  setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
  setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
  setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
  setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
  setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);
  setOperationAction(ISD::SRA, VT, Custom);
  setOperationAction(ISD::SRL, VT, Custom);
  setOperationAction(ISD::SHL, VT, Custom);
  setOperationAction(ISD::AND, VT, Custom);
  setOperationAction(ISD::OR, VT, Custom);
  setOperationAction(ISD::SETCC, VT, Custom);
  setOperationAction(ISD::CONCAT_VECTORS, VT, Legal);

  setOperationAction(ISD::SELECT, VT, Expand);
  setOperationAction(ISD::SELECT_CC, VT, Expand);
  setOperationAction(ISD::VSELECT, VT, Expand);
  for (MVT InnerVT : MVT::all_valuetypes())
    setLoadExtAction(ISD::EXTLOAD, InnerVT, VT, Expand);

  // CNT supports only B element sizes.
  if (VT != MVT::v8i8 && VT != MVT::v16i8)
    setOperationAction(ISD::CTPOP, VT, Expand);

  setOperationAction(ISD::UDIV, VT, Expand);
  setOperationAction(ISD::SDIV, VT, Expand);
  setOperationAction(ISD::UREM, VT, Expand);
  setOperationAction(ISD::SREM, VT, Expand);
  setOperationAction(ISD::FREM, VT, Expand);

  setOperationAction(ISD::FP_TO_SINT, VT, Custom);
  setOperationAction(ISD::FP_TO_UINT, VT, Custom);

  if (!VT.isFloatingPoint())
    setOperationAction(ISD::ABS, VT, Legal);

  // [SU][MIN|MAX] are available for all NEON types apart from i64.
  if (!VT.isFloatingPoint() && VT != MVT::v2i64 && VT != MVT::v1i64)
    for (unsigned Opcode : {ISD::SMIN, ISD::SMAX, ISD::UMIN, ISD::UMAX})
      setOperationAction(Opcode, VT, Legal);

  // F[MIN|MAX][NUM|NAN] are available for all FP NEON types.
  if (VT.isFloatingPoint() &&
      (VT.getVectorElementType() != MVT::f16 || Subtarget->hasFullFP16()))
    for (unsigned Opcode : {ISD::FMINNAN, ISD::FMAXNAN,
                            ISD::FMINNUM, ISD::FMAXNUM})
      setOperationAction(Opcode, VT, Legal);

  if (Subtarget->isLittleEndian()) {
    for (unsigned im = (unsigned)ISD::PRE_INC;
         im != (unsigned)ISD::LAST_INDEXED_MODE; ++im) {
      setIndexedLoadAction(im, VT, Legal);
      setIndexedStoreAction(im, VT, Legal);
    }
  }
}

void AArch64TargetLowering::addDRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &AArch64::FPR64RegClass);
  addTypeForNEON(VT, MVT::v2i32);
}

void AArch64TargetLowering::addQRTypeForNEON(MVT VT) {
  addRegisterClass(VT, &AArch64::FPR128RegClass);
  addTypeForNEON(VT, MVT::v4i32);
}

EVT AArch64TargetLowering::getSetCCResultType(const DataLayout &, LLVMContext &,
                                              EVT VT) const {
  if (!VT.isVector())
    return MVT::i32;
  return VT.changeVectorElementTypeToInteger();
}

static bool optimizeLogicalImm(SDValue Op, unsigned Size, uint64_t Imm,
                               const APInt &Demanded,
                               TargetLowering::TargetLoweringOpt &TLO,
                               unsigned NewOpc) {
  uint64_t OldImm = Imm, NewImm, Enc;
  uint64_t Mask = ((uint64_t)(-1LL) >> (64 - Size)), OrigMask = Mask;

  // Return if the immediate is already all zeros, all ones, a bimm32 or a
  // bimm64.
  if (Imm == 0 || Imm == Mask ||
      AArch64_AM::isLogicalImmediate(Imm & Mask, Size))
    return false;

  unsigned EltSize = Size;
  uint64_t DemandedBits = Demanded.getZExtValue();

  // Clear bits that are not demanded.
  Imm &= DemandedBits;

  while (true) {
    // The goal here is to set the non-demanded bits in a way that minimizes
    // the number of transitions between 0 and 1. In order to achieve this
    // goal, we set the non-demanded bits to the value of the preceding
    // demanded bits.
    // For example, if we have an immediate 0bx10xx0x1 ('x' indicates a
    // non-demanded bit), we copy bit0 (1) to the least significant 'x',
    // bit2 (0) to 'xx', and bit6 (1) to the most significant 'x'.
    // The final result is 0b11000011.
    uint64_t NonDemandedBits = ~DemandedBits;
    uint64_t InvertedImm = ~Imm & DemandedBits;
    uint64_t RotatedImm =
        ((InvertedImm << 1) | (InvertedImm >> (EltSize - 1) & 1)) &
        NonDemandedBits;
    uint64_t Sum = RotatedImm + NonDemandedBits;
    bool Carry = NonDemandedBits & ~Sum & (1ULL << (EltSize - 1));
    uint64_t Ones = (Sum + Carry) & NonDemandedBits;
    NewImm = (Imm | Ones) & Mask;

    // If NewImm or its bitwise NOT is a shifted mask, it is a bitmask
    // immediate or all-ones or all-zeros, in which case we can stop searching.
    // Otherwise, we halve the element size and continue the search.
    if (isShiftedMask_64(NewImm) || isShiftedMask_64(~(NewImm | ~Mask)))
      break;

    // We cannot shrink the element size any further if it is 2 bits.
    if (EltSize == 2)
      return false;

    EltSize /= 2;
    Mask >>= EltSize;
    uint64_t Hi = Imm >> EltSize, DemandedBitsHi = DemandedBits >> EltSize;

    // Return if there is a mismatch in any of the demanded bits of Imm and Hi.
    if (((Imm ^ Hi) & (DemandedBits & DemandedBitsHi) & Mask) != 0)
      return false;

    // Merge the upper and lower halves of Imm and DemandedBits.
    Imm |= Hi;
    DemandedBits |= DemandedBitsHi;
  }

  ++NumOptimizedImms;
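
  // A worked instance of the search above, shown on a single 8-bit element
  // for brevity (the 0bx10xx0x1 example from the comment in the loop):
  //   DemandedBits    = 0b01100101, Imm & DemandedBits = 0b01000001
  //   NonDemandedBits = 0b10011010
  //   InvertedImm     = 0b00100100
  //   RotatedImm      = 0b00001000
  //   Sum             = 0b10100010, Carry = 0
  //   Ones            = 0b10000010
  // giving NewImm = 0b11000011, whose complement 0b00111100 is a shifted
  // mask, so the new value is encodable as a logical immediate.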

  // Replicate the element across the register width.
  while (EltSize < Size) {
    NewImm |= NewImm << EltSize;
    EltSize *= 2;
  }

  (void)OldImm;
  assert(((OldImm ^ NewImm) & Demanded.getZExtValue()) == 0 &&
         "demanded bits should never be altered");
  assert(OldImm != NewImm && "the new imm shouldn't be equal to the old imm");

  // Create the new constant immediate node.
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue New;

  // If the new constant immediate is all-zeros or all-ones, let the target
  // independent DAG combine optimize this node.
  if (NewImm == 0 || NewImm == OrigMask) {
    New = TLO.DAG.getNode(Op.getOpcode(), DL, VT, Op.getOperand(0),
                          TLO.DAG.getConstant(NewImm, DL, VT));
    // Otherwise, create a machine node so that target independent DAG combine
    // doesn't undo this optimization.
  } else {
    Enc = AArch64_AM::encodeLogicalImmediate(NewImm, Size);
    SDValue EncConst = TLO.DAG.getTargetConstant(Enc, DL, VT);
    New = SDValue(
        TLO.DAG.getMachineNode(NewOpc, DL, VT, Op.getOperand(0), EncConst), 0);
  }

  return TLO.CombineTo(Op, New);
}

bool AArch64TargetLowering::targetShrinkDemandedConstant(
    SDValue Op, const APInt &Demanded, TargetLoweringOpt &TLO) const {
  // Delay this optimization to as late as possible.
  if (!TLO.LegalOps)
    return false;

  if (!EnableOptimizeLogicalImm)
    return false;

  EVT VT = Op.getValueType();
  if (VT.isVector())
    return false;

  unsigned Size = VT.getSizeInBits();
  assert((Size == 32 || Size == 64) &&
         "i32 or i64 is expected after legalization.");

  // Exit early if we demand all bits.
  if (Demanded.countPopulation() == Size)
    return false;

  unsigned NewOpc;
  switch (Op.getOpcode()) {
  default:
    return false;
  case ISD::AND:
    NewOpc = Size == 32 ? AArch64::ANDWri : AArch64::ANDXri;
    break;
  case ISD::OR:
    NewOpc = Size == 32 ? AArch64::ORRWri : AArch64::ORRXri;
    break;
  case ISD::XOR:
    NewOpc = Size == 32 ? AArch64::EORWri : AArch64::EORXri;
    break;
  }
  ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
  if (!C)
    return false;
  uint64_t Imm = C->getZExtValue();
  return optimizeLogicalImm(Op, Size, Imm, Demanded, TLO, NewOpc);
}

/// computeKnownBitsForTargetNode - Determine which of the bits specified in
/// Mask are known to be either zero or one, and return them in Known.
void AArch64TargetLowering::computeKnownBitsForTargetNode(
    const SDValue Op, KnownBits &Known,
    const APInt &DemandedElts, const SelectionDAG &DAG, unsigned Depth) const {
  switch (Op.getOpcode()) {
  default:
    break;
  case AArch64ISD::CSEL: {
    KnownBits Known2;
    DAG.computeKnownBits(Op->getOperand(0), Known, Depth + 1);
    DAG.computeKnownBits(Op->getOperand(1), Known2, Depth + 1);
    Known.Zero &= Known2.Zero;
    Known.One &= Known2.One;
    break;
  }
  case ISD::INTRINSIC_W_CHAIN: {
    ConstantSDNode *CN = cast<ConstantSDNode>(Op->getOperand(1));
    Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue());
    switch (IntID) {
    default: return;
    case Intrinsic::aarch64_ldaxr:
    case Intrinsic::aarch64_ldxr: {
      unsigned BitWidth = Known.getBitWidth();
      EVT VT = cast<MemIntrinsicSDNode>(Op)->getMemoryVT();
      unsigned MemBits = VT.getScalarSizeInBits();
      Known.Zero |= APInt::getHighBitsSet(BitWidth, BitWidth - MemBits);
      return;
    }
    }
    break;
  }
  case ISD::INTRINSIC_WO_CHAIN:
  case ISD::INTRINSIC_VOID: {
    unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
    switch (IntNo) {
    default:
      break;
    case Intrinsic::aarch64_neon_umaxv:
    case Intrinsic::aarch64_neon_uminv: {
      // Figure out the datatype of the vector operand. The UMINV instruction
      // will zero extend the result, so we can mark as known zero all the
      // bits larger than the element datatype. 32-bit or larger doesn't need
      // this as those are legal types and will be handled by isel directly.
      MVT VT = Op.getOperand(1).getValueType().getSimpleVT();
      unsigned BitWidth = Known.getBitWidth();
      if (VT == MVT::v8i8 || VT == MVT::v16i8) {
        assert(BitWidth >= 8 && "Unexpected width!");
        APInt Mask = APInt::getHighBitsSet(BitWidth, BitWidth - 8);
        Known.Zero |= Mask;
      } else if (VT == MVT::v4i16 || VT == MVT::v8i16) {
        assert(BitWidth >= 16 && "Unexpected width!");
        APInt Mask = APInt::getHighBitsSet(BitWidth, BitWidth - 16);
        Known.Zero |= Mask;
      }
      break;
    }
    }
  }
  }
}
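
// A note on the ldxr/ldaxr case above (illustrative numbers): a load of an i8
// through these intrinsics has MemBits == 8, so every result bit above bit 7
// is reported as known-zero, which lets later combines remove redundant
// zero-extensions of the loaded value.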

MVT AArch64TargetLowering::getScalarShiftAmountTy(const DataLayout &DL,
                                                  EVT) const {
  return MVT::i64;
}

bool AArch64TargetLowering::allowsMisalignedMemoryAccesses(EVT VT,
                                                           unsigned AddrSpace,
                                                           unsigned Align,
                                                           bool *Fast) const {
  if (Subtarget->requiresStrictAlign())
    return false;

  if (Fast) {
    // Some CPUs are fine with unaligned stores except for 128-bit ones.
    *Fast = !Subtarget->isMisaligned128StoreSlow() || VT.getStoreSize() != 16 ||
            // See comments in performSTORECombine() for more details about
            // these conditions.

            // Code that uses clang vector extensions can mark that it
            // wants unaligned accesses to be treated as fast by
            // underspecifying alignment to be 1 or 2.
            Align <= 2 ||

            // Disregard v2i64. Memcpy lowering produces those and splitting
            // them regresses performance on micro-benchmarks and olden/bh.
            VT == MVT::v2i64;
  }
  return true;
}

FastISel *
AArch64TargetLowering::createFastISel(FunctionLoweringInfo &funcInfo,
                                      const TargetLibraryInfo *libInfo) const {
  return AArch64::createFastISel(funcInfo, libInfo);
}

const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((AArch64ISD::NodeType)Opcode) {
  case AArch64ISD::FIRST_NUMBER: break;
  case AArch64ISD::CALL: return "AArch64ISD::CALL";
  case AArch64ISD::ADRP: return "AArch64ISD::ADRP";
  case AArch64ISD::ADDlow: return "AArch64ISD::ADDlow";
  case AArch64ISD::LOADgot: return "AArch64ISD::LOADgot";
  case AArch64ISD::RET_FLAG: return "AArch64ISD::RET_FLAG";
  case AArch64ISD::BRCOND: return "AArch64ISD::BRCOND";
  case AArch64ISD::CSEL: return "AArch64ISD::CSEL";
  case AArch64ISD::FCSEL: return "AArch64ISD::FCSEL";
  case AArch64ISD::CSINV: return "AArch64ISD::CSINV";
  case AArch64ISD::CSNEG: return "AArch64ISD::CSNEG";
  case AArch64ISD::CSINC: return "AArch64ISD::CSINC";
  case AArch64ISD::THREAD_POINTER: return "AArch64ISD::THREAD_POINTER";
  case AArch64ISD::TLSDESC_CALLSEQ: return "AArch64ISD::TLSDESC_CALLSEQ";
  case AArch64ISD::ADC: return "AArch64ISD::ADC";
  case AArch64ISD::SBC: return "AArch64ISD::SBC";
  case AArch64ISD::ADDS: return "AArch64ISD::ADDS";
  case AArch64ISD::SUBS: return "AArch64ISD::SUBS";
  case AArch64ISD::ADCS: return "AArch64ISD::ADCS";
  case AArch64ISD::SBCS: return "AArch64ISD::SBCS";
  case AArch64ISD::ANDS: return "AArch64ISD::ANDS";
  case AArch64ISD::CCMP: return "AArch64ISD::CCMP";
  case AArch64ISD::CCMN: return "AArch64ISD::CCMN";
  case AArch64ISD::FCCMP: return "AArch64ISD::FCCMP";
  case AArch64ISD::FCMP: return "AArch64ISD::FCMP";
  case AArch64ISD::DUP: return "AArch64ISD::DUP";
  case AArch64ISD::DUPLANE8: return "AArch64ISD::DUPLANE8";
  case AArch64ISD::DUPLANE16: return "AArch64ISD::DUPLANE16";
  case AArch64ISD::DUPLANE32: return "AArch64ISD::DUPLANE32";
  case AArch64ISD::DUPLANE64: return "AArch64ISD::DUPLANE64";
  case AArch64ISD::MOVI: return "AArch64ISD::MOVI";
  case AArch64ISD::MOVIshift: return "AArch64ISD::MOVIshift";
  case AArch64ISD::MOVIedit: return "AArch64ISD::MOVIedit";
  case AArch64ISD::MOVImsl: return "AArch64ISD::MOVImsl";
  case AArch64ISD::FMOV: return "AArch64ISD::FMOV";
  case AArch64ISD::MVNIshift: return "AArch64ISD::MVNIshift";
  case AArch64ISD::MVNImsl: return "AArch64ISD::MVNImsl";
  case AArch64ISD::BICi: return "AArch64ISD::BICi";
  case AArch64ISD::ORRi: return "AArch64ISD::ORRi";
  case AArch64ISD::BSL: return "AArch64ISD::BSL";
  case AArch64ISD::NEG: return "AArch64ISD::NEG";
  case AArch64ISD::EXTR: return "AArch64ISD::EXTR";
  case AArch64ISD::ZIP1: return "AArch64ISD::ZIP1";
  case AArch64ISD::ZIP2: return "AArch64ISD::ZIP2";
  case AArch64ISD::UZP1: return "AArch64ISD::UZP1";
  case AArch64ISD::UZP2: return "AArch64ISD::UZP2";
  case AArch64ISD::TRN1: return "AArch64ISD::TRN1";
  case AArch64ISD::TRN2: return "AArch64ISD::TRN2";
  case AArch64ISD::REV16: return "AArch64ISD::REV16";
  case AArch64ISD::REV32: return "AArch64ISD::REV32";
  case AArch64ISD::REV64: return "AArch64ISD::REV64";
  case AArch64ISD::EXT: return "AArch64ISD::EXT";
  case AArch64ISD::VSHL: return "AArch64ISD::VSHL";
  case AArch64ISD::VLSHR: return "AArch64ISD::VLSHR";
  case AArch64ISD::VASHR: return "AArch64ISD::VASHR";
"AArch64ISD::VASHR"; 1141 case AArch64ISD::CMEQ: return "AArch64ISD::CMEQ"; 1142 case AArch64ISD::CMGE: return "AArch64ISD::CMGE"; 1143 case AArch64ISD::CMGT: return "AArch64ISD::CMGT"; 1144 case AArch64ISD::CMHI: return "AArch64ISD::CMHI"; 1145 case AArch64ISD::CMHS: return "AArch64ISD::CMHS"; 1146 case AArch64ISD::FCMEQ: return "AArch64ISD::FCMEQ"; 1147 case AArch64ISD::FCMGE: return "AArch64ISD::FCMGE"; 1148 case AArch64ISD::FCMGT: return "AArch64ISD::FCMGT"; 1149 case AArch64ISD::CMEQz: return "AArch64ISD::CMEQz"; 1150 case AArch64ISD::CMGEz: return "AArch64ISD::CMGEz"; 1151 case AArch64ISD::CMGTz: return "AArch64ISD::CMGTz"; 1152 case AArch64ISD::CMLEz: return "AArch64ISD::CMLEz"; 1153 case AArch64ISD::CMLTz: return "AArch64ISD::CMLTz"; 1154 case AArch64ISD::FCMEQz: return "AArch64ISD::FCMEQz"; 1155 case AArch64ISD::FCMGEz: return "AArch64ISD::FCMGEz"; 1156 case AArch64ISD::FCMGTz: return "AArch64ISD::FCMGTz"; 1157 case AArch64ISD::FCMLEz: return "AArch64ISD::FCMLEz"; 1158 case AArch64ISD::FCMLTz: return "AArch64ISD::FCMLTz"; 1159 case AArch64ISD::SADDV: return "AArch64ISD::SADDV"; 1160 case AArch64ISD::UADDV: return "AArch64ISD::UADDV"; 1161 case AArch64ISD::SMINV: return "AArch64ISD::SMINV"; 1162 case AArch64ISD::UMINV: return "AArch64ISD::UMINV"; 1163 case AArch64ISD::SMAXV: return "AArch64ISD::SMAXV"; 1164 case AArch64ISD::UMAXV: return "AArch64ISD::UMAXV"; 1165 case AArch64ISD::NOT: return "AArch64ISD::NOT"; 1166 case AArch64ISD::BIT: return "AArch64ISD::BIT"; 1167 case AArch64ISD::CBZ: return "AArch64ISD::CBZ"; 1168 case AArch64ISD::CBNZ: return "AArch64ISD::CBNZ"; 1169 case AArch64ISD::TBZ: return "AArch64ISD::TBZ"; 1170 case AArch64ISD::TBNZ: return "AArch64ISD::TBNZ"; 1171 case AArch64ISD::TC_RETURN: return "AArch64ISD::TC_RETURN"; 1172 case AArch64ISD::PREFETCH: return "AArch64ISD::PREFETCH"; 1173 case AArch64ISD::SITOF: return "AArch64ISD::SITOF"; 1174 case AArch64ISD::UITOF: return "AArch64ISD::UITOF"; 1175 case AArch64ISD::NVCAST: return "AArch64ISD::NVCAST"; 1176 case AArch64ISD::SQSHL_I: return "AArch64ISD::SQSHL_I"; 1177 case AArch64ISD::UQSHL_I: return "AArch64ISD::UQSHL_I"; 1178 case AArch64ISD::SRSHR_I: return "AArch64ISD::SRSHR_I"; 1179 case AArch64ISD::URSHR_I: return "AArch64ISD::URSHR_I"; 1180 case AArch64ISD::SQSHLU_I: return "AArch64ISD::SQSHLU_I"; 1181 case AArch64ISD::WrapperLarge: return "AArch64ISD::WrapperLarge"; 1182 case AArch64ISD::LD2post: return "AArch64ISD::LD2post"; 1183 case AArch64ISD::LD3post: return "AArch64ISD::LD3post"; 1184 case AArch64ISD::LD4post: return "AArch64ISD::LD4post"; 1185 case AArch64ISD::ST2post: return "AArch64ISD::ST2post"; 1186 case AArch64ISD::ST3post: return "AArch64ISD::ST3post"; 1187 case AArch64ISD::ST4post: return "AArch64ISD::ST4post"; 1188 case AArch64ISD::LD1x2post: return "AArch64ISD::LD1x2post"; 1189 case AArch64ISD::LD1x3post: return "AArch64ISD::LD1x3post"; 1190 case AArch64ISD::LD1x4post: return "AArch64ISD::LD1x4post"; 1191 case AArch64ISD::ST1x2post: return "AArch64ISD::ST1x2post"; 1192 case AArch64ISD::ST1x3post: return "AArch64ISD::ST1x3post"; 1193 case AArch64ISD::ST1x4post: return "AArch64ISD::ST1x4post"; 1194 case AArch64ISD::LD1DUPpost: return "AArch64ISD::LD1DUPpost"; 1195 case AArch64ISD::LD2DUPpost: return "AArch64ISD::LD2DUPpost"; 1196 case AArch64ISD::LD3DUPpost: return "AArch64ISD::LD3DUPpost"; 1197 case AArch64ISD::LD4DUPpost: return "AArch64ISD::LD4DUPpost"; 1198 case AArch64ISD::LD1LANEpost: return "AArch64ISD::LD1LANEpost"; 1199 case AArch64ISD::LD2LANEpost: return "AArch64ISD::LD2LANEpost"; 
  case AArch64ISD::LD3LANEpost: return "AArch64ISD::LD3LANEpost";
  case AArch64ISD::LD4LANEpost: return "AArch64ISD::LD4LANEpost";
  case AArch64ISD::ST2LANEpost: return "AArch64ISD::ST2LANEpost";
  case AArch64ISD::ST3LANEpost: return "AArch64ISD::ST3LANEpost";
  case AArch64ISD::ST4LANEpost: return "AArch64ISD::ST4LANEpost";
  case AArch64ISD::SMULL: return "AArch64ISD::SMULL";
  case AArch64ISD::UMULL: return "AArch64ISD::UMULL";
  case AArch64ISD::FRECPE: return "AArch64ISD::FRECPE";
  case AArch64ISD::FRECPS: return "AArch64ISD::FRECPS";
  case AArch64ISD::FRSQRTE: return "AArch64ISD::FRSQRTE";
  case AArch64ISD::FRSQRTS: return "AArch64ISD::FRSQRTS";
  }
  return nullptr;
}

MachineBasicBlock *
AArch64TargetLowering::EmitF128CSEL(MachineInstr &MI,
                                    MachineBasicBlock *MBB) const {
  // We materialise the F128CSEL pseudo-instruction as some control flow and a
  // phi node:
  //
  // OrigBB:
  //     [... previous instrs leading to comparison ...]
  //     b.ne TrueBB
  //     b EndBB
  // TrueBB:
  //     ; Fallthrough
  // EndBB:
  //     Dest = PHI [IfTrue, TrueBB], [IfFalse, OrigBB]

  MachineFunction *MF = MBB->getParent();
  const TargetInstrInfo *TII = Subtarget->getInstrInfo();
  const BasicBlock *LLVM_BB = MBB->getBasicBlock();
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction::iterator It = ++MBB->getIterator();

  unsigned DestReg = MI.getOperand(0).getReg();
  unsigned IfTrueReg = MI.getOperand(1).getReg();
  unsigned IfFalseReg = MI.getOperand(2).getReg();
  unsigned CondCode = MI.getOperand(3).getImm();
  bool NZCVKilled = MI.getOperand(4).isKill();

  MachineBasicBlock *TrueBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MachineBasicBlock *EndBB = MF->CreateMachineBasicBlock(LLVM_BB);
  MF->insert(It, TrueBB);
  MF->insert(It, EndBB);

  // Transfer the rest of the current basic block to EndBB.
  EndBB->splice(EndBB->begin(), MBB, std::next(MachineBasicBlock::iterator(MI)),
                MBB->end());
  EndBB->transferSuccessorsAndUpdatePHIs(MBB);

  BuildMI(MBB, DL, TII->get(AArch64::Bcc)).addImm(CondCode).addMBB(TrueBB);
  BuildMI(MBB, DL, TII->get(AArch64::B)).addMBB(EndBB);
  MBB->addSuccessor(TrueBB);
  MBB->addSuccessor(EndBB);

  // TrueBB falls through to the end.
  TrueBB->addSuccessor(EndBB);

  if (!NZCVKilled) {
    TrueBB->addLiveIn(AArch64::NZCV);
    EndBB->addLiveIn(AArch64::NZCV);
  }

  BuildMI(*EndBB, EndBB->begin(), DL, TII->get(AArch64::PHI), DestReg)
      .addReg(IfTrueReg)
      .addMBB(TrueBB)
      .addReg(IfFalseReg)
      .addMBB(MBB);

  MI.eraseFromParent();
  return EndBB;
}

MachineBasicBlock *AArch64TargetLowering::EmitInstrWithCustomInserter(
    MachineInstr &MI, MachineBasicBlock *BB) const {
  switch (MI.getOpcode()) {
  default:
#ifndef NDEBUG
    MI.dump();
#endif
    llvm_unreachable("Unexpected instruction for custom inserter!");

  case AArch64::F128CSEL:
    return EmitF128CSEL(MI, BB);

  case TargetOpcode::STACKMAP:
  case TargetOpcode::PATCHPOINT:
    return emitPatchPoint(MI, BB);
  }
}

//===----------------------------------------------------------------------===//
// AArch64 Lowering private implementation.
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Lowering Code
//===----------------------------------------------------------------------===//

/// changeIntCCToAArch64CC - Convert a DAG integer condition code to an AArch64
/// CC.
static AArch64CC::CondCode changeIntCCToAArch64CC(ISD::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Unknown condition code!");
  case ISD::SETNE:
    return AArch64CC::NE;
  case ISD::SETEQ:
    return AArch64CC::EQ;
  case ISD::SETGT:
    return AArch64CC::GT;
  case ISD::SETGE:
    return AArch64CC::GE;
  case ISD::SETLT:
    return AArch64CC::LT;
  case ISD::SETLE:
    return AArch64CC::LE;
  case ISD::SETUGT:
    return AArch64CC::HI;
  case ISD::SETUGE:
    return AArch64CC::HS;
  case ISD::SETULT:
    return AArch64CC::LO;
  case ISD::SETULE:
    return AArch64CC::LS;
  }
}

/// changeFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64 CC.
static void changeFPCCToAArch64CC(ISD::CondCode CC,
                                  AArch64CC::CondCode &CondCode,
                                  AArch64CC::CondCode &CondCode2) {
  CondCode2 = AArch64CC::AL;
  switch (CC) {
  default:
    llvm_unreachable("Unknown FP condition!");
  case ISD::SETEQ:
  case ISD::SETOEQ:
    CondCode = AArch64CC::EQ;
    break;
  case ISD::SETGT:
  case ISD::SETOGT:
    CondCode = AArch64CC::GT;
    break;
  case ISD::SETGE:
  case ISD::SETOGE:
    CondCode = AArch64CC::GE;
    break;
  case ISD::SETOLT:
    CondCode = AArch64CC::MI;
    break;
  case ISD::SETOLE:
    CondCode = AArch64CC::LS;
    break;
  case ISD::SETONE:
    CondCode = AArch64CC::MI;
    CondCode2 = AArch64CC::GT;
    break;
  case ISD::SETO:
    CondCode = AArch64CC::VC;
    break;
  case ISD::SETUO:
    CondCode = AArch64CC::VS;
    break;
  case ISD::SETUEQ:
    CondCode = AArch64CC::EQ;
    CondCode2 = AArch64CC::VS;
    break;
  case ISD::SETUGT:
    CondCode = AArch64CC::HI;
    break;
  case ISD::SETUGE:
    CondCode = AArch64CC::PL;
    break;
  case ISD::SETLT:
  case ISD::SETULT:
    CondCode = AArch64CC::LT;
    break;
  case ISD::SETLE:
  case ISD::SETULE:
    CondCode = AArch64CC::LE;
    break;
  case ISD::SETNE:
  case ISD::SETUNE:
    CondCode = AArch64CC::NE;
    break;
  }
}

/// Convert a DAG fp condition code to an AArch64 CC.
/// This differs from changeFPCCToAArch64CC in that it returns cond codes that
/// should be AND'ed instead of OR'ed.
static void changeFPCCToANDAArch64CC(ISD::CondCode CC,
                                     AArch64CC::CondCode &CondCode,
                                     AArch64CC::CondCode &CondCode2) {
  CondCode2 = AArch64CC::AL;
  switch (CC) {
  default:
    changeFPCCToAArch64CC(CC, CondCode, CondCode2);
    assert(CondCode2 == AArch64CC::AL);
    break;
  case ISD::SETONE:
    // (a one b)
    // == ((a olt b) || (a ogt b))
    // == ((a ord b) && (a une b))
    CondCode = AArch64CC::VC;
    CondCode2 = AArch64CC::NE;
    break;
  case ISD::SETUEQ:
    // (a ueq b)
    // == ((a uno b) || (a oeq b))
    // == ((a ule b) && (a uge b))
    CondCode = AArch64CC::PL;
    CondCode2 = AArch64CC::LE;
    break;
  }
}

/// changeVectorFPCCToAArch64CC - Convert a DAG fp condition code to an AArch64
/// CC usable with the vector instructions. Fewer operations are available
/// without a real NZCV register, so we have to use less efficient combinations
/// to get the same effect.
static void changeVectorFPCCToAArch64CC(ISD::CondCode CC,
                                        AArch64CC::CondCode &CondCode,
                                        AArch64CC::CondCode &CondCode2,
                                        bool &Invert) {
  Invert = false;
  switch (CC) {
  default:
    // Mostly the scalar mappings work fine.
    changeFPCCToAArch64CC(CC, CondCode, CondCode2);
    break;
  case ISD::SETUO:
    Invert = true;
    LLVM_FALLTHROUGH;
  case ISD::SETO:
    CondCode = AArch64CC::MI;
    CondCode2 = AArch64CC::GE;
    break;
  case ISD::SETUEQ:
  case ISD::SETULT:
  case ISD::SETULE:
  case ISD::SETUGT:
  case ISD::SETUGE:
    // All of the compare-mask comparisons are ordered, but we can switch
    // between the two by a double inversion. E.g. ULE == !OGT.
    Invert = true;
    changeFPCCToAArch64CC(getSetCCInverse(CC, false), CondCode, CondCode2);
    break;
  }
}

static bool isLegalArithImmed(uint64_t C) {
  // Matches AArch64DAGToDAGISel::SelectArithImmed().
  bool IsLegal = (C >> 12 == 0) || ((C & 0xFFFULL) == 0 && C >> 24 == 0);
  LLVM_DEBUG(dbgs() << "Is imm " << C
                    << " legal: " << (IsLegal ? "yes\n" : "no\n"));
  return IsLegal;
}
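
// For instance: 0xFFF (4095) and 0xFFF000 (4095 << 12) are both legal
// arithmetic immediates, but 0x1001 is not, since it needs bits in both the
// shifted and unshifted ranges.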
Fewer operations are available
1422 /// without a real NZCV register, so we have to use less efficient combinations
1423 /// to get the same effect.
1424 static void changeVectorFPCCToAArch64CC(ISD::CondCode CC,
1425                                         AArch64CC::CondCode &CondCode,
1426                                         AArch64CC::CondCode &CondCode2,
1427                                         bool &Invert) {
1428   Invert = false;
1429   switch (CC) {
1430   default:
1431     // Mostly the scalar mappings work fine.
1432     changeFPCCToAArch64CC(CC, CondCode, CondCode2);
1433     break;
1434   case ISD::SETUO:
1435     Invert = true;
1436     LLVM_FALLTHROUGH;
1437   case ISD::SETO:
1438     CondCode = AArch64CC::MI;
1439     CondCode2 = AArch64CC::GE;
1440     break;
1441   case ISD::SETUEQ:
1442   case ISD::SETULT:
1443   case ISD::SETULE:
1444   case ISD::SETUGT:
1445   case ISD::SETUGE:
1446     // All of the compare-mask comparisons are ordered, but we can switch
1447     // between the two by a double inversion. E.g. ULE == !OGT.
1448     Invert = true;
1449     changeFPCCToAArch64CC(getSetCCInverse(CC, false), CondCode, CondCode2);
1450     break;
1451   }
1452 }
1453
1454 static bool isLegalArithImmed(uint64_t C) {
1455   // Matches AArch64DAGToDAGISel::SelectArithImmed().
1456   bool IsLegal = (C >> 12 == 0) || ((C & 0xFFFULL) == 0 && C >> 24 == 0);
1457   LLVM_DEBUG(dbgs() << "Is imm " << C
1458                     << " legal: " << (IsLegal ? "yes\n" : "no\n"));
1459   return IsLegal;
1460 }
1461
1462 static SDValue emitComparison(SDValue LHS, SDValue RHS, ISD::CondCode CC,
1463                               const SDLoc &dl, SelectionDAG &DAG) {
1464   EVT VT = LHS.getValueType();
1465   const bool FullFP16 =
1466       static_cast<const AArch64Subtarget &>(DAG.getSubtarget()).hasFullFP16();
1467
1468   if (VT.isFloatingPoint()) {
1469     assert(VT != MVT::f128);
1470     if (VT == MVT::f16 && !FullFP16) {
1471       LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, LHS);
1472       RHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, RHS);
1473       VT = MVT::f32;
1474     }
1475     return DAG.getNode(AArch64ISD::FCMP, dl, VT, LHS, RHS);
1476   }
1477
1478   // The CMP instruction is just an alias for SUBS, and representing it as
1479   // SUBS means that it's possible to get CSE with subtract operations.
1480   // A later phase can perform the optimization of setting the destination
1481   // register to WZR/XZR if it ends up being unused.
1482   unsigned Opcode = AArch64ISD::SUBS;
1483
1484   if (RHS.getOpcode() == ISD::SUB && isNullConstant(RHS.getOperand(0)) &&
1485       (CC == ISD::SETEQ || CC == ISD::SETNE)) {
1486     // We'd like to combine a (CMP op1, (sub 0, op2)) into a CMN instruction on
1487     // the grounds that "op1 - (-op2) == op1 + op2". However, the C and V flags
1488     // can be set differently by this operation. It comes down to whether
1489     // "SInt(~op2)+1 == SInt(~op2+1)" (and the same for UInt). If they are,
1490     // then everything is fine. If not, then the optimization is wrong. Thus
1491     // general comparisons are only valid if op2 != 0.
1492
1493     // So, finally, the only LLVM-native comparisons that don't mention C and V
1494     // are SETEQ and SETNE. They're the only ones we can safely use CMN for in
1495     // the absence of information about op2.
1496     Opcode = AArch64ISD::ADDS;
1497     RHS = RHS.getOperand(1);
1498   } else if (LHS.getOpcode() == ISD::AND && isNullConstant(RHS) &&
1499              !isUnsignedIntSetCC(CC)) {
1500     // Similarly, (CMP (and X, Y), 0) can be implemented with a TST
1501     // (a.k.a. ANDS) except that the flags are only guaranteed to work for one
1502     // of the signed comparisons.
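    // As an illustrative sketch (not tied to any particular input): with a
    // signed test such as SETEQ, "and w8, w0, #0xff; cmp w8, #0; b.eq" can
    // instead be emitted as "tst w0, #0xff; b.eq".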
1503     Opcode = AArch64ISD::ANDS;
1504     RHS = LHS.getOperand(1);
1505     LHS = LHS.getOperand(0);
1506   }
1507
1508   return DAG.getNode(Opcode, dl, DAG.getVTList(VT, MVT_CC), LHS, RHS)
1509       .getValue(1);
1510 }
1511
1512 /// \defgroup AArch64CCMP CMP;CCMP matching
1513 ///
1514 /// These functions deal with the formation of CMP;CCMP;... sequences.
1515 /// The CCMP/CCMN/FCCMP/FCCMPE instructions allow the conditional execution of
1516 /// a comparison. They set the NZCV flags to a predefined value if their
1517 /// predicate is false. This allows us to express arbitrary conjunctions; for
1518 /// example, "cmp 0 (and (setCA (cmp A)) (setCB (cmp B)))" can be
1519 /// expressed as:
1520 ///   cmp A
1521 ///   ccmp B, inv(CB), CA
1522 ///   check for CB flags
1523 ///
1524 /// This naturally lets us implement chains of AND operations with SETCC
1525 /// operands. And we can even implement some other situations by transforming
1526 /// them:
1527 /// - We can implement (NEG SETCC), i.e. negating a single comparison, by
1528 ///   negating the flags used in a CCMP/FCCMP operation.
1529 /// - We can negate the result of a whole chain of CMP/CCMP/FCCMP operations
1530 ///   by negating the flags we test for afterwards; i.e.
1531 ///   NEG (CMP CCMP CCMP ...) can be implemented.
1532 /// - Note that we can only ever negate all previously processed results.
1533 ///   What we cannot implement by flipping the flags to test is a negation
1534 ///   of two sub-trees (because the negation affects all sub-trees emitted so
1535 ///   far, so the 2nd sub-tree we emit would also affect the first).
1536 /// With those tools we can implement some OR operations:
1537 /// - (OR (SETCC A) (SETCC B)) can be implemented via:
1538 ///   NEG (AND (NEG (SETCC A)) (NEG (SETCC B)))
1539 /// - After transforming OR to NEG/AND combinations we may be able to use NEG
1540 ///   elimination rules from earlier to implement the whole thing as a
1541 ///   CCMP/FCCMP chain.
1542 ///
1543 /// As a complete example:
1544 ///   or (or (setCA (cmp A)) (setCB (cmp B)))
1545 ///      (and (setCC (cmp C)) (setCD (cmp D)))
1546 /// can be reassociated to:
1547 ///   or (and (setCC (cmp C)) (setCD (cmp D)))
1548 ///      (or (setCA (cmp A)) (setCB (cmp B)))
1549 /// which can then be transformed to:
1550 ///   not (and (not (and (setCC (cmp C)) (setCD (cmp D))))
1551 ///            (and (not (setCA (cmp A))) (not (setCB (cmp B)))))
1552 /// which can be implemented as:
1553 ///   cmp C
1554 ///   ccmp D, inv(CD), CC
1555 ///   ccmp A, CA, inv(CD)
1556 ///   ccmp B, CB, inv(CA)
1557 ///   check for CB flags
1558 ///
1559 /// A counterexample is "or (and A B) (and C D)", which translates to
1560 /// not (and (not (and (not A) (not B))) (not (and (not C) (not D)))); we
1561 /// can only implement one of the inner (not) operations, but not both!
1562 /// @{
1563
1564 /// Create a conditional comparison; use CCMP, CCMN or FCCMP as appropriate.
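/// As a small worked illustration (not lifted from real compiler output):
/// for "(and (setcc eq, a, b) (setcc ne, c, d))" one possible emission is
///   cmp  c, d
///   ccmp a, b, #0, ne
///   b.eq <target>
/// where #0 is getNZCVToSatisfyCondCode(inv(EQ)), i.e. an NZCV value with Z
/// clear, so a failed NE predicate automatically fails the final EQ test.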
1565 static SDValue emitConditionalComparison(SDValue LHS, SDValue RHS,
1566                                          ISD::CondCode CC, SDValue CCOp,
1567                                          AArch64CC::CondCode Predicate,
1568                                          AArch64CC::CondCode OutCC,
1569                                          const SDLoc &DL, SelectionDAG &DAG) {
1570   unsigned Opcode = 0;
1571   const bool FullFP16 =
1572       static_cast<const AArch64Subtarget &>(DAG.getSubtarget()).hasFullFP16();
1573
1574   if (LHS.getValueType().isFloatingPoint()) {
1575     assert(LHS.getValueType() != MVT::f128);
1576     if (LHS.getValueType() == MVT::f16 && !FullFP16) {
1577       LHS = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, LHS);
1578       RHS = DAG.getNode(ISD::FP_EXTEND, DL, MVT::f32, RHS);
1579     }
1580     Opcode = AArch64ISD::FCCMP;
1581   } else if (RHS.getOpcode() == ISD::SUB) {
1582     SDValue SubOp0 = RHS.getOperand(0);
1583     if (isNullConstant(SubOp0) && (CC == ISD::SETEQ || CC == ISD::SETNE)) {
1584       // See emitComparison() on why we can only do this for SETEQ and SETNE.
1585       Opcode = AArch64ISD::CCMN;
1586       RHS = RHS.getOperand(1);
1587     }
1588   }
1589   if (Opcode == 0)
1590     Opcode = AArch64ISD::CCMP;
1591
1592   SDValue Condition = DAG.getConstant(Predicate, DL, MVT_CC);
1593   AArch64CC::CondCode InvOutCC = AArch64CC::getInvertedCondCode(OutCC);
1594   unsigned NZCV = AArch64CC::getNZCVToSatisfyCondCode(InvOutCC);
1595   SDValue NZCVOp = DAG.getConstant(NZCV, DL, MVT::i32);
1596   return DAG.getNode(Opcode, DL, MVT_CC, LHS, RHS, NZCVOp, Condition, CCOp);
1597 }
1598
1599 /// Returns true if @p Val is a tree of AND/OR/SETCC operations that can be
1600 /// expressed as a conjunction. See \ref AArch64CCMP.
1601 /// \param CanNegate    Set to true if we can negate the whole sub-tree just by
1602 ///                     changing the conditions on the SETCC tests.
1603 ///                     (this means we can call emitConjunctionRec() with
1604 ///                      Negate==true on this sub-tree)
1605 /// \param MustBeFirst  Set to true if this subtree needs to be negated and we
1606 ///                     cannot do the negation naturally. We are required to
1607 ///                     emit the subtree first in this case.
1608 /// \param WillNegate   Is true if we are called when the result of this
1609 ///                     subexpression must be negated. This happens when the
1610 ///                     outer expression is an OR. We can use this fact to know
1611 ///                     that we have a double negation (or (or ...) ...) that
1612 ///                     can be implemented for free.
1613 static bool canEmitConjunction(const SDValue Val, bool &CanNegate,
1614                                bool &MustBeFirst, bool WillNegate,
1615                                unsigned Depth = 0) {
1616   if (!Val.hasOneUse())
1617     return false;
1618   unsigned Opcode = Val->getOpcode();
1619   if (Opcode == ISD::SETCC) {
1620     if (Val->getOperand(0).getValueType() == MVT::f128)
1621       return false;
1622     CanNegate = true;
1623     MustBeFirst = false;
1624     return true;
1625   }
1626   // Protect against exponential runtime and stack overflow.
1627   if (Depth > 6)
1628     return false;
1629   if (Opcode == ISD::AND || Opcode == ISD::OR) {
1630     bool IsOR = Opcode == ISD::OR;
1631     SDValue O0 = Val->getOperand(0);
1632     SDValue O1 = Val->getOperand(1);
1633     bool CanNegateL;
1634     bool MustBeFirstL;
1635     if (!canEmitConjunction(O0, CanNegateL, MustBeFirstL, IsOR, Depth+1))
1636       return false;
1637     bool CanNegateR;
1638     bool MustBeFirstR;
1639     if (!canEmitConjunction(O1, CanNegateR, MustBeFirstR, IsOR, Depth+1))
1640       return false;
1641
1642     if (MustBeFirstL && MustBeFirstR)
1643       return false;
1644
1645     if (IsOR) {
1646       // For an OR expression we need to be able to naturally negate at least
1647       // one side or we cannot do the transformation at all.
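      // For instance, in the counterexample from the AArch64CCMP overview,
      // "or (and A B) (and C D)", neither AND sub-tree reports CanNegate, so
      // the check below rejects the expression.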
1648       if (!CanNegateL && !CanNegateR)
1649         return false;
1650       // If the result of the OR will be negated and we can naturally negate
1651       // the leaves, then this sub-tree as a whole negates naturally.
1652       CanNegate = WillNegate && CanNegateL && CanNegateR;
1653       // If we cannot naturally negate the whole sub-tree, then this must be
1654       // emitted first.
1655       MustBeFirst = !CanNegate;
1656     } else {
1657       assert(Opcode == ISD::AND && "Must be OR or AND");
1658       // We cannot naturally negate an AND operation.
1659       CanNegate = false;
1660       MustBeFirst = MustBeFirstL || MustBeFirstR;
1661     }
1662     return true;
1663   }
1664   return false;
1665 }
1666
1667 /// Emit a conjunction or disjunction tree with the CMP/FCMP followed by a
1668 /// chain of CCMP/FCCMP ops. See @ref AArch64CCMP.
1669 /// Tries to transform the given i1 producing node @p Val to a series of
1670 /// compare and conditional compare operations. @returns an NZCV flags
1671 /// producing node and sets @p OutCC to the flags that should be tested, or
1672 /// returns SDValue() if the transformation was not possible.
1673 /// \p Negate is true if we want this sub-tree to be negated just by changing
1674 /// SETCC conditions.
1675 static SDValue emitConjunctionRec(SelectionDAG &DAG, SDValue Val,
1676     AArch64CC::CondCode &OutCC, bool Negate, SDValue CCOp,
1677     AArch64CC::CondCode Predicate) {
1678   // We're at a tree leaf; produce a conditional comparison operation.
1679   unsigned Opcode = Val->getOpcode();
1680   if (Opcode == ISD::SETCC) {
1681     SDValue LHS = Val->getOperand(0);
1682     SDValue RHS = Val->getOperand(1);
1683     ISD::CondCode CC = cast<CondCodeSDNode>(Val->getOperand(2))->get();
1684     bool isInteger = LHS.getValueType().isInteger();
1685     if (Negate)
1686       CC = getSetCCInverse(CC, isInteger);
1687     SDLoc DL(Val);
1688     // Determine OutCC and handle FP special case.
1689     if (isInteger) {
1690       OutCC = changeIntCCToAArch64CC(CC);
1691     } else {
1692       assert(LHS.getValueType().isFloatingPoint());
1693       AArch64CC::CondCode ExtraCC;
1694       changeFPCCToANDAArch64CC(CC, OutCC, ExtraCC);
1695       // Some floating point conditions can't be tested with a single condition
1696       // code. Construct an additional comparison in this case.
1697       if (ExtraCC != AArch64CC::AL) {
1698         SDValue ExtraCmp;
1699         if (!CCOp.getNode())
1700           ExtraCmp = emitComparison(LHS, RHS, CC, DL, DAG);
1701         else
1702           ExtraCmp = emitConditionalComparison(LHS, RHS, CC, CCOp, Predicate,
1703                                                ExtraCC, DL, DAG);
1704         CCOp = ExtraCmp;
1705         Predicate = ExtraCC;
1706       }
1707     }
1708
1709     // Produce a normal comparison if we are first in the chain.
1710     if (!CCOp)
1711       return emitComparison(LHS, RHS, CC, DL, DAG);
1712     // Otherwise produce a ccmp.
1713     return emitConditionalComparison(LHS, RHS, CC, CCOp, Predicate, OutCC, DL,
1714                                      DAG);
1715   }
1716   assert(Val->hasOneUse() && "Valid conjunction/disjunction tree");
1717
1718   bool IsOR = Opcode == ISD::OR;
1719
1720   SDValue LHS = Val->getOperand(0);
1721   bool CanNegateL;
1722   bool MustBeFirstL;
1723   bool ValidL = canEmitConjunction(LHS, CanNegateL, MustBeFirstL, IsOR);
1724   assert(ValidL && "Valid conjunction/disjunction tree");
1725   (void)ValidL;
1726
1727   SDValue RHS = Val->getOperand(1);
1728   bool CanNegateR;
1729   bool MustBeFirstR;
1730   bool ValidR = canEmitConjunction(RHS, CanNegateR, MustBeFirstR, IsOR);
1731   assert(ValidR && "Valid conjunction/disjunction tree");
1732   (void)ValidR;
1733
1734   // Swap the sub-tree that must come first to the right side.
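  // (The right-hand sub-tree is emitted first; see the "Emit sub-trees"
  // step at the bottom of this function.)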
1735 if (MustBeFirstL) { 1736 assert(!MustBeFirstR && "Valid conjunction/disjunction tree"); 1737 std::swap(LHS, RHS); 1738 std::swap(CanNegateL, CanNegateR); 1739 std::swap(MustBeFirstL, MustBeFirstR); 1740 } 1741 1742 bool NegateR; 1743 bool NegateAfterR; 1744 bool NegateL; 1745 bool NegateAfterAll; 1746 if (Opcode == ISD::OR) { 1747 // Swap the sub-tree that we can negate naturally to the left. 1748 if (!CanNegateL) { 1749 assert(CanNegateR && "at least one side must be negatable"); 1750 assert(!MustBeFirstR && "invalid conjunction/disjunction tree"); 1751 assert(!Negate); 1752 std::swap(LHS, RHS); 1753 NegateR = false; 1754 NegateAfterR = true; 1755 } else { 1756 // Negate the left sub-tree if possible, otherwise negate the result. 1757 NegateR = CanNegateR; 1758 NegateAfterR = !CanNegateR; 1759 } 1760 NegateL = true; 1761 NegateAfterAll = !Negate; 1762 } else { 1763 assert(Opcode == ISD::AND && "Valid conjunction/disjunction tree"); 1764 assert(!Negate && "Valid conjunction/disjunction tree"); 1765 1766 NegateL = false; 1767 NegateR = false; 1768 NegateAfterR = false; 1769 NegateAfterAll = false; 1770 } 1771 1772 // Emit sub-trees. 1773 AArch64CC::CondCode RHSCC; 1774 SDValue CmpR = emitConjunctionRec(DAG, RHS, RHSCC, NegateR, CCOp, Predicate); 1775 if (NegateAfterR) 1776 RHSCC = AArch64CC::getInvertedCondCode(RHSCC); 1777 SDValue CmpL = emitConjunctionRec(DAG, LHS, OutCC, NegateL, CmpR, RHSCC); 1778 if (NegateAfterAll) 1779 OutCC = AArch64CC::getInvertedCondCode(OutCC); 1780 return CmpL; 1781 } 1782 1783 /// Emit expression as a conjunction (a series of CCMP/CFCMP ops). 1784 /// In some cases this is even possible with OR operations in the expression. 1785 /// See \ref AArch64CCMP. 1786 /// \see emitConjunctionRec(). 1787 static SDValue emitConjunction(SelectionDAG &DAG, SDValue Val, 1788 AArch64CC::CondCode &OutCC) { 1789 bool DummyCanNegate; 1790 bool DummyMustBeFirst; 1791 if (!canEmitConjunction(Val, DummyCanNegate, DummyMustBeFirst, false)) 1792 return SDValue(); 1793 1794 return emitConjunctionRec(DAG, Val, OutCC, false, SDValue(), AArch64CC::AL); 1795 } 1796 1797 /// @} 1798 1799 static SDValue getAArch64Cmp(SDValue LHS, SDValue RHS, ISD::CondCode CC, 1800 SDValue &AArch64cc, SelectionDAG &DAG, 1801 const SDLoc &dl) { 1802 if (ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS.getNode())) { 1803 EVT VT = RHS.getValueType(); 1804 uint64_t C = RHSC->getZExtValue(); 1805 if (!isLegalArithImmed(C)) { 1806 // Constant does not fit, try adjusting it by one? 1807 switch (CC) { 1808 default: 1809 break; 1810 case ISD::SETLT: 1811 case ISD::SETGE: 1812 if ((VT == MVT::i32 && C != 0x80000000 && 1813 isLegalArithImmed((uint32_t)(C - 1))) || 1814 (VT == MVT::i64 && C != 0x80000000ULL && 1815 isLegalArithImmed(C - 1ULL))) { 1816 CC = (CC == ISD::SETLT) ? ISD::SETLE : ISD::SETGT; 1817 C = (VT == MVT::i32) ? (uint32_t)(C - 1) : C - 1; 1818 RHS = DAG.getConstant(C, dl, VT); 1819 } 1820 break; 1821 case ISD::SETULT: 1822 case ISD::SETUGE: 1823 if ((VT == MVT::i32 && C != 0 && 1824 isLegalArithImmed((uint32_t)(C - 1))) || 1825 (VT == MVT::i64 && C != 0ULL && isLegalArithImmed(C - 1ULL))) { 1826 CC = (CC == ISD::SETULT) ? ISD::SETULE : ISD::SETUGT; 1827 C = (VT == MVT::i32) ? 
(uint32_t)(C - 1) : C - 1;
1828         RHS = DAG.getConstant(C, dl, VT);
1829       }
1830       break;
1831     case ISD::SETLE:
1832     case ISD::SETGT:
1833       if ((VT == MVT::i32 && C != INT32_MAX &&
1834            isLegalArithImmed((uint32_t)(C + 1))) ||
1835           (VT == MVT::i64 && C != INT64_MAX &&
1836            isLegalArithImmed(C + 1ULL))) {
1837         CC = (CC == ISD::SETLE) ? ISD::SETLT : ISD::SETGE;
1838         C = (VT == MVT::i32) ? (uint32_t)(C + 1) : C + 1;
1839         RHS = DAG.getConstant(C, dl, VT);
1840       }
1841       break;
1842     case ISD::SETULE:
1843     case ISD::SETUGT:
1844       if ((VT == MVT::i32 && C != UINT32_MAX &&
1845            isLegalArithImmed((uint32_t)(C + 1))) ||
1846           (VT == MVT::i64 && C != UINT64_MAX &&
1847            isLegalArithImmed(C + 1ULL))) {
1848         CC = (CC == ISD::SETULE) ? ISD::SETULT : ISD::SETUGE;
1849         C = (VT == MVT::i32) ? (uint32_t)(C + 1) : C + 1;
1850         RHS = DAG.getConstant(C, dl, VT);
1851       }
1852       break;
1853     }
1854   }
1855 }
1856   SDValue Cmp;
1857   AArch64CC::CondCode AArch64CC;
1858   if ((CC == ISD::SETEQ || CC == ISD::SETNE) && isa<ConstantSDNode>(RHS)) {
1859     const ConstantSDNode *RHSC = cast<ConstantSDNode>(RHS);
1860
1861     // The imm operand of ADDS is an unsigned immediate, in the range 0 to 4095.
1862     // For the i8 operand, the largest immediate is 255, so this can be easily
1863     // encoded in the compare instruction. For the i16 operand, however, the
1864     // largest immediate cannot be encoded in the compare.
1865     // Therefore, use a sign-extending load and cmn to avoid materializing the
1866     // -1 constant. For example,
1867     //   movz w1, #65535
1868     //   ldrh w0, [x0, #0]
1869     //   cmp w0, w1
1870     // >
1871     //   ldrsh w0, [x0, #0]
1872     //   cmn w0, #1
1873     // Fundamentally, we're relying on the property that (zext LHS) == (zext RHS)
1874     // if and only if (sext LHS) == (sext RHS). The checks are in place to
1875     // ensure both the LHS and RHS are truly zero extended and to make sure the
1876     // transformation is profitable.
1877     if ((RHSC->getZExtValue() >> 16 == 0) && isa<LoadSDNode>(LHS) &&
1878         cast<LoadSDNode>(LHS)->getExtensionType() == ISD::ZEXTLOAD &&
1879         cast<LoadSDNode>(LHS)->getMemoryVT() == MVT::i16 &&
1880         LHS.getNode()->hasNUsesOfValue(1, 0)) {
1881       int16_t ValueofRHS = cast<ConstantSDNode>(RHS)->getZExtValue();
1882       if (ValueofRHS < 0 && isLegalArithImmed(-ValueofRHS)) {
1883         SDValue SExt =
1884             DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, LHS.getValueType(), LHS,
1885                         DAG.getValueType(MVT::i16));
1886         Cmp = emitComparison(SExt, DAG.getConstant(ValueofRHS, dl,
1887                                                    RHS.getValueType()),
1888                              CC, dl, DAG);
1889         AArch64CC = changeIntCCToAArch64CC(CC);
1890       }
1891     }
1892
1893     if (!Cmp && (RHSC->isNullValue() || RHSC->isOne())) {
1894       if ((Cmp = emitConjunction(DAG, LHS, AArch64CC))) {
1895         if ((CC == ISD::SETNE) ^ RHSC->isNullValue())
1896           AArch64CC = AArch64CC::getInvertedCondCode(AArch64CC);
1897       }
1898     }
1899   }
1900
1901   if (!Cmp) {
1902     Cmp = emitComparison(LHS, RHS, CC, dl, DAG);
1903     AArch64CC = changeIntCCToAArch64CC(CC);
1904   }
1905   AArch64cc = DAG.getConstant(AArch64CC, dl, MVT_CC);
1906   return Cmp;
1907 }
1908
1909 static std::pair<SDValue, SDValue>
1910 getAArch64XALUOOp(AArch64CC::CondCode &CC, SDValue Op, SelectionDAG &DAG) {
1911   assert((Op.getValueType() == MVT::i32 || Op.getValueType() == MVT::i64) &&
1912          "Unsupported value type");
1913   SDValue Value, Overflow;
1914   SDLoc DL(Op);
1915   SDValue LHS = Op.getOperand(0);
1916   SDValue RHS = Op.getOperand(1);
1917   unsigned Opc = 0;
1918   switch (Op.getOpcode()) {
1919   default:
1920     llvm_unreachable("Unknown overflow instruction!");
1921   case ISD::SADDO:
1922     Opc = AArch64ISD::ADDS;
1923     CC = AArch64CC::VS;
1924     break;
1925   case ISD::UADDO:
1926     Opc = AArch64ISD::ADDS;
1927     CC = AArch64CC::HS;
1928     break;
1929   case ISD::SSUBO:
1930     Opc = AArch64ISD::SUBS;
1931     CC = AArch64CC::VS;
1932     break;
1933   case ISD::USUBO:
1934     Opc = AArch64ISD::SUBS;
1935     CC = AArch64CC::LO;
1936     break;
1937   // Multiply needs a little bit of extra work.
1938   case ISD::SMULO:
1939   case ISD::UMULO: {
1940     CC = AArch64CC::NE;
1941     bool IsSigned = Op.getOpcode() == ISD::SMULO;
1942     if (Op.getValueType() == MVT::i32) {
1943       unsigned ExtendOpc = IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND;
1944       // For a 32 bit multiply with overflow check we want the instruction
1945       // selector to generate a widening multiply (SMADDL/UMADDL). For that we
1946       // need to generate the following pattern:
1947       // (i64 add 0, (i64 mul (i64 sext|zext i32 %a), (i64 sext|zext i32 %b)))
1948       LHS = DAG.getNode(ExtendOpc, DL, MVT::i64, LHS);
1949       RHS = DAG.getNode(ExtendOpc, DL, MVT::i64, RHS);
1950       SDValue Mul = DAG.getNode(ISD::MUL, DL, MVT::i64, LHS, RHS);
1951       SDValue Add = DAG.getNode(ISD::ADD, DL, MVT::i64, Mul,
1952                                 DAG.getConstant(0, DL, MVT::i64));
1953       // On AArch64 the upper 32 bits are always zero extended for a 32 bit
1954       // operation. We need to clear out the upper 32 bits, because we used a
1955       // widening multiply that wrote all 64 bits. In the end this should be a
1956       // noop.
1957       Value = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Add);
1958       if (IsSigned) {
1959         // The signed overflow check requires more than just a simple check for
1960         // any bit set in the upper 32 bits of the result. These bits could be
1961         // just the sign bits of a negative number. To perform the overflow
1962         // check, we arithmetically shift the low 32 bits of the result right
1963         // by 31 (replicating the sign bit) and compare that with the upper 32 bits.
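        // Worked example (purely illustrative): for %a = 0x40000000 and
        // %b = 2 the widened multiply yields 0x0000000080000000. The upper 32
        // bits are 0, while the low 32 bits shifted right arithmetically by
        // 31 give 0xFFFFFFFF; the two differ, so the i32 multiply overflowed.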
1964 SDValue UpperBits = DAG.getNode(ISD::SRL, DL, MVT::i64, Add, 1965 DAG.getConstant(32, DL, MVT::i64)); 1966 UpperBits = DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, UpperBits); 1967 SDValue LowerBits = DAG.getNode(ISD::SRA, DL, MVT::i32, Value, 1968 DAG.getConstant(31, DL, MVT::i64)); 1969 // It is important that LowerBits is last, otherwise the arithmetic 1970 // shift will not be folded into the compare (SUBS). 1971 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32); 1972 Overflow = DAG.getNode(AArch64ISD::SUBS, DL, VTs, UpperBits, LowerBits) 1973 .getValue(1); 1974 } else { 1975 // The overflow check for unsigned multiply is easy. We only need to 1976 // check if any of the upper 32 bits are set. This can be done with a 1977 // CMP (shifted register). For that we need to generate the following 1978 // pattern: 1979 // (i64 AArch64ISD::SUBS i64 0, (i64 srl i64 %Mul, i64 32) 1980 SDValue UpperBits = DAG.getNode(ISD::SRL, DL, MVT::i64, Mul, 1981 DAG.getConstant(32, DL, MVT::i64)); 1982 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i32); 1983 Overflow = 1984 DAG.getNode(AArch64ISD::SUBS, DL, VTs, 1985 DAG.getConstant(0, DL, MVT::i64), 1986 UpperBits).getValue(1); 1987 } 1988 break; 1989 } 1990 assert(Op.getValueType() == MVT::i64 && "Expected an i64 value type"); 1991 // For the 64 bit multiply 1992 Value = DAG.getNode(ISD::MUL, DL, MVT::i64, LHS, RHS); 1993 if (IsSigned) { 1994 SDValue UpperBits = DAG.getNode(ISD::MULHS, DL, MVT::i64, LHS, RHS); 1995 SDValue LowerBits = DAG.getNode(ISD::SRA, DL, MVT::i64, Value, 1996 DAG.getConstant(63, DL, MVT::i64)); 1997 // It is important that LowerBits is last, otherwise the arithmetic 1998 // shift will not be folded into the compare (SUBS). 1999 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i32); 2000 Overflow = DAG.getNode(AArch64ISD::SUBS, DL, VTs, UpperBits, LowerBits) 2001 .getValue(1); 2002 } else { 2003 SDValue UpperBits = DAG.getNode(ISD::MULHU, DL, MVT::i64, LHS, RHS); 2004 SDVTList VTs = DAG.getVTList(MVT::i64, MVT::i32); 2005 Overflow = 2006 DAG.getNode(AArch64ISD::SUBS, DL, VTs, 2007 DAG.getConstant(0, DL, MVT::i64), 2008 UpperBits).getValue(1); 2009 } 2010 break; 2011 } 2012 } // switch (...) 2013 2014 if (Opc) { 2015 SDVTList VTs = DAG.getVTList(Op->getValueType(0), MVT::i32); 2016 2017 // Emit the AArch64 operation with overflow check. 2018 Value = DAG.getNode(Opc, DL, VTs, LHS, RHS); 2019 Overflow = Value.getValue(1); 2020 } 2021 return std::make_pair(Value, Overflow); 2022 } 2023 2024 SDValue AArch64TargetLowering::LowerF128Call(SDValue Op, SelectionDAG &DAG, 2025 RTLIB::Libcall Call) const { 2026 SmallVector<SDValue, 2> Ops(Op->op_begin(), Op->op_end()); 2027 return makeLibCall(DAG, Call, MVT::f128, Ops, false, SDLoc(Op)).first; 2028 } 2029 2030 // Returns true if the given Op is the overflow flag result of an overflow 2031 // intrinsic operation. 2032 static bool isOverflowIntrOpRes(SDValue Op) { 2033 unsigned Opc = Op.getOpcode(); 2034 return (Op.getResNo() == 1 && 2035 (Opc == ISD::SADDO || Opc == ISD::UADDO || Opc == ISD::SSUBO || 2036 Opc == ISD::USUBO || Opc == ISD::SMULO || Opc == ISD::UMULO)); 2037 } 2038 2039 static SDValue LowerXOR(SDValue Op, SelectionDAG &DAG) { 2040 SDValue Sel = Op.getOperand(0); 2041 SDValue Other = Op.getOperand(1); 2042 SDLoc dl(Sel); 2043 2044 // If the operand is an overflow checking operation, invert the condition 2045 // code and kill the Not operation. I.e., transform: 2046 // (xor (overflow_op_bool, 1)) 2047 // --> 2048 // (csel 1, 0, invert(cc), overflow_op_bool) 2049 // ... 
which later gets transformed to just a cset instruction with an 2050 // inverted condition code, rather than a cset + eor sequence. 2051 if (isOneConstant(Other) && isOverflowIntrOpRes(Sel)) { 2052 // Only lower legal XALUO ops. 2053 if (!DAG.getTargetLoweringInfo().isTypeLegal(Sel->getValueType(0))) 2054 return SDValue(); 2055 2056 SDValue TVal = DAG.getConstant(1, dl, MVT::i32); 2057 SDValue FVal = DAG.getConstant(0, dl, MVT::i32); 2058 AArch64CC::CondCode CC; 2059 SDValue Value, Overflow; 2060 std::tie(Value, Overflow) = getAArch64XALUOOp(CC, Sel.getValue(0), DAG); 2061 SDValue CCVal = DAG.getConstant(getInvertedCondCode(CC), dl, MVT::i32); 2062 return DAG.getNode(AArch64ISD::CSEL, dl, Op.getValueType(), TVal, FVal, 2063 CCVal, Overflow); 2064 } 2065 // If neither operand is a SELECT_CC, give up. 2066 if (Sel.getOpcode() != ISD::SELECT_CC) 2067 std::swap(Sel, Other); 2068 if (Sel.getOpcode() != ISD::SELECT_CC) 2069 return Op; 2070 2071 // The folding we want to perform is: 2072 // (xor x, (select_cc a, b, cc, 0, -1) ) 2073 // --> 2074 // (csel x, (xor x, -1), cc ...) 2075 // 2076 // The latter will get matched to a CSINV instruction. 2077 2078 ISD::CondCode CC = cast<CondCodeSDNode>(Sel.getOperand(4))->get(); 2079 SDValue LHS = Sel.getOperand(0); 2080 SDValue RHS = Sel.getOperand(1); 2081 SDValue TVal = Sel.getOperand(2); 2082 SDValue FVal = Sel.getOperand(3); 2083 2084 // FIXME: This could be generalized to non-integer comparisons. 2085 if (LHS.getValueType() != MVT::i32 && LHS.getValueType() != MVT::i64) 2086 return Op; 2087 2088 ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FVal); 2089 ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TVal); 2090 2091 // The values aren't constants, this isn't the pattern we're looking for. 2092 if (!CFVal || !CTVal) 2093 return Op; 2094 2095 // We can commute the SELECT_CC by inverting the condition. This 2096 // might be needed to make this fit into a CSINV pattern. 2097 if (CTVal->isAllOnesValue() && CFVal->isNullValue()) { 2098 std::swap(TVal, FVal); 2099 std::swap(CTVal, CFVal); 2100 CC = ISD::getSetCCInverse(CC, true); 2101 } 2102 2103 // If the constants line up, perform the transform! 2104 if (CTVal->isNullValue() && CFVal->isAllOnesValue()) { 2105 SDValue CCVal; 2106 SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl); 2107 2108 FVal = Other; 2109 TVal = DAG.getNode(ISD::XOR, dl, Other.getValueType(), Other, 2110 DAG.getConstant(-1ULL, dl, Other.getValueType())); 2111 2112 return DAG.getNode(AArch64ISD::CSEL, dl, Sel.getValueType(), FVal, TVal, 2113 CCVal, Cmp); 2114 } 2115 2116 return Op; 2117 } 2118 2119 static SDValue LowerADDC_ADDE_SUBC_SUBE(SDValue Op, SelectionDAG &DAG) { 2120 EVT VT = Op.getValueType(); 2121 2122 // Let legalize expand this if it isn't a legal type yet. 
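  // A sketch of a typical client (assuming the usual expansion of wide adds):
  // an i128 add is split by type legalization into an i64 ADDC feeding an
  // i64 ADDE; those arrive here and get mapped onto the flag-setting pair,
  // e.g. "adds x0, x0, x2; adcs x1, x1, x3".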
2123   if (!DAG.getTargetLoweringInfo().isTypeLegal(VT))
2124     return SDValue();
2125
2126   SDVTList VTs = DAG.getVTList(VT, MVT::i32);
2127
2128   unsigned Opc;
2129   bool ExtraOp = false;
2130   switch (Op.getOpcode()) {
2131   default:
2132     llvm_unreachable("Invalid code");
2133   case ISD::ADDC:
2134     Opc = AArch64ISD::ADDS;
2135     break;
2136   case ISD::SUBC:
2137     Opc = AArch64ISD::SUBS;
2138     break;
2139   case ISD::ADDE:
2140     Opc = AArch64ISD::ADCS;
2141     ExtraOp = true;
2142     break;
2143   case ISD::SUBE:
2144     Opc = AArch64ISD::SBCS;
2145     ExtraOp = true;
2146     break;
2147   }
2148
2149   if (!ExtraOp)
2150     return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1));
2151   return DAG.getNode(Opc, SDLoc(Op), VTs, Op.getOperand(0), Op.getOperand(1),
2152                      Op.getOperand(2));
2153 }
2154
2155 static SDValue LowerXALUO(SDValue Op, SelectionDAG &DAG) {
2156   // Let legalize expand this if it isn't a legal type yet.
2157   if (!DAG.getTargetLoweringInfo().isTypeLegal(Op.getValueType()))
2158     return SDValue();
2159
2160   SDLoc dl(Op);
2161   AArch64CC::CondCode CC;
2162   // The actual operation that sets the overflow or carry flag.
2163   SDValue Value, Overflow;
2164   std::tie(Value, Overflow) = getAArch64XALUOOp(CC, Op, DAG);
2165
2166   // We use 0 and 1 as false and true values.
2167   SDValue TVal = DAG.getConstant(1, dl, MVT::i32);
2168   SDValue FVal = DAG.getConstant(0, dl, MVT::i32);
2169
2170   // We use an inverted condition, because the conditional select is inverted
2171   // too. This will allow it to be selected to a single instruction:
2172   // CSINC Wd, WZR, WZR, invert(cond).
2173   SDValue CCVal = DAG.getConstant(getInvertedCondCode(CC), dl, MVT::i32);
2174   Overflow = DAG.getNode(AArch64ISD::CSEL, dl, MVT::i32, FVal, TVal,
2175                          CCVal, Overflow);
2176
2177   SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::i32);
2178   return DAG.getNode(ISD::MERGE_VALUES, dl, VTs, Value, Overflow);
2179 }
2180
2181 // Prefetch operands are:
2182 // 1: Address to prefetch
2183 // 2: bool isWrite
2184 // 3: int locality (0 = no locality ... 3 = extreme locality)
2185 // 4: bool isDataCache
2186 static SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) {
2187   SDLoc DL(Op);
2188   unsigned IsWrite = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue();
2189   unsigned Locality = cast<ConstantSDNode>(Op.getOperand(3))->getZExtValue();
2190   unsigned IsData = cast<ConstantSDNode>(Op.getOperand(4))->getZExtValue();
2191
2192   bool IsStream = !Locality;
2193   // When a locality level is specified, remap it for the PRFM encoding.
2194   if (Locality) {
2195     // The front-end should have filtered out the out-of-range values.
2196     assert(Locality <= 3 && "Prefetch locality out-of-range");
2197     // The locality degree is the opposite of the cache speed, so put the
2198     // number the other way around.
2199     // (The encoding starts at 0 for level 1.)
2200     Locality = 3 - Locality;
2201   }
2202
2203   // Build the mask value encoding the expected behavior.
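  // Worked example (illustrative): a read prefetch of the data cache with
  // maximal locality (IsWrite = 0, IsData = 1, Locality = 3) remaps Locality
  // to 0 and leaves the stream bit clear, so PrfOp == 0b00000, i.e.
  // PLDL1KEEP; with Locality = 0 the stream bit is set instead, giving
  // PrfOp == 0b00001, i.e. PLDL1STRM.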
2204 unsigned PrfOp = (IsWrite << 4) | // Load/Store bit 2205 (!IsData << 3) | // IsDataCache bit 2206 (Locality << 1) | // Cache level bits 2207 (unsigned)IsStream; // Stream bit 2208 return DAG.getNode(AArch64ISD::PREFETCH, DL, MVT::Other, Op.getOperand(0), 2209 DAG.getConstant(PrfOp, DL, MVT::i32), Op.getOperand(1)); 2210 } 2211 2212 SDValue AArch64TargetLowering::LowerFP_EXTEND(SDValue Op, 2213 SelectionDAG &DAG) const { 2214 assert(Op.getValueType() == MVT::f128 && "Unexpected lowering"); 2215 2216 RTLIB::Libcall LC; 2217 LC = RTLIB::getFPEXT(Op.getOperand(0).getValueType(), Op.getValueType()); 2218 2219 return LowerF128Call(Op, DAG, LC); 2220 } 2221 2222 SDValue AArch64TargetLowering::LowerFP_ROUND(SDValue Op, 2223 SelectionDAG &DAG) const { 2224 if (Op.getOperand(0).getValueType() != MVT::f128) { 2225 // It's legal except when f128 is involved 2226 return Op; 2227 } 2228 2229 RTLIB::Libcall LC; 2230 LC = RTLIB::getFPROUND(Op.getOperand(0).getValueType(), Op.getValueType()); 2231 2232 // FP_ROUND node has a second operand indicating whether it is known to be 2233 // precise. That doesn't take part in the LibCall so we can't directly use 2234 // LowerF128Call. 2235 SDValue SrcVal = Op.getOperand(0); 2236 return makeLibCall(DAG, LC, Op.getValueType(), SrcVal, /*isSigned*/ false, 2237 SDLoc(Op)).first; 2238 } 2239 2240 static SDValue LowerVectorFP_TO_INT(SDValue Op, SelectionDAG &DAG) { 2241 // Warning: We maintain cost tables in AArch64TargetTransformInfo.cpp. 2242 // Any additional optimization in this function should be recorded 2243 // in the cost tables. 2244 EVT InVT = Op.getOperand(0).getValueType(); 2245 EVT VT = Op.getValueType(); 2246 unsigned NumElts = InVT.getVectorNumElements(); 2247 2248 // f16 vectors are promoted to f32 before a conversion. 2249 if (InVT.getVectorElementType() == MVT::f16) { 2250 MVT NewVT = MVT::getVectorVT(MVT::f32, NumElts); 2251 SDLoc dl(Op); 2252 return DAG.getNode( 2253 Op.getOpcode(), dl, Op.getValueType(), 2254 DAG.getNode(ISD::FP_EXTEND, dl, NewVT, Op.getOperand(0))); 2255 } 2256 2257 if (VT.getSizeInBits() < InVT.getSizeInBits()) { 2258 SDLoc dl(Op); 2259 SDValue Cv = 2260 DAG.getNode(Op.getOpcode(), dl, InVT.changeVectorElementTypeToInteger(), 2261 Op.getOperand(0)); 2262 return DAG.getNode(ISD::TRUNCATE, dl, VT, Cv); 2263 } 2264 2265 if (VT.getSizeInBits() > InVT.getSizeInBits()) { 2266 SDLoc dl(Op); 2267 MVT ExtVT = 2268 MVT::getVectorVT(MVT::getFloatingPointVT(VT.getScalarSizeInBits()), 2269 VT.getVectorNumElements()); 2270 SDValue Ext = DAG.getNode(ISD::FP_EXTEND, dl, ExtVT, Op.getOperand(0)); 2271 return DAG.getNode(Op.getOpcode(), dl, VT, Ext); 2272 } 2273 2274 // Type changing conversions are illegal. 2275 return Op; 2276 } 2277 2278 SDValue AArch64TargetLowering::LowerFP_TO_INT(SDValue Op, 2279 SelectionDAG &DAG) const { 2280 if (Op.getOperand(0).getValueType().isVector()) 2281 return LowerVectorFP_TO_INT(Op, DAG); 2282 2283 // f16 conversions are promoted to f32 when full fp16 is not supported. 
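  // E.g. (illustrative) an (i32 fp_to_sint f16:%x) without FullFP16 becomes
  // (i32 fp_to_sint (f32 fp_extend f16:%x)), which can then select to
  //   fcvt   s0, h0
  //   fcvtzs w0, s0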
2284 if (Op.getOperand(0).getValueType() == MVT::f16 && 2285 !Subtarget->hasFullFP16()) { 2286 SDLoc dl(Op); 2287 return DAG.getNode( 2288 Op.getOpcode(), dl, Op.getValueType(), 2289 DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, Op.getOperand(0))); 2290 } 2291 2292 if (Op.getOperand(0).getValueType() != MVT::f128) { 2293 // It's legal except when f128 is involved 2294 return Op; 2295 } 2296 2297 RTLIB::Libcall LC; 2298 if (Op.getOpcode() == ISD::FP_TO_SINT) 2299 LC = RTLIB::getFPTOSINT(Op.getOperand(0).getValueType(), Op.getValueType()); 2300 else 2301 LC = RTLIB::getFPTOUINT(Op.getOperand(0).getValueType(), Op.getValueType()); 2302 2303 SmallVector<SDValue, 2> Ops(Op->op_begin(), Op->op_end()); 2304 return makeLibCall(DAG, LC, Op.getValueType(), Ops, false, SDLoc(Op)).first; 2305 } 2306 2307 static SDValue LowerVectorINT_TO_FP(SDValue Op, SelectionDAG &DAG) { 2308 // Warning: We maintain cost tables in AArch64TargetTransformInfo.cpp. 2309 // Any additional optimization in this function should be recorded 2310 // in the cost tables. 2311 EVT VT = Op.getValueType(); 2312 SDLoc dl(Op); 2313 SDValue In = Op.getOperand(0); 2314 EVT InVT = In.getValueType(); 2315 2316 if (VT.getSizeInBits() < InVT.getSizeInBits()) { 2317 MVT CastVT = 2318 MVT::getVectorVT(MVT::getFloatingPointVT(InVT.getScalarSizeInBits()), 2319 InVT.getVectorNumElements()); 2320 In = DAG.getNode(Op.getOpcode(), dl, CastVT, In); 2321 return DAG.getNode(ISD::FP_ROUND, dl, VT, In, DAG.getIntPtrConstant(0, dl)); 2322 } 2323 2324 if (VT.getSizeInBits() > InVT.getSizeInBits()) { 2325 unsigned CastOpc = 2326 Op.getOpcode() == ISD::SINT_TO_FP ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND; 2327 EVT CastVT = VT.changeVectorElementTypeToInteger(); 2328 In = DAG.getNode(CastOpc, dl, CastVT, In); 2329 return DAG.getNode(Op.getOpcode(), dl, VT, In); 2330 } 2331 2332 return Op; 2333 } 2334 2335 SDValue AArch64TargetLowering::LowerINT_TO_FP(SDValue Op, 2336 SelectionDAG &DAG) const { 2337 if (Op.getValueType().isVector()) 2338 return LowerVectorINT_TO_FP(Op, DAG); 2339 2340 // f16 conversions are promoted to f32 when full fp16 is not supported. 2341 if (Op.getValueType() == MVT::f16 && 2342 !Subtarget->hasFullFP16()) { 2343 SDLoc dl(Op); 2344 return DAG.getNode( 2345 ISD::FP_ROUND, dl, MVT::f16, 2346 DAG.getNode(Op.getOpcode(), dl, MVT::f32, Op.getOperand(0)), 2347 DAG.getIntPtrConstant(0, dl)); 2348 } 2349 2350 // i128 conversions are libcalls. 2351 if (Op.getOperand(0).getValueType() == MVT::i128) 2352 return SDValue(); 2353 2354 // Other conversions are legal, unless it's to the completely software-based 2355 // fp128. 2356 if (Op.getValueType() != MVT::f128) 2357 return Op; 2358 2359 RTLIB::Libcall LC; 2360 if (Op.getOpcode() == ISD::SINT_TO_FP) 2361 LC = RTLIB::getSINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType()); 2362 else 2363 LC = RTLIB::getUINTTOFP(Op.getOperand(0).getValueType(), Op.getValueType()); 2364 2365 return LowerF128Call(Op, DAG, LC); 2366 } 2367 2368 SDValue AArch64TargetLowering::LowerFSINCOS(SDValue Op, 2369 SelectionDAG &DAG) const { 2370 // For iOS, we want to call an alternative entry point: __sincos_stret, 2371 // which returns the values in two S / D registers. 
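  // Sketch of the call built below (prototype shown for exposition only):
  // for f32 it behaves like "struct { float Sin, Cos; } __sincos_stret(float)",
  // invoked with CallingConv::Fast and consumed via the two-element struct.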
2372 SDLoc dl(Op); 2373 SDValue Arg = Op.getOperand(0); 2374 EVT ArgVT = Arg.getValueType(); 2375 Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext()); 2376 2377 ArgListTy Args; 2378 ArgListEntry Entry; 2379 2380 Entry.Node = Arg; 2381 Entry.Ty = ArgTy; 2382 Entry.IsSExt = false; 2383 Entry.IsZExt = false; 2384 Args.push_back(Entry); 2385 2386 RTLIB::Libcall LC = ArgVT == MVT::f64 ? RTLIB::SINCOS_STRET_F64 2387 : RTLIB::SINCOS_STRET_F32; 2388 const char *LibcallName = getLibcallName(LC); 2389 SDValue Callee = 2390 DAG.getExternalSymbol(LibcallName, getPointerTy(DAG.getDataLayout())); 2391 2392 StructType *RetTy = StructType::get(ArgTy, ArgTy); 2393 TargetLowering::CallLoweringInfo CLI(DAG); 2394 CLI.setDebugLoc(dl) 2395 .setChain(DAG.getEntryNode()) 2396 .setLibCallee(CallingConv::Fast, RetTy, Callee, std::move(Args)); 2397 2398 std::pair<SDValue, SDValue> CallResult = LowerCallTo(CLI); 2399 return CallResult.first; 2400 } 2401 2402 static SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) { 2403 if (Op.getValueType() != MVT::f16) 2404 return SDValue(); 2405 2406 assert(Op.getOperand(0).getValueType() == MVT::i16); 2407 SDLoc DL(Op); 2408 2409 Op = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Op.getOperand(0)); 2410 Op = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Op); 2411 return SDValue( 2412 DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, DL, MVT::f16, Op, 2413 DAG.getTargetConstant(AArch64::hsub, DL, MVT::i32)), 2414 0); 2415 } 2416 2417 static EVT getExtensionTo64Bits(const EVT &OrigVT) { 2418 if (OrigVT.getSizeInBits() >= 64) 2419 return OrigVT; 2420 2421 assert(OrigVT.isSimple() && "Expecting a simple value type"); 2422 2423 MVT::SimpleValueType OrigSimpleTy = OrigVT.getSimpleVT().SimpleTy; 2424 switch (OrigSimpleTy) { 2425 default: llvm_unreachable("Unexpected Vector Type"); 2426 case MVT::v2i8: 2427 case MVT::v2i16: 2428 return MVT::v2i32; 2429 case MVT::v4i8: 2430 return MVT::v4i16; 2431 } 2432 } 2433 2434 static SDValue addRequiredExtensionForVectorMULL(SDValue N, SelectionDAG &DAG, 2435 const EVT &OrigTy, 2436 const EVT &ExtTy, 2437 unsigned ExtOpcode) { 2438 // The vector originally had a size of OrigTy. It was then extended to ExtTy. 2439 // We expect the ExtTy to be 128-bits total. If the OrigTy is less than 2440 // 64-bits we need to insert a new extension so that it will be 64-bits. 2441 assert(ExtTy.is128BitVector() && "Unexpected extension size"); 2442 if (OrigTy.getSizeInBits() >= 64) 2443 return N; 2444 2445 // Must extend size to at least 64 bits to be used as an operand for VMULL. 
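  // E.g. a v4i8 operand is widened to v4i16 here (per getExtensionTo64Bits
  // above) so the multiply sees a 64-bit input vector.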
2446   EVT NewVT = getExtensionTo64Bits(OrigTy);
2447
2448   return DAG.getNode(ExtOpcode, SDLoc(N), NewVT, N);
2449 }
2450
2451 static bool isExtendedBUILD_VECTOR(SDNode *N, SelectionDAG &DAG,
2452                                    bool isSigned) {
2453   EVT VT = N->getValueType(0);
2454
2455   if (N->getOpcode() != ISD::BUILD_VECTOR)
2456     return false;
2457
2458   for (const SDValue &Elt : N->op_values()) {
2459     if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(Elt)) {
2460       unsigned EltSize = VT.getScalarSizeInBits();
2461       unsigned HalfSize = EltSize / 2;
2462       if (isSigned) {
2463         if (!isIntN(HalfSize, C->getSExtValue()))
2464           return false;
2465       } else {
2466         if (!isUIntN(HalfSize, C->getZExtValue()))
2467           return false;
2468       }
2469       continue;
2470     }
2471     return false;
2472   }
2473
2474   return true;
2475 }
2476
2477 static SDValue skipExtensionForVectorMULL(SDNode *N, SelectionDAG &DAG) {
2478   if (N->getOpcode() == ISD::SIGN_EXTEND || N->getOpcode() == ISD::ZERO_EXTEND)
2479     return addRequiredExtensionForVectorMULL(N->getOperand(0), DAG,
2480                                              N->getOperand(0)->getValueType(0),
2481                                              N->getValueType(0),
2482                                              N->getOpcode());
2483
2484   assert(N->getOpcode() == ISD::BUILD_VECTOR && "expected BUILD_VECTOR");
2485   EVT VT = N->getValueType(0);
2486   SDLoc dl(N);
2487   unsigned EltSize = VT.getScalarSizeInBits() / 2;
2488   unsigned NumElts = VT.getVectorNumElements();
2489   MVT TruncVT = MVT::getIntegerVT(EltSize);
2490   SmallVector<SDValue, 8> Ops;
2491   for (unsigned i = 0; i != NumElts; ++i) {
2492     ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(i));
2493     const APInt &CInt = C->getAPIntValue();
2494     // Element types smaller than 32 bits are not legal, so use i32 elements.
2495     // The values are implicitly truncated so sext vs. zext doesn't matter.
2496     Ops.push_back(DAG.getConstant(CInt.zextOrTrunc(32), dl, MVT::i32));
2497   }
2498   return DAG.getBuildVector(MVT::getVectorVT(TruncVT, NumElts), dl, Ops);
2499 }
2500
2501 static bool isSignExtended(SDNode *N, SelectionDAG &DAG) {
2502   return N->getOpcode() == ISD::SIGN_EXTEND ||
2503          isExtendedBUILD_VECTOR(N, DAG, true);
2504 }
2505
2506 static bool isZeroExtended(SDNode *N, SelectionDAG &DAG) {
2507   return N->getOpcode() == ISD::ZERO_EXTEND ||
2508          isExtendedBUILD_VECTOR(N, DAG, false);
2509 }
2510
2511 static bool isAddSubSExt(SDNode *N, SelectionDAG &DAG) {
2512   unsigned Opcode = N->getOpcode();
2513   if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
2514     SDNode *N0 = N->getOperand(0).getNode();
2515     SDNode *N1 = N->getOperand(1).getNode();
2516     return N0->hasOneUse() && N1->hasOneUse() &&
2517            isSignExtended(N0, DAG) && isSignExtended(N1, DAG);
2518   }
2519   return false;
2520 }
2521
2522 static bool isAddSubZExt(SDNode *N, SelectionDAG &DAG) {
2523   unsigned Opcode = N->getOpcode();
2524   if (Opcode == ISD::ADD || Opcode == ISD::SUB) {
2525     SDNode *N0 = N->getOperand(0).getNode();
2526     SDNode *N1 = N->getOperand(1).getNode();
2527     return N0->hasOneUse() && N1->hasOneUse() &&
2528            isZeroExtended(N0, DAG) && isZeroExtended(N1, DAG);
2529   }
2530   return false;
2531 }
2532
2533 SDValue AArch64TargetLowering::LowerFLT_ROUNDS_(SDValue Op,
2534                                                 SelectionDAG &DAG) const {
2535   // The rounding mode is in bits 23:22 of the FPCR.
2536   // The ARM rounding mode value to FLT_ROUNDS mapping is 0->1, 1->2, 2->3, 3->0
2537   // The formula we use to implement this is (((FPCR + (1 << 22)) >> 22) & 3)
2538   // so that the shift + and get folded into a bitfield extract.
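  // Worked example (illustrative): if FPCR.RMode is 0b11 (round toward
  // zero), then (((FPCR + (1 << 22)) >> 22) & 3) == (3 + 1) & 3 == 0, which
  // is exactly the FLT_ROUNDS value for "toward zero".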
2539 SDLoc dl(Op); 2540 2541 SDValue FPCR_64 = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, dl, MVT::i64, 2542 DAG.getConstant(Intrinsic::aarch64_get_fpcr, dl, 2543 MVT::i64)); 2544 SDValue FPCR_32 = DAG.getNode(ISD::TRUNCATE, dl, MVT::i32, FPCR_64); 2545 SDValue FltRounds = DAG.getNode(ISD::ADD, dl, MVT::i32, FPCR_32, 2546 DAG.getConstant(1U << 22, dl, MVT::i32)); 2547 SDValue RMODE = DAG.getNode(ISD::SRL, dl, MVT::i32, FltRounds, 2548 DAG.getConstant(22, dl, MVT::i32)); 2549 return DAG.getNode(ISD::AND, dl, MVT::i32, RMODE, 2550 DAG.getConstant(3, dl, MVT::i32)); 2551 } 2552 2553 static SDValue LowerMUL(SDValue Op, SelectionDAG &DAG) { 2554 // Multiplications are only custom-lowered for 128-bit vectors so that 2555 // VMULL can be detected. Otherwise v2i64 multiplications are not legal. 2556 EVT VT = Op.getValueType(); 2557 assert(VT.is128BitVector() && VT.isInteger() && 2558 "unexpected type for custom-lowering ISD::MUL"); 2559 SDNode *N0 = Op.getOperand(0).getNode(); 2560 SDNode *N1 = Op.getOperand(1).getNode(); 2561 unsigned NewOpc = 0; 2562 bool isMLA = false; 2563 bool isN0SExt = isSignExtended(N0, DAG); 2564 bool isN1SExt = isSignExtended(N1, DAG); 2565 if (isN0SExt && isN1SExt) 2566 NewOpc = AArch64ISD::SMULL; 2567 else { 2568 bool isN0ZExt = isZeroExtended(N0, DAG); 2569 bool isN1ZExt = isZeroExtended(N1, DAG); 2570 if (isN0ZExt && isN1ZExt) 2571 NewOpc = AArch64ISD::UMULL; 2572 else if (isN1SExt || isN1ZExt) { 2573 // Look for (s/zext A + s/zext B) * (s/zext C). We want to turn these 2574 // into (s/zext A * s/zext C) + (s/zext B * s/zext C) 2575 if (isN1SExt && isAddSubSExt(N0, DAG)) { 2576 NewOpc = AArch64ISD::SMULL; 2577 isMLA = true; 2578 } else if (isN1ZExt && isAddSubZExt(N0, DAG)) { 2579 NewOpc = AArch64ISD::UMULL; 2580 isMLA = true; 2581 } else if (isN0ZExt && isAddSubZExt(N1, DAG)) { 2582 std::swap(N0, N1); 2583 NewOpc = AArch64ISD::UMULL; 2584 isMLA = true; 2585 } 2586 } 2587 2588 if (!NewOpc) { 2589 if (VT == MVT::v2i64) 2590 // Fall through to expand this. It is not legal. 2591 return SDValue(); 2592 else 2593 // Other vector multiplications are legal. 2594 return Op; 2595 } 2596 } 2597 2598 // Legalize to a S/UMULL instruction 2599 SDLoc DL(Op); 2600 SDValue Op0; 2601 SDValue Op1 = skipExtensionForVectorMULL(N1, DAG); 2602 if (!isMLA) { 2603 Op0 = skipExtensionForVectorMULL(N0, DAG); 2604 assert(Op0.getValueType().is64BitVector() && 2605 Op1.getValueType().is64BitVector() && 2606 "unexpected types for extended operands to VMULL"); 2607 return DAG.getNode(NewOpc, DL, VT, Op0, Op1); 2608 } 2609 // Optimizing (zext A + zext B) * C, to (S/UMULL A, C) + (S/UMULL B, C) during 2610 // isel lowering to take advantage of no-stall back to back s/umul + s/umla. 2611 // This is true for CPUs with accumulate forwarding such as Cortex-A53/A57 2612 SDValue N00 = skipExtensionForVectorMULL(N0->getOperand(0).getNode(), DAG); 2613 SDValue N01 = skipExtensionForVectorMULL(N0->getOperand(1).getNode(), DAG); 2614 EVT Op1VT = Op1.getValueType(); 2615 return DAG.getNode(N0->getOpcode(), DL, VT, 2616 DAG.getNode(NewOpc, DL, VT, 2617 DAG.getNode(ISD::BITCAST, DL, Op1VT, N00), Op1), 2618 DAG.getNode(NewOpc, DL, VT, 2619 DAG.getNode(ISD::BITCAST, DL, Op1VT, N01), Op1)); 2620 } 2621 2622 // Lower vector multiply high (ISD::MULHS and ISD::MULHU). 2623 static SDValue LowerMULH(SDValue Op, SelectionDAG &DAG) { 2624 // Multiplications are only custom-lowered for 128-bit vectors so that 2625 // {S,U}MULL{2} can be detected. Otherwise v2i64 multiplications are not 2626 // legal. 
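  // For a v4i32 ISD::MULHS, for instance, the nodes built below end up as
  // (sketch, not verified compiler output):
  //   smull  v2.2d, v0.2s, v1.2s   // products of the low halves
  //   smull2 v3.2d, v0.4s, v1.4s   // products of the high halves
  //   uzp2   v0.4s, v2.4s, v3.4s   // keep the high 32 bits of each product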
2627   EVT VT = Op.getValueType();
2628   assert(VT.is128BitVector() && VT.isInteger() &&
2629          "unexpected type for custom-lowering ISD::MULH{U,S}");
2630
2631   SDValue V0 = Op.getOperand(0);
2632   SDValue V1 = Op.getOperand(1);
2633
2634   SDLoc DL(Op);
2635
2636   EVT ExtractVT = VT.getHalfNumVectorElementsVT(*DAG.getContext());
2637
2638   // We turn (V0 mulhs/mulhu V1) to:
2639   //
2640   // (uzp2 (smull (extract_subvector (ExtractVT V128:V0, (i64 0))),
2641   //              (extract_subvector (ExtractVT V128:V1, (i64 0)))),
2642   //       (smull (extract_subvector (ExtractVT V128:V0, (i64 VMull2Idx))),
2643   //              (extract_subvector (ExtractVT V128:V1, (i64 VMull2Idx)))))
2644   //
2645   // where ExtractVT is a subvector type with half the number of elements,
2646   // and VMull2Idx is the index of the middle element (the start of the high part).
2647   //
2648   // The vector high-part extract and multiply will be matched against
2649   // {S,U}MULL{v16i8_v8i16,v8i16_v4i32,v4i32_v2i64}, which in turn will
2650   // issue a {s,u}mull2 instruction.
2651   //
2652   // This basically multiplies the lower subvector with '{s,u}mull' and the
2653   // high subvector with '{s,u}mull2', then shuffles the high parts of both
2654   // results into the resulting vector.
2655   unsigned Mull2VectorIdx = VT.getVectorNumElements() / 2;
2656   SDValue VMullIdx = DAG.getConstant(0, DL, MVT::i64);
2657   SDValue VMull2Idx = DAG.getConstant(Mull2VectorIdx, DL, MVT::i64);
2658
2659   SDValue VMullV0 =
2660       DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtractVT, V0, VMullIdx);
2661   SDValue VMullV1 =
2662       DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtractVT, V1, VMullIdx);
2663
2664   SDValue VMull2V0 =
2665       DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtractVT, V0, VMull2Idx);
2666   SDValue VMull2V1 =
2667       DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtractVT, V1, VMull2Idx);
2668
2669   unsigned MullOpc = Op.getOpcode() == ISD::MULHS ? AArch64ISD::SMULL
2670                                                   : AArch64ISD::UMULL;
2671
2672   EVT MullVT = ExtractVT.widenIntegerVectorElementType(*DAG.getContext());
2673   SDValue Mull = DAG.getNode(MullOpc, DL, MullVT, VMullV0, VMullV1);
2674   SDValue Mull2 = DAG.getNode(MullOpc, DL, MullVT, VMull2V0, VMull2V1);
2675
2676   Mull = DAG.getNode(ISD::BITCAST, DL, VT, Mull);
2677   Mull2 = DAG.getNode(ISD::BITCAST, DL, VT, Mull2);
2678
2679   return DAG.getNode(AArch64ISD::UZP2, DL, VT, Mull, Mull2);
2680 }
2681
2682 SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
2683                                                        SelectionDAG &DAG) const {
2684   unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
2685   SDLoc dl(Op);
2686   switch (IntNo) {
2687   default: return SDValue(); // Don't custom lower most intrinsics.
2688   case Intrinsic::thread_pointer: {
2689     EVT PtrVT = getPointerTy(DAG.getDataLayout());
2690     return DAG.getNode(AArch64ISD::THREAD_POINTER, dl, PtrVT);
2691   }
2692   case Intrinsic::aarch64_neon_abs:
2693     return DAG.getNode(ISD::ABS, dl, Op.getValueType(),
2694                        Op.getOperand(1));
2695   case Intrinsic::aarch64_neon_smax:
2696     return DAG.getNode(ISD::SMAX, dl, Op.getValueType(),
2697                        Op.getOperand(1), Op.getOperand(2));
2698   case Intrinsic::aarch64_neon_umax:
2699     return DAG.getNode(ISD::UMAX, dl, Op.getValueType(),
2700                        Op.getOperand(1), Op.getOperand(2));
2701   case Intrinsic::aarch64_neon_smin:
2702     return DAG.getNode(ISD::SMIN, dl, Op.getValueType(),
2703                        Op.getOperand(1), Op.getOperand(2));
2704   case Intrinsic::aarch64_neon_umin:
2705     return DAG.getNode(ISD::UMIN, dl, Op.getValueType(),
2706                        Op.getOperand(1), Op.getOperand(2));
2707   }
2708 }
2709
2710 // Custom lower trunc store for v4i8 vectors, since it is promoted to v4i16.
2711 static SDValue LowerTruncateVectorStore(SDLoc DL, StoreSDNode *ST,
2712                                         EVT VT, EVT MemVT,
2713                                         SelectionDAG &DAG) {
2714   assert(VT.isVector() && "VT should be a vector type");
2715   assert(MemVT == MVT::v4i8 && VT == MVT::v4i16);
2716
2717   SDValue Value = ST->getValue();
2718
2719   // We first extend the promoted v4i16 to v8i16, truncate it to v8i8, and
2720   // extract the word lane which represents the v4i8 subvector. This
2721   // optimizes the store to:
2722   //
2723   //   xtn v0.8b, v0.8h
2724   //   str s0, [x0]
2725
2726   SDValue Undef = DAG.getUNDEF(MVT::i16);
2727   SDValue UndefVec = DAG.getBuildVector(MVT::v4i16, DL,
2728                                         {Undef, Undef, Undef, Undef});
2729
2730   SDValue TruncExt = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v8i16,
2731                                  Value, UndefVec);
2732   SDValue Trunc = DAG.getNode(ISD::TRUNCATE, DL, MVT::v8i8, TruncExt);
2733
2734   Trunc = DAG.getNode(ISD::BITCAST, DL, MVT::v2i32, Trunc);
2735   SDValue ExtractTrunc = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, MVT::i32,
2736                                      Trunc, DAG.getConstant(0, DL, MVT::i64));
2737
2738   return DAG.getStore(ST->getChain(), DL, ExtractTrunc,
2739                       ST->getBasePtr(), ST->getMemOperand());
2740 }
2741
2742 // Custom lowering for any store, vector or scalar, truncating or not.
2743 // Currently we only custom-lower truncating stores from vector v4i16 to
2744 // v4i8.
2745 SDValue AArch64TargetLowering::LowerSTORE(SDValue Op,
2746                                           SelectionDAG &DAG) const {
2747   SDLoc Dl(Op);
2748   StoreSDNode *StoreNode = cast<StoreSDNode>(Op);
2749   assert(StoreNode && "Can only custom lower store nodes");
2750
2751   SDValue Value = StoreNode->getValue();
2752
2753   EVT VT = Value.getValueType();
2754   EVT MemVT = StoreNode->getMemoryVT();
2755
2756   assert(VT.isVector() && "Can only custom lower vector store types");
2757
2758   unsigned AS = StoreNode->getAddressSpace();
2759   unsigned Align = StoreNode->getAlignment();
2760   if (Align < MemVT.getStoreSize() &&
2761       !allowsMisalignedMemoryAccesses(MemVT, AS, Align, nullptr)) {
2762     return scalarizeVectorStore(StoreNode, DAG);
2763   }
2764
2765   if (StoreNode->isTruncatingStore()) {
2766     return LowerTruncateVectorStore(Dl, StoreNode, VT, MemVT, DAG);
2767   }
2768
2769   return SDValue();
2770 }
2771
2772 SDValue AArch64TargetLowering::LowerOperation(SDValue Op,
2773                                               SelectionDAG &DAG) const {
2774   LLVM_DEBUG(dbgs() << "Custom lowering: ");
2775   LLVM_DEBUG(Op.dump());
2776
2777   switch (Op.getOpcode()) {
2778   default:
2779     llvm_unreachable("unimplemented operand");
2780     return SDValue();
2781   case ISD::BITCAST:
2782     return LowerBITCAST(Op, DAG);
2783   case ISD::GlobalAddress:
2784     return LowerGlobalAddress(Op, DAG);
2785   case ISD::GlobalTLSAddress:
2786     return LowerGlobalTLSAddress(Op, DAG);
2787   case ISD::SETCC:
2788     return LowerSETCC(Op, DAG);
2789   case ISD::BR_CC:
2790     return LowerBR_CC(Op, DAG);
2791   case ISD::SELECT:
2792     return LowerSELECT(Op, DAG);
2793   case ISD::SELECT_CC:
2794     return LowerSELECT_CC(Op, DAG);
2795   case ISD::JumpTable:
2796     return LowerJumpTable(Op, DAG);
2797   case ISD::ConstantPool:
2798     return LowerConstantPool(Op, DAG);
2799   case ISD::BlockAddress:
2800     return LowerBlockAddress(Op, DAG);
2801   case ISD::VASTART:
2802     return LowerVASTART(Op, DAG);
2803   case ISD::VACOPY:
2804     return LowerVACOPY(Op, DAG);
2805   case ISD::VAARG:
2806     return LowerVAARG(Op, DAG);
2807   case ISD::ADDC:
2808   case ISD::ADDE:
2809   case ISD::SUBC:
2810   case ISD::SUBE:
2811     return LowerADDC_ADDE_SUBC_SUBE(Op, DAG);
2812   case ISD::SADDO:
2813   case ISD::UADDO:
2814   case ISD::SSUBO:
2815   case ISD::USUBO:
2816   case ISD::SMULO:
2817 case ISD::UMULO: 2818 return LowerXALUO(Op, DAG); 2819 case ISD::FADD: 2820 return LowerF128Call(Op, DAG, RTLIB::ADD_F128); 2821 case ISD::FSUB: 2822 return LowerF128Call(Op, DAG, RTLIB::SUB_F128); 2823 case ISD::FMUL: 2824 return LowerF128Call(Op, DAG, RTLIB::MUL_F128); 2825 case ISD::FDIV: 2826 return LowerF128Call(Op, DAG, RTLIB::DIV_F128); 2827 case ISD::FP_ROUND: 2828 return LowerFP_ROUND(Op, DAG); 2829 case ISD::FP_EXTEND: 2830 return LowerFP_EXTEND(Op, DAG); 2831 case ISD::FRAMEADDR: 2832 return LowerFRAMEADDR(Op, DAG); 2833 case ISD::RETURNADDR: 2834 return LowerRETURNADDR(Op, DAG); 2835 case ISD::INSERT_VECTOR_ELT: 2836 return LowerINSERT_VECTOR_ELT(Op, DAG); 2837 case ISD::EXTRACT_VECTOR_ELT: 2838 return LowerEXTRACT_VECTOR_ELT(Op, DAG); 2839 case ISD::BUILD_VECTOR: 2840 return LowerBUILD_VECTOR(Op, DAG); 2841 case ISD::VECTOR_SHUFFLE: 2842 return LowerVECTOR_SHUFFLE(Op, DAG); 2843 case ISD::EXTRACT_SUBVECTOR: 2844 return LowerEXTRACT_SUBVECTOR(Op, DAG); 2845 case ISD::SRA: 2846 case ISD::SRL: 2847 case ISD::SHL: 2848 return LowerVectorSRA_SRL_SHL(Op, DAG); 2849 case ISD::SHL_PARTS: 2850 return LowerShiftLeftParts(Op, DAG); 2851 case ISD::SRL_PARTS: 2852 case ISD::SRA_PARTS: 2853 return LowerShiftRightParts(Op, DAG); 2854 case ISD::CTPOP: 2855 return LowerCTPOP(Op, DAG); 2856 case ISD::FCOPYSIGN: 2857 return LowerFCOPYSIGN(Op, DAG); 2858 case ISD::AND: 2859 return LowerVectorAND(Op, DAG); 2860 case ISD::OR: 2861 return LowerVectorOR(Op, DAG); 2862 case ISD::XOR: 2863 return LowerXOR(Op, DAG); 2864 case ISD::PREFETCH: 2865 return LowerPREFETCH(Op, DAG); 2866 case ISD::SINT_TO_FP: 2867 case ISD::UINT_TO_FP: 2868 return LowerINT_TO_FP(Op, DAG); 2869 case ISD::FP_TO_SINT: 2870 case ISD::FP_TO_UINT: 2871 return LowerFP_TO_INT(Op, DAG); 2872 case ISD::FSINCOS: 2873 return LowerFSINCOS(Op, DAG); 2874 case ISD::FLT_ROUNDS_: 2875 return LowerFLT_ROUNDS_(Op, DAG); 2876 case ISD::MUL: 2877 return LowerMUL(Op, DAG); 2878 case ISD::MULHS: 2879 case ISD::MULHU: 2880 return LowerMULH(Op, DAG); 2881 case ISD::INTRINSIC_WO_CHAIN: 2882 return LowerINTRINSIC_WO_CHAIN(Op, DAG); 2883 case ISD::STORE: 2884 return LowerSTORE(Op, DAG); 2885 case ISD::VECREDUCE_ADD: 2886 case ISD::VECREDUCE_SMAX: 2887 case ISD::VECREDUCE_SMIN: 2888 case ISD::VECREDUCE_UMAX: 2889 case ISD::VECREDUCE_UMIN: 2890 case ISD::VECREDUCE_FMAX: 2891 case ISD::VECREDUCE_FMIN: 2892 return LowerVECREDUCE(Op, DAG); 2893 case ISD::ATOMIC_LOAD_SUB: 2894 return LowerATOMIC_LOAD_SUB(Op, DAG); 2895 case ISD::ATOMIC_LOAD_AND: 2896 return LowerATOMIC_LOAD_AND(Op, DAG); 2897 case ISD::DYNAMIC_STACKALLOC: 2898 return LowerDYNAMIC_STACKALLOC(Op, DAG); 2899 } 2900 } 2901 2902 //===----------------------------------------------------------------------===// 2903 // Calling Convention Implementation 2904 //===----------------------------------------------------------------------===// 2905 2906 #include "AArch64GenCallingConv.inc" 2907 2908 /// Selects the correct CCAssignFn for a given CallingConvention value. 
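/// For instance (illustrative): a plain C call on Linux resolves to
/// CC_AArch64_AAPCS below, while the same call on Darwin in a variadic
/// context resolves to CC_AArch64_DarwinPCS_VarArg.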
2909 CCAssignFn *AArch64TargetLowering::CCAssignFnForCall(CallingConv::ID CC, 2910 bool IsVarArg) const { 2911 switch (CC) { 2912 default: 2913 report_fatal_error("Unsupported calling convention."); 2914 case CallingConv::WebKit_JS: 2915 return CC_AArch64_WebKit_JS; 2916 case CallingConv::GHC: 2917 return CC_AArch64_GHC; 2918 case CallingConv::C: 2919 case CallingConv::Fast: 2920 case CallingConv::PreserveMost: 2921 case CallingConv::CXX_FAST_TLS: 2922 case CallingConv::Swift: 2923 if (Subtarget->isTargetWindows() && IsVarArg) 2924 return CC_AArch64_Win64_VarArg; 2925 if (!Subtarget->isTargetDarwin()) 2926 return CC_AArch64_AAPCS; 2927 return IsVarArg ? CC_AArch64_DarwinPCS_VarArg : CC_AArch64_DarwinPCS; 2928 case CallingConv::Win64: 2929 return IsVarArg ? CC_AArch64_Win64_VarArg : CC_AArch64_AAPCS; 2930 } 2931 } 2932 2933 CCAssignFn * 2934 AArch64TargetLowering::CCAssignFnForReturn(CallingConv::ID CC) const { 2935 return CC == CallingConv::WebKit_JS ? RetCC_AArch64_WebKit_JS 2936 : RetCC_AArch64_AAPCS; 2937 } 2938 2939 SDValue AArch64TargetLowering::LowerFormalArguments( 2940 SDValue Chain, CallingConv::ID CallConv, bool isVarArg, 2941 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL, 2942 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const { 2943 MachineFunction &MF = DAG.getMachineFunction(); 2944 MachineFrameInfo &MFI = MF.getFrameInfo(); 2945 bool IsWin64 = Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv()); 2946 2947 // Assign locations to all of the incoming arguments. 2948 SmallVector<CCValAssign, 16> ArgLocs; 2949 CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), ArgLocs, 2950 *DAG.getContext()); 2951 2952 // At this point, Ins[].VT may already be promoted to i32. To correctly 2953 // handle passing i8 as i8 instead of i32 on stack, we pass in both i32 and 2954 // i8 to CC_AArch64_AAPCS with i32 being ValVT and i8 being LocVT. 2955 // Since AnalyzeFormalArguments uses Ins[].VT for both ValVT and LocVT, here 2956 // we use a special version of AnalyzeFormalArguments to pass in ValVT and 2957 // LocVT. 2958 unsigned NumArgs = Ins.size(); 2959 Function::const_arg_iterator CurOrigArg = MF.getFunction().arg_begin(); 2960 unsigned CurArgIdx = 0; 2961 for (unsigned i = 0; i != NumArgs; ++i) { 2962 MVT ValVT = Ins[i].VT; 2963 if (Ins[i].isOrigArg()) { 2964 std::advance(CurOrigArg, Ins[i].getOrigArgIndex() - CurArgIdx); 2965 CurArgIdx = Ins[i].getOrigArgIndex(); 2966 2967 // Get type of the original argument. 2968 EVT ActualVT = getValueType(DAG.getDataLayout(), CurOrigArg->getType(), 2969 /*AllowUnknown*/ true); 2970 MVT ActualMVT = ActualVT.isSimple() ? ActualVT.getSimpleVT() : MVT::Other; 2971 // If ActualMVT is i1/i8/i16, we should set LocVT to i8/i8/i16. 2972 if (ActualMVT == MVT::i1 || ActualMVT == MVT::i8) 2973 ValVT = MVT::i8; 2974 else if (ActualMVT == MVT::i16) 2975 ValVT = MVT::i16; 2976 } 2977 CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, /*IsVarArg=*/false); 2978 bool Res = 2979 AssignFn(i, ValVT, ValVT, CCValAssign::Full, Ins[i].Flags, CCInfo); 2980 assert(!Res && "Call operand has unhandled type"); 2981 (void)Res; 2982 } 2983 assert(ArgLocs.size() == Ins.size()); 2984 SmallVector<SDValue, 16> ArgValues; 2985 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { 2986 CCValAssign &VA = ArgLocs[i]; 2987 2988 if (Ins[i].Flags.isByVal()) { 2989 // Byval is used for HFAs in the PCS, but the system should work in a 2990 // non-compliant manner for larger structs. 
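      // E.g. a 12-byte byval composite is rounded up below to NumRegs = 2
      // eight-byte slots, i.e. a 16-byte fixed frame object.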
      EVT PtrVT = getPointerTy(DAG.getDataLayout());
      int Size = Ins[i].Flags.getByValSize();
      unsigned NumRegs = (Size + 7) / 8;

      // FIXME: This works on big-endian for composite byvals, which are the
      // common case. It should also work for fundamental types.
      unsigned FrameIdx =
          MFI.CreateFixedObject(8 * NumRegs, VA.getLocMemOffset(), false);
      SDValue FrameIdxN = DAG.getFrameIndex(FrameIdx, PtrVT);
      InVals.push_back(FrameIdxN);

      continue;
    }

    if (VA.isRegLoc()) {
      // Arguments stored in registers.
      EVT RegVT = VA.getLocVT();

      SDValue ArgValue;
      const TargetRegisterClass *RC;

      if (RegVT == MVT::i32)
        RC = &AArch64::GPR32RegClass;
      else if (RegVT == MVT::i64)
        RC = &AArch64::GPR64RegClass;
      else if (RegVT == MVT::f16)
        RC = &AArch64::FPR16RegClass;
      else if (RegVT == MVT::f32)
        RC = &AArch64::FPR32RegClass;
      else if (RegVT == MVT::f64 || RegVT.is64BitVector())
        RC = &AArch64::FPR64RegClass;
      else if (RegVT == MVT::f128 || RegVT.is128BitVector())
        RC = &AArch64::FPR128RegClass;
      else
        llvm_unreachable("RegVT not supported by FORMAL_ARGUMENTS Lowering");

      // Transform the arguments in physical registers into virtual ones.
      unsigned Reg = MF.addLiveIn(VA.getLocReg(), RC);
      ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, RegVT);

      // If this is an 8, 16 or 32-bit value, it is really passed promoted
      // to 64 bits. Insert an assert[sz]ext to capture this, then
      // truncate to the right size.
      switch (VA.getLocInfo()) {
      default:
        llvm_unreachable("Unknown loc info!");
      case CCValAssign::Full:
        break;
      case CCValAssign::BCvt:
        ArgValue = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), ArgValue);
        break;
      case CCValAssign::AExt:
      case CCValAssign::SExt:
      case CCValAssign::ZExt:
        // SelectionDAGBuilder will insert appropriate AssertZExt & AssertSExt
        // nodes after our lowering.
        assert(RegVT == Ins[i].VT && "incorrect register location selected");
        break;
      }

      InVals.push_back(ArgValue);

    } else { // VA.isRegLoc()
      assert(VA.isMemLoc() && "CCValAssign is neither reg nor mem");
      unsigned ArgOffset = VA.getLocMemOffset();
      unsigned ArgSize = VA.getValVT().getSizeInBits() / 8;

      // On big-endian targets a small argument lives in the high part of its
      // 8-byte slot, e.g. a 2-byte value is loaded from slot offset 6.
      uint32_t BEAlign = 0;
      if (!Subtarget->isLittleEndian() && ArgSize < 8 &&
          !Ins[i].Flags.isInConsecutiveRegs())
        BEAlign = 8 - ArgSize;

      int FI = MFI.CreateFixedObject(ArgSize, ArgOffset + BEAlign, true);

      // Create load nodes to retrieve arguments from the stack.
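      // For instance, a sign-extended i8 stack argument is retrieved below as
      // a SEXTLOAD with MemVT i8 that produces the wider LocVT value.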
      SDValue FIN = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
      SDValue ArgValue;

      // For NON_EXTLOAD, generic code in getLoad asserts that ValVT == MemVT.
      ISD::LoadExtType ExtType = ISD::NON_EXTLOAD;
      MVT MemVT = VA.getValVT();

      switch (VA.getLocInfo()) {
      default:
        break;
      case CCValAssign::BCvt:
        MemVT = VA.getLocVT();
        break;
      case CCValAssign::SExt:
        ExtType = ISD::SEXTLOAD;
        break;
      case CCValAssign::ZExt:
        ExtType = ISD::ZEXTLOAD;
        break;
      case CCValAssign::AExt:
        ExtType = ISD::EXTLOAD;
        break;
      }

      ArgValue = DAG.getExtLoad(
          ExtType, DL, VA.getLocVT(), Chain, FIN,
          MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI),
          MemVT);

      InVals.push_back(ArgValue);
    }
  }

  // varargs
  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  if (isVarArg) {
    if (!Subtarget->isTargetDarwin() || IsWin64) {
      // The AAPCS variadic function ABI is identical to the non-variadic
      // one. As a result there may be more arguments in registers and we
      // should save them for future reference.
      // Win64 variadic functions also pass arguments in registers, but all
      // float arguments are passed in integer registers.
      saveVarArgRegisters(CCInfo, DAG, DL, Chain);
    }

    // This will point to the next argument passed via stack.
    unsigned StackOffset = CCInfo.getNextStackOffset();
    // We currently pass all varargs at 8-byte alignment.
    StackOffset = ((StackOffset + 7) & ~7);
    FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackOffset, true));
  }

  unsigned StackArgSize = CCInfo.getNextStackOffset();
  bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
  if (DoesCalleeRestoreStack(CallConv, TailCallOpt)) {
    // This is a non-standard ABI so by fiat I say we're allowed to make full
    // use of the stack area to be popped, which must be aligned to 16 bytes
    // in any case:
    StackArgSize = alignTo(StackArgSize, 16);

    // If we're expected to restore the stack (e.g. fastcc) then we'll be
    // adding a multiple of 16.
    FuncInfo->setArgumentStackToRestore(StackArgSize);

    // This realignment carries over to the available bytes below. Our own
    // callers will guarantee the space is free by giving an aligned value to
    // CALLSEQ_START.
  }
  // Even if we're not expected to free up the space, it's useful to know how
  // much is there while considering tail calls (because we can reuse it).
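  // For example, a fastcc function compiled with GuaranteedTailCallOpt and 20
  // bytes of incoming stack arguments records StackArgSize == 32 here
  // (alignTo(20, 16) above), all of which a later tail call may reuse.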
  FuncInfo->setBytesInStackArgArea(StackArgSize);

  return Chain;
}

void AArch64TargetLowering::saveVarArgRegisters(CCState &CCInfo,
                                                SelectionDAG &DAG,
                                                const SDLoc &DL,
                                                SDValue &Chain) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  bool IsWin64 =
      Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv());

  SmallVector<SDValue, 8> MemOps;

  static const MCPhysReg GPRArgRegs[] = { AArch64::X0, AArch64::X1, AArch64::X2,
                                          AArch64::X3, AArch64::X4, AArch64::X5,
                                          AArch64::X6, AArch64::X7 };
  static const unsigned NumGPRArgRegs = array_lengthof(GPRArgRegs);
  unsigned FirstVariadicGPR = CCInfo.getFirstUnallocated(GPRArgRegs);

  unsigned GPRSaveSize = 8 * (NumGPRArgRegs - FirstVariadicGPR);
  int GPRIdx = 0;
  if (GPRSaveSize != 0) {
    if (IsWin64) {
      GPRIdx = MFI.CreateFixedObject(GPRSaveSize, -(int)GPRSaveSize, false);
      if (GPRSaveSize & 15)
        // The extra size here, if triggered, will always be 8.
        MFI.CreateFixedObject(16 - (GPRSaveSize & 15),
                              -(int)alignTo(GPRSaveSize, 16), false);
    } else
      GPRIdx = MFI.CreateStackObject(GPRSaveSize, 8, false);

    SDValue FIN = DAG.getFrameIndex(GPRIdx, PtrVT);

    for (unsigned i = FirstVariadicGPR; i < NumGPRArgRegs; ++i) {
      unsigned VReg = MF.addLiveIn(GPRArgRegs[i], &AArch64::GPR64RegClass);
      SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::i64);
      SDValue Store = DAG.getStore(
          Val.getValue(1), DL, Val, FIN,
          IsWin64
              ? MachinePointerInfo::getFixedStack(DAG.getMachineFunction(),
                                                  GPRIdx,
                                                  (i - FirstVariadicGPR) * 8)
              : MachinePointerInfo::getStack(DAG.getMachineFunction(), i * 8));
      MemOps.push_back(Store);
      FIN =
          DAG.getNode(ISD::ADD, DL, PtrVT, FIN, DAG.getConstant(8, DL, PtrVT));
    }
  }
  FuncInfo->setVarArgsGPRIndex(GPRIdx);
  FuncInfo->setVarArgsGPRSize(GPRSaveSize);

  if (Subtarget->hasFPARMv8() && !IsWin64) {
    static const MCPhysReg FPRArgRegs[] = {
        AArch64::Q0, AArch64::Q1, AArch64::Q2, AArch64::Q3,
        AArch64::Q4, AArch64::Q5, AArch64::Q6, AArch64::Q7};
    static const unsigned NumFPRArgRegs = array_lengthof(FPRArgRegs);
    unsigned FirstVariadicFPR = CCInfo.getFirstUnallocated(FPRArgRegs);

    unsigned FPRSaveSize = 16 * (NumFPRArgRegs - FirstVariadicFPR);
    int FPRIdx = 0;
    if (FPRSaveSize != 0) {
      FPRIdx = MFI.CreateStackObject(FPRSaveSize, 16, false);

      SDValue FIN = DAG.getFrameIndex(FPRIdx, PtrVT);

      for (unsigned i = FirstVariadicFPR; i < NumFPRArgRegs; ++i) {
        unsigned VReg = MF.addLiveIn(FPRArgRegs[i], &AArch64::FPR128RegClass);
        SDValue Val = DAG.getCopyFromReg(Chain, DL, VReg, MVT::f128);

        SDValue Store = DAG.getStore(
            Val.getValue(1), DL, Val, FIN,
            MachinePointerInfo::getStack(DAG.getMachineFunction(), i * 16));
        MemOps.push_back(Store);
        FIN = DAG.getNode(ISD::ADD, DL, PtrVT, FIN,
                          DAG.getConstant(16, DL, PtrVT));
      }
    }
    FuncInfo->setVarArgsFPRIndex(FPRIdx);
    FuncInfo->setVarArgsFPRSize(FPRSaveSize);
  }

  if (!MemOps.empty()) {
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
  }
}

/// LowerCallResult - Lower the result values of a call into the appropriate
/// copies out of the physical registers they were assigned to.
SDValue AArch64TargetLowering::LowerCallResult(
    SDValue Chain, SDValue InFlag, CallingConv::ID CallConv, bool isVarArg,
    const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
    SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals, bool isThisReturn,
    SDValue ThisVal) const {
  CCAssignFn *RetCC = CallConv == CallingConv::WebKit_JS
                          ? RetCC_AArch64_WebKit_JS
                          : RetCC_AArch64_AAPCS;
  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeCallResult(Ins, RetCC);

  // Copy all of the result registers out of their specified physreg.
  for (unsigned i = 0; i != RVLocs.size(); ++i) {
    CCValAssign VA = RVLocs[i];

    // Pass the 'this' value directly from the argument to the return value,
    // to avoid register-unit interference.
    if (i == 0 && isThisReturn) {
      assert(!VA.needsCustom() && VA.getLocVT() == MVT::i64 &&
             "unexpected return calling convention register assignment");
      InVals.push_back(ThisVal);
      continue;
    }

    SDValue Val =
        DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), InFlag);
    Chain = Val.getValue(1);
    InFlag = Val.getValue(2);

    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::BCvt:
      Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
      break;
    }

    InVals.push_back(Val);
  }

  return Chain;
}

/// Return true if the calling convention is one that we can guarantee TCO for.
static bool canGuaranteeTCO(CallingConv::ID CC) {
  return CC == CallingConv::Fast;
}

/// Return true if we might ever do TCO for calls with this calling convention.
static bool mayTailCallThisCC(CallingConv::ID CC) {
  switch (CC) {
  case CallingConv::C:
  case CallingConv::PreserveMost:
  case CallingConv::Swift:
    return true;
  default:
    return canGuaranteeTCO(CC);
  }
}

bool AArch64TargetLowering::isEligibleForTailCallOptimization(
    SDValue Callee, CallingConv::ID CalleeCC, bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs,
    const SmallVectorImpl<SDValue> &OutVals,
    const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG &DAG) const {
  if (!mayTailCallThisCC(CalleeCC))
    return false;

  MachineFunction &MF = DAG.getMachineFunction();
  const Function &CallerF = MF.getFunction();
  CallingConv::ID CallerCC = CallerF.getCallingConv();
  bool CCMatch = CallerCC == CalleeCC;

  // Byval parameters hand the function a pointer directly into the stack area
  // we want to reuse during a tail call. Working around this *is* possible
  // (see X86) but less efficient and uglier in LowerCall.
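  // For example, if the caller itself takes a byval argument, the loop below
  // conservatively rejects the tail call rather than trying to preserve that
  // stack area.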
  for (Function::const_arg_iterator i = CallerF.arg_begin(),
                                    e = CallerF.arg_end();
       i != e; ++i)
    if (i->hasByValAttr())
      return false;

  if (getTargetMachine().Options.GuaranteedTailCallOpt)
    return canGuaranteeTCO(CalleeCC) && CCMatch;

  // Externally-defined functions with weak linkage should not be
  // tail-called on AArch64 when the OS does not support dynamic
  // pre-emption of symbols, as the AAELF spec requires normal calls
  // to undefined weak functions to be replaced with a NOP or jump to the
  // next instruction. The behaviour of branch instructions in this
  // situation (as used for tail calls) is implementation-defined, so we
  // cannot rely on the linker replacing the tail call with a return.
  if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = G->getGlobal();
    const Triple &TT = getTargetMachine().getTargetTriple();
    if (GV->hasExternalWeakLinkage() &&
        (!TT.isOSWindows() || TT.isOSBinFormatELF() || TT.isOSBinFormatMachO()))
      return false;
  }

  // Now we search for cases where we can use a tail call without changing the
  // ABI. Sibcall is used in some places (particularly gcc) to refer to this
  // concept.

  // I want anyone implementing a new calling convention to think long and hard
  // about this assert.
  assert((!isVarArg || CalleeCC == CallingConv::C) &&
         "Unexpected variadic calling convention");

  LLVMContext &C = *DAG.getContext();
  if (isVarArg && !Outs.empty()) {
    // At least two cases here: if caller is fastcc then we can't have any
    // memory arguments (we'd be expected to clean up the stack afterwards).
    // If caller is C then we could potentially use its argument area.

    // FIXME: for now we take the most conservative of these in both cases:
    // disallow all variadic memory operands.
    SmallVector<CCValAssign, 16> ArgLocs;
    CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);

    CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, true));
    for (const CCValAssign &ArgLoc : ArgLocs)
      if (!ArgLoc.isRegLoc())
        return false;
  }

  // Check that the call results are passed in the same way.
  if (!CCState::resultsCompatible(CalleeCC, CallerCC, MF, C, Ins,
                                  CCAssignFnForCall(CalleeCC, isVarArg),
                                  CCAssignFnForCall(CallerCC, isVarArg)))
    return false;
  // The callee has to preserve all registers the caller needs to preserve.
  const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
  const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
  if (!CCMatch) {
    const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
    if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
      return false;
  }

  // Nothing more to check if the callee is taking no arguments.
  if (Outs.empty())
    return true;

  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CalleeCC, isVarArg, MF, ArgLocs, C);

  CCInfo.AnalyzeCallOperands(Outs, CCAssignFnForCall(CalleeCC, isVarArg));

  const AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();

  // If the stack arguments for this call do not fit into our own save area
  // then the call cannot be made tail.
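  // For example, a caller with 16 bytes of incoming stack arguments cannot
  // tail-call a callee that needs 32 bytes of outgoing stack arguments.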
  if (CCInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea())
    return false;

  const MachineRegisterInfo &MRI = MF.getRegInfo();
  if (!parametersInCSRMatch(MRI, CallerPreserved, ArgLocs, OutVals))
    return false;

  return true;
}

SDValue AArch64TargetLowering::addTokenForArgument(SDValue Chain,
                                                   SelectionDAG &DAG,
                                                   MachineFrameInfo &MFI,
                                                   int ClobberedFI) const {
  SmallVector<SDValue, 8> ArgChains;
  int64_t FirstByte = MFI.getObjectOffset(ClobberedFI);
  int64_t LastByte = FirstByte + MFI.getObjectSize(ClobberedFI) - 1;

  // Include the original chain at the beginning of the list. When this is
  // used by target LowerCall hooks, this helps legalize find the
  // CALLSEQ_BEGIN node.
  ArgChains.push_back(Chain);

  // Add a chain value for each stack-argument load that overlaps the
  // clobbered object.
  for (SDNode::use_iterator U = DAG.getEntryNode().getNode()->use_begin(),
                            UE = DAG.getEntryNode().getNode()->use_end();
       U != UE; ++U)
    if (LoadSDNode *L = dyn_cast<LoadSDNode>(*U))
      if (FrameIndexSDNode *FI = dyn_cast<FrameIndexSDNode>(L->getBasePtr()))
        if (FI->getIndex() < 0) {
          int64_t InFirstByte = MFI.getObjectOffset(FI->getIndex());
          int64_t InLastByte = InFirstByte;
          InLastByte += MFI.getObjectSize(FI->getIndex()) - 1;

          // i.e. the byte ranges [FirstByte, LastByte] and
          // [InFirstByte, InLastByte] intersect.
          if ((InFirstByte <= FirstByte && FirstByte <= InLastByte) ||
              (FirstByte <= InFirstByte && InFirstByte <= LastByte))
            ArgChains.push_back(SDValue(L, 1));
        }

  // Build a tokenfactor for all the chains.
  return DAG.getNode(ISD::TokenFactor, SDLoc(Chain), MVT::Other, ArgChains);
}

bool AArch64TargetLowering::DoesCalleeRestoreStack(CallingConv::ID CallCC,
                                                   bool TailCallOpt) const {
  return CallCC == CallingConv::Fast && TailCallOpt;
}

/// LowerCall - Lower a call to a callseq_start + CALL + callseq_end chain,
/// and add input and output parameter nodes.
SDValue
AArch64TargetLowering::LowerCall(CallLoweringInfo &CLI,
                                 SmallVectorImpl<SDValue> &InVals) const {
  SelectionDAG &DAG = CLI.DAG;
  SDLoc &DL = CLI.DL;
  SmallVector<ISD::OutputArg, 32> &Outs = CLI.Outs;
  SmallVector<SDValue, 32> &OutVals = CLI.OutVals;
  SmallVector<ISD::InputArg, 32> &Ins = CLI.Ins;
  SDValue Chain = CLI.Chain;
  SDValue Callee = CLI.Callee;
  bool &IsTailCall = CLI.IsTailCall;
  CallingConv::ID CallConv = CLI.CallConv;
  bool IsVarArg = CLI.IsVarArg;

  MachineFunction &MF = DAG.getMachineFunction();
  bool IsThisReturn = false;

  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  bool TailCallOpt = MF.getTarget().Options.GuaranteedTailCallOpt;
  bool IsSibCall = false;

  if (IsTailCall) {
    // Check if it's really possible to do a tail call.
    IsTailCall = isEligibleForTailCallOptimization(
        Callee, CallConv, IsVarArg, Outs, OutVals, Ins, DAG);
    if (!IsTailCall && CLI.CS && CLI.CS.isMustTailCall())
      report_fatal_error("failed to perform tail call elimination on a call "
                         "site marked musttail");

    // A sibling call is one where we're under the usual C ABI and not planning
    // to change that but can still do a tail call:
    if (!TailCallOpt && IsTailCall)
      IsSibCall = true;

    if (IsTailCall)
      ++NumTailCalls;
  }

  // Analyze operands of the call, assigning locations to each operand.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), ArgLocs,
                 *DAG.getContext());

  if (IsVarArg) {
    // Handle fixed and variable vector arguments differently.
    // Variable vector arguments always go into memory.
    unsigned NumArgs = Outs.size();

    for (unsigned i = 0; i != NumArgs; ++i) {
      MVT ArgVT = Outs[i].VT;
      ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
      CCAssignFn *AssignFn = CCAssignFnForCall(CallConv,
                                               /*IsVarArg=*/ !Outs[i].IsFixed);
      bool Res = AssignFn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, CCInfo);
      assert(!Res && "Call operand has unhandled type");
      (void)Res;
    }
  } else {
    // At this point, Outs[].VT may already be promoted to i32. To correctly
    // handle passing i8 as i8 instead of i32 on stack, we pass in both i32 and
    // i8 to CC_AArch64_AAPCS with i32 being ValVT and i8 being LocVT.
    // Since AnalyzeCallOperands uses Outs[].VT for both ValVT and LocVT, here
    // we use a special version of AnalyzeCallOperands to pass in ValVT and
    // LocVT.
    unsigned NumArgs = Outs.size();
    for (unsigned i = 0; i != NumArgs; ++i) {
      MVT ValVT = Outs[i].VT;
      // Get type of the original argument.
      EVT ActualVT = getValueType(DAG.getDataLayout(),
                                  CLI.getArgs()[Outs[i].OrigArgIndex].Ty,
                                  /*AllowUnknown*/ true);
      MVT ActualMVT = ActualVT.isSimple() ? ActualVT.getSimpleVT() : ValVT;
      ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
      // If ActualMVT is i1/i8/i16, we should set LocVT to i8/i8/i16.
      if (ActualMVT == MVT::i1 || ActualMVT == MVT::i8)
        ValVT = MVT::i8;
      else if (ActualMVT == MVT::i16)
        ValVT = MVT::i16;

      CCAssignFn *AssignFn = CCAssignFnForCall(CallConv, /*IsVarArg=*/false);
      bool Res = AssignFn(i, ValVT, ValVT, CCValAssign::Full, ArgFlags, CCInfo);
      assert(!Res && "Call operand has unhandled type");
      (void)Res;
    }
  }

  // Get a count of how many bytes are to be pushed on the stack.
  unsigned NumBytes = CCInfo.getNextStackOffset();

  if (IsSibCall) {
    // Since we're not changing the ABI to make this a tail call, the memory
    // operands are already available in the caller's incoming argument space.
    NumBytes = 0;
  }

  // FPDiff is the byte offset of the call's argument area from the callee's.
  // Stores to callee stack arguments will be placed in FixedStackSlots offset
  // by this amount for a tail call. In a sibling call it must be 0 because the
  // caller will deallocate the entire stack and the callee still expects its
  // arguments to begin at SP+0. Completely unused for non-tail calls.
  int FPDiff = 0;

  if (IsTailCall && !IsSibCall) {
    unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();

    // Since the callee will pop the argument stack as a tail call, we must
    // keep the popped size 16-byte aligned.
    NumBytes = alignTo(NumBytes, 16);

    // FPDiff will be negative if this tail call requires more space than we
    // would automatically have in our incoming argument space. Positive if we
    // can actually shrink the stack.
    FPDiff = NumReusableBytes - NumBytes;

    // The stack pointer must be 16-byte aligned at all times it's used for a
    // memory operation, which in practice means at *all* times and in
    // particular across call boundaries.
    // Therefore our own arguments started at a 16-byte aligned SP and the
    // delta applied for the tail call should satisfy the same constraint.
    assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
  }

  // Adjust the stack pointer for the new arguments...
  // These operations are automatically eliminated by the prolog/epilog pass.
  if (!IsSibCall)
    Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, DL);

  SDValue StackPtr = DAG.getCopyFromReg(Chain, DL, AArch64::SP,
                                        getPointerTy(DAG.getDataLayout()));

  SmallVector<std::pair<unsigned, SDValue>, 8> RegsToPass;
  SmallVector<SDValue, 8> MemOpChains;
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  // Walk the register/memloc assignments, inserting copies/loads.
  for (unsigned i = 0, realArgIdx = 0, e = ArgLocs.size(); i != e;
       ++i, ++realArgIdx) {
    CCValAssign &VA = ArgLocs[i];
    SDValue Arg = OutVals[realArgIdx];
    ISD::ArgFlagsTy Flags = Outs[realArgIdx].Flags;

    // Promote the value if needed.
    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full:
      break;
    case CCValAssign::SExt:
      Arg = DAG.getNode(ISD::SIGN_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::ZExt:
      Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::AExt:
      if (Outs[realArgIdx].ArgVT == MVT::i1) {
        // AAPCS requires i1 to be zero-extended to 8 bits by the caller.
        Arg = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Arg);
        Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i8, Arg);
      }
      Arg = DAG.getNode(ISD::ANY_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
      break;
    case CCValAssign::FPExt:
      Arg = DAG.getNode(ISD::FP_EXTEND, DL, VA.getLocVT(), Arg);
      break;
    }

    if (VA.isRegLoc()) {
      if (realArgIdx == 0 && Flags.isReturned() && !Flags.isSwiftSelf() &&
          Outs[0].VT == MVT::i64) {
        assert(VA.getLocVT() == MVT::i64 &&
               "unexpected calling convention register assignment");
        assert(!Ins.empty() && Ins[0].VT == MVT::i64 &&
               "unexpected use of 'returned'");
        IsThisReturn = true;
      }
      RegsToPass.push_back(std::make_pair(VA.getLocReg(), Arg));
    } else {
      assert(VA.isMemLoc());

      SDValue DstAddr;
      MachinePointerInfo DstInfo;

      // FIXME: This works on big-endian for composite byvals, which are the
      // common case. It should also work for fundamental types.
      uint32_t BEAlign = 0;
      unsigned OpSize = Flags.isByVal() ? Flags.getByValSize() * 8
                                        : VA.getValVT().getSizeInBits();
      OpSize = (OpSize + 7) / 8;
      if (!Subtarget->isLittleEndian() && !Flags.isByVal() &&
          !Flags.isInConsecutiveRegs()) {
        if (OpSize < 8)
          BEAlign = 8 - OpSize;
      }
      unsigned LocMemOffset = VA.getLocMemOffset();
      int32_t Offset = LocMemOffset + BEAlign;
      SDValue PtrOff = DAG.getIntPtrConstant(Offset, DL);
      PtrOff = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);

      if (IsTailCall) {
        Offset = Offset + FPDiff;
        int FI = MF.getFrameInfo().CreateFixedObject(OpSize, Offset, true);

        DstAddr = DAG.getFrameIndex(FI, PtrVT);
        DstInfo =
            MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI);

        // Make sure any stack arguments overlapping with where we're storing
        // are loaded before this eventual operation. Otherwise they'll be
        // clobbered.
        Chain = addTokenForArgument(Chain, DAG, MF.getFrameInfo(), FI);
      } else {
        SDValue PtrOff = DAG.getIntPtrConstant(Offset, DL);

        DstAddr = DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr, PtrOff);
        DstInfo = MachinePointerInfo::getStack(DAG.getMachineFunction(),
                                               LocMemOffset);
      }

      if (Outs[i].Flags.isByVal()) {
        SDValue SizeNode =
            DAG.getConstant(Outs[i].Flags.getByValSize(), DL, MVT::i64);
        SDValue Cpy = DAG.getMemcpy(
            Chain, DL, DstAddr, Arg, SizeNode, Outs[i].Flags.getByValAlign(),
            /*isVol = */ false, /*AlwaysInline = */ false,
            /*isTailCall = */ false, DstInfo, MachinePointerInfo());

        MemOpChains.push_back(Cpy);
      } else {
        // Since we pass i1/i8/i16 as i1/i8/i16 on stack and Arg is already
        // promoted to a legal register type i32, we should truncate Arg back
        // to i1/i8/i16.
        if (VA.getValVT() == MVT::i1 || VA.getValVT() == MVT::i8 ||
            VA.getValVT() == MVT::i16)
          Arg = DAG.getNode(ISD::TRUNCATE, DL, VA.getValVT(), Arg);

        SDValue Store = DAG.getStore(Chain, DL, Arg, DstAddr, DstInfo);
        MemOpChains.push_back(Store);
      }
    }
  }

  if (!MemOpChains.empty())
    Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);

  // Build a sequence of copy-to-reg nodes chained together with token chain
  // and flag operands which copy the outgoing args into the appropriate regs.
  SDValue InFlag;
  for (auto &RegToPass : RegsToPass) {
    Chain = DAG.getCopyToReg(Chain, DL, RegToPass.first,
                             RegToPass.second, InFlag);
    InFlag = Chain.getValue(1);
  }

  // If the callee is a GlobalAddress/ExternalSymbol node (quite common, every
  // direct call is), turn it into a TargetGlobalAddress/TargetExternalSymbol
  // node so that legalize doesn't hack it.
  if (auto *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
    auto GV = G->getGlobal();
    if (Subtarget->classifyGlobalFunctionReference(GV, getTargetMachine()) ==
        AArch64II::MO_GOT) {
      Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_GOT);
      Callee = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, Callee);
    } else if (Subtarget->isTargetCOFF() && GV->hasDLLImportStorageClass()) {
      assert(Subtarget->isTargetWindows() &&
             "Windows is the only supported COFF target");
      Callee = getGOT(G, DAG, AArch64II::MO_DLLIMPORT);
    } else {
      Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, 0);
    }
  } else if (auto *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    if (getTargetMachine().getCodeModel() == CodeModel::Large &&
        Subtarget->isTargetMachO()) {
      const char *Sym = S->getSymbol();
      Callee = DAG.getTargetExternalSymbol(Sym, PtrVT, AArch64II::MO_GOT);
      Callee = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, Callee);
    } else {
      const char *Sym = S->getSymbol();
      Callee = DAG.getTargetExternalSymbol(Sym, PtrVT, 0);
    }
  }

  // We don't usually want to end the call-sequence here because we would tidy
  // the frame up *after* the call; however, in the ABI-changing tail-call case
  // we've carefully laid out the parameters so that when SP is reset they'll
  // be in the correct location.
  if (IsTailCall && !IsSibCall) {
    Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true),
                               DAG.getIntPtrConstant(0, DL, true), InFlag, DL);
    InFlag = Chain.getValue(1);
  }

  std::vector<SDValue> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  if (IsTailCall) {
    // Each tail call may have to adjust the stack by a different amount, so
    // this information must travel along with the operation for eventual
    // consumption by emitEpilogue.
    Ops.push_back(DAG.getTargetConstant(FPDiff, DL, MVT::i32));
  }

  // Add argument registers to the end of the list so that they are known live
  // into the call.
  for (auto &RegToPass : RegsToPass)
    Ops.push_back(DAG.getRegister(RegToPass.first,
                                  RegToPass.second.getValueType()));

  // Add a register mask operand representing the call-preserved registers.
  const uint32_t *Mask;
  const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
  if (IsThisReturn) {
    // For 'this' returns, use the X0-preserving mask if applicable.
    Mask = TRI->getThisReturnPreservedMask(MF, CallConv);
    if (!Mask) {
      IsThisReturn = false;
      Mask = TRI->getCallPreservedMask(MF, CallConv);
    }
  } else
    Mask = TRI->getCallPreservedMask(MF, CallConv);

  assert(Mask && "Missing call preserved mask for calling convention");
  Ops.push_back(DAG.getRegisterMask(Mask));

  if (InFlag.getNode())
    Ops.push_back(InFlag);

  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);

  // If we're doing a tail call, use a TC_RETURN here rather than an
  // actual call instruction.
  if (IsTailCall) {
    MF.getFrameInfo().setHasTailCall();
    return DAG.getNode(AArch64ISD::TC_RETURN, DL, NodeTys, Ops);
  }

  // Returns a chain and a flag for retval copy to use.
  Chain = DAG.getNode(AArch64ISD::CALL, DL, NodeTys, Ops);
  InFlag = Chain.getValue(1);

  uint64_t CalleePopBytes =
      DoesCalleeRestoreStack(CallConv, TailCallOpt) ? alignTo(NumBytes, 16)
                                                    : 0;
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true),
                             DAG.getIntPtrConstant(CalleePopBytes, DL, true),
                             InFlag, DL);
  if (!Ins.empty())
    InFlag = Chain.getValue(1);

  // Handle result values, copying them out of physregs into vregs that we
  // return.
  return LowerCallResult(Chain, InFlag, CallConv, IsVarArg, Ins, DL, DAG,
                         InVals, IsThisReturn,
                         IsThisReturn ? OutVals[0] : SDValue());
}

bool AArch64TargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool isVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
  CCAssignFn *RetCC = CallConv == CallingConv::WebKit_JS
                          ? RetCC_AArch64_WebKit_JS
                          : RetCC_AArch64_AAPCS;
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, MF, RVLocs, Context);
  return CCInfo.CheckReturn(Outs, RetCC);
}

SDValue
AArch64TargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                   bool isVarArg,
                                   const SmallVectorImpl<ISD::OutputArg> &Outs,
                                   const SmallVectorImpl<SDValue> &OutVals,
                                   const SDLoc &DL, SelectionDAG &DAG) const {
  CCAssignFn *RetCC = CallConv == CallingConv::WebKit_JS
                          ? RetCC_AArch64_WebKit_JS
                          : RetCC_AArch64_AAPCS;
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, isVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());
  CCInfo.AnalyzeReturn(Outs, RetCC);

  // Copy the result values into the output registers.
  SDValue Flag;
  SmallVector<SDValue, 4> RetOps(1, Chain);
  for (unsigned i = 0, realRVLocIdx = 0; i != RVLocs.size();
       ++i, ++realRVLocIdx) {
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");
    SDValue Arg = OutVals[realRVLocIdx];

    switch (VA.getLocInfo()) {
    default:
      llvm_unreachable("Unknown loc info!");
    case CCValAssign::Full:
      if (Outs[i].ArgVT == MVT::i1) {
        // AAPCS requires i1 to be zero-extended to i8 by the producer of the
        // value. This is strictly redundant on Darwin (which uses "zeroext
        // i1"), but will be optimised out before ISel.
        Arg = DAG.getNode(ISD::TRUNCATE, DL, MVT::i1, Arg);
        Arg = DAG.getNode(ISD::ZERO_EXTEND, DL, VA.getLocVT(), Arg);
      }
      break;
    case CCValAssign::BCvt:
      Arg = DAG.getNode(ISD::BITCAST, DL, VA.getLocVT(), Arg);
      break;
    }

    Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Arg, Flag);
    Flag = Chain.getValue(1);
    RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
  }
  const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
  const MCPhysReg *I =
      TRI->getCalleeSavedRegsViaCopy(&DAG.getMachineFunction());
  if (I) {
    for (; *I; ++I) {
      if (AArch64::GPR64RegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::i64));
      else if (AArch64::FPR64RegClass.contains(*I))
        RetOps.push_back(DAG.getRegister(*I, MVT::getFloatingPointVT(64)));
      else
        llvm_unreachable("Unexpected register class in CSRsViaCopy!");
    }
  }

  RetOps[0] = Chain; // Update chain.

  // Add the flag if we have it.
  if (Flag.getNode())
    RetOps.push_back(Flag);

  return DAG.getNode(AArch64ISD::RET_FLAG, DL, MVT::Other, RetOps);
}

//===----------------------------------------------------------------------===//
// Other Lowering Code
//===----------------------------------------------------------------------===//

SDValue AArch64TargetLowering::getTargetNode(GlobalAddressSDNode *N, EVT Ty,
                                             SelectionDAG &DAG,
                                             unsigned Flag) const {
  return DAG.getTargetGlobalAddress(N->getGlobal(), SDLoc(N), Ty,
                                    N->getOffset(), Flag);
}

SDValue AArch64TargetLowering::getTargetNode(JumpTableSDNode *N, EVT Ty,
                                             SelectionDAG &DAG,
                                             unsigned Flag) const {
  return DAG.getTargetJumpTable(N->getIndex(), Ty, Flag);
}

SDValue AArch64TargetLowering::getTargetNode(ConstantPoolSDNode *N, EVT Ty,
                                             SelectionDAG &DAG,
                                             unsigned Flag) const {
  return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlignment(),
                                   N->getOffset(), Flag);
}

SDValue AArch64TargetLowering::getTargetNode(BlockAddressSDNode* N, EVT Ty,
                                             SelectionDAG &DAG,
                                             unsigned Flag) const {
  return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, 0, Flag);
}

// (loadGOT sym)
template <class NodeTy>
SDValue AArch64TargetLowering::getGOT(NodeTy *N, SelectionDAG &DAG,
                                      unsigned Flags) const {
  LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getGOT\n");
  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());
  SDValue GotAddr = getTargetNode(N, Ty, DAG, AArch64II::MO_GOT | Flags);
  // FIXME: Once remat is capable of dealing with instructions with register
  // operands, expand this into two nodes instead of using a wrapper node.
  return DAG.getNode(AArch64ISD::LOADgot, DL, Ty, GotAddr);
}

// (wrapper %highest(sym), %higher(sym), %hi(sym), %lo(sym))
template <class NodeTy>
SDValue AArch64TargetLowering::getAddrLarge(NodeTy *N, SelectionDAG &DAG,
                                            unsigned Flags) const {
  LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddrLarge\n");
  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());
  const unsigned char MO_NC = AArch64II::MO_NC;
  return DAG.getNode(
      AArch64ISD::WrapperLarge, DL, Ty,
      getTargetNode(N, Ty, DAG, AArch64II::MO_G3 | Flags),
      getTargetNode(N, Ty, DAG, AArch64II::MO_G2 | MO_NC | Flags),
      getTargetNode(N, Ty, DAG, AArch64II::MO_G1 | MO_NC | Flags),
      getTargetNode(N, Ty, DAG, AArch64II::MO_G0 | MO_NC | Flags));
}

// (addlow (adrp %hi(sym)) %lo(sym))
template <class NodeTy>
SDValue AArch64TargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
                                       unsigned Flags) const {
  LLVM_DEBUG(dbgs() << "AArch64TargetLowering::getAddr\n");
  SDLoc DL(N);
  EVT Ty = getPointerTy(DAG.getDataLayout());
  SDValue Hi = getTargetNode(N, Ty, DAG, AArch64II::MO_PAGE | Flags);
  SDValue Lo = getTargetNode(N, Ty, DAG,
                             AArch64II::MO_PAGEOFF | AArch64II::MO_NC | Flags);
  SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, Ty, Hi);
  return DAG.getNode(AArch64ISD::ADDlow, DL, Ty, ADRP, Lo);
}

SDValue AArch64TargetLowering::LowerGlobalAddress(SDValue Op,
                                                  SelectionDAG &DAG) const {
  GlobalAddressSDNode *GN = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GN->getGlobal();
  const AArch64II::TOF TargetFlags =
      (GV->hasDLLImportStorageClass() ? AArch64II::MO_DLLIMPORT
                                      : AArch64II::MO_NO_FLAG);
  unsigned char OpFlags =
      Subtarget->ClassifyGlobalReference(GV, getTargetMachine());

  if (OpFlags != AArch64II::MO_NO_FLAG)
    assert(cast<GlobalAddressSDNode>(Op)->getOffset() == 0 &&
           "unexpected offset in global node");

  // This also catches the large code model case for Darwin.
  if ((OpFlags & AArch64II::MO_GOT) != 0) {
    return getGOT(GN, DAG, TargetFlags);
  }

  SDValue Result;
  if (getTargetMachine().getCodeModel() == CodeModel::Large) {
    Result = getAddrLarge(GN, DAG, TargetFlags);
  } else {
    Result = getAddr(GN, DAG, TargetFlags);
  }
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDLoc DL(GN);
  if (GV->hasDLLImportStorageClass())
    Result = DAG.getLoad(PtrVT, DL, DAG.getEntryNode(), Result,
                         MachinePointerInfo::getGOT(DAG.getMachineFunction()));
  return Result;
}

/// Convert a TLS address reference into the correct sequence of loads
/// and calls to compute the variable's address (for Darwin, currently) and
/// return an SDValue containing the final node.

/// Darwin only has one TLS scheme which must be capable of dealing with the
/// fully general situation, in the worst case. This means:
///     + "extern __thread" declaration.
///     + Defined in a possibly unknown dynamic library.
///
/// The general system is that each __thread variable has a [3 x i64]
/// descriptor which contains information used by the runtime to calculate the
/// address. The only part of this the compiler needs to know about is the
/// first xword, which contains a function pointer that must be called with
/// the address of the entire descriptor in "x0".
///
/// Since this descriptor may be in a different unit, in general even the
/// descriptor must be accessed via an indirect load. The "ideal" code
/// sequence is:
///     adrp x0, _var@TLVPPAGE
///     ldr x0, [x0, _var@TLVPPAGEOFF]   ; x0 now contains address of descriptor
///     ldr x1, [x0]                     ; x1 contains 1st entry of descriptor,
///                                      ; the function pointer
///     blr x1                           ; Uses descriptor address in x0
///                                      ; Address of _var is now in x0.
///
/// If the address of _var's descriptor *is* known to the linker, then it can
/// change the first "ldr" instruction to an appropriate "add x0, x0, #imm" for
/// a slight efficiency gain.
SDValue
AArch64TargetLowering::LowerDarwinGlobalTLSAddress(SDValue Op,
                                                   SelectionDAG &DAG) const {
  assert(Subtarget->isTargetDarwin() &&
         "This function expects a Darwin target");

  SDLoc DL(Op);
  MVT PtrVT = getPointerTy(DAG.getDataLayout());
  const GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();

  SDValue TLVPAddr =
      DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS);
  SDValue DescAddr = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, TLVPAddr);

  // The first entry in the descriptor is a function pointer that we must call
  // to obtain the address of the variable.
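  // This load is the "ldr x1, [x0]" step of the sequence documented above;
  // DescAddr itself came from the LOADgot (the adrp + ldr of the TLVP
  // reference).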
  SDValue Chain = DAG.getEntryNode();
  SDValue FuncTLVGet = DAG.getLoad(
      MVT::i64, DL, Chain, DescAddr,
      MachinePointerInfo::getGOT(DAG.getMachineFunction()),
      /* Alignment = */ 8,
      MachineMemOperand::MONonTemporal | MachineMemOperand::MOInvariant |
          MachineMemOperand::MODereferenceable);
  Chain = FuncTLVGet.getValue(1);

  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  MFI.setAdjustsStack(true);

  // TLS calls preserve all registers except those that absolutely must be
  // trashed: X0 (it takes an argument), LR (it's a call) and NZCV (let's not
  // be silly).
  const uint32_t *Mask =
      Subtarget->getRegisterInfo()->getTLSCallPreservedMask();

  // Finally, we can make the call. This is just a degenerate version of a
  // normal AArch64 call node: x0 takes the address of the descriptor, and
  // returns the address of the variable in this thread.
  Chain = DAG.getCopyToReg(Chain, DL, AArch64::X0, DescAddr, SDValue());
  Chain =
      DAG.getNode(AArch64ISD::CALL, DL, DAG.getVTList(MVT::Other, MVT::Glue),
                  Chain, FuncTLVGet, DAG.getRegister(AArch64::X0, MVT::i64),
                  DAG.getRegisterMask(Mask), Chain.getValue(1));
  return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Chain.getValue(1));
}

/// When accessing thread-local variables under either the general-dynamic or
/// local-dynamic system, we make a "TLS-descriptor" call. The variable will
/// have a descriptor, accessible via a PC-relative ADRP, and whose first entry
/// is a function pointer to carry out the resolution.
///
/// The sequence is:
///    adrp  x0, :tlsdesc:var
///    ldr   x1, [x0, #:tlsdesc_lo12:var]
///    add   x0, x0, #:tlsdesc_lo12:var
///    .tlsdesccall var
///    blr   x1
///    (TPIDR_EL0 offset now in x0)
///
/// The above sequence must be produced unscheduled, to enable the linker to
/// optimize/relax this sequence.
/// Therefore, a pseudo-instruction (TLSDESC_CALLSEQ) is used to represent the
/// above sequence, and expanded really late in the compilation flow, to
/// ensure the sequence is produced as per above.
SDValue AArch64TargetLowering::LowerELFTLSDescCallSeq(SDValue SymAddr,
                                                      const SDLoc &DL,
                                                      SelectionDAG &DAG) const {
  EVT PtrVT = getPointerTy(DAG.getDataLayout());

  SDValue Chain = DAG.getEntryNode();
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);

  Chain =
      DAG.getNode(AArch64ISD::TLSDESC_CALLSEQ, DL, NodeTys, {Chain, SymAddr});
  SDValue Glue = Chain.getValue(1);

  return DAG.getCopyFromReg(Chain, DL, AArch64::X0, PtrVT, Glue);
}

SDValue
AArch64TargetLowering::LowerELFGlobalTLSAddress(SDValue Op,
                                                SelectionDAG &DAG) const {
  assert(Subtarget->isTargetELF() && "This function expects an ELF target");
  assert(Subtarget->useSmallAddressing() &&
         "ELF TLS only supported in small memory model");
  // Different choices can be made for the maximum size of the TLS area for a
  // module. For the small address model, the default TLS size is 16MiB and
  // the maximum TLS size is 4GiB.
  // FIXME: add -mtls-size command line option and make it control the 16MiB
  // vs. 4GiB code sequence generation.
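  // For reference, the local-exec sequence emitted below is roughly
  // (register choice is illustrative):
  //    mrs  x0, TPIDR_EL0
  //    add  x0, x0, #:tprel_hi12:var
  //    add  x0, x0, #:tprel_lo12_nc:var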
  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);

  TLSModel::Model Model = getTargetMachine().getTLSModel(GA->getGlobal());

  if (!EnableAArch64ELFLocalDynamicTLSGeneration) {
    if (Model == TLSModel::LocalDynamic)
      Model = TLSModel::GeneralDynamic;
  }

  SDValue TPOff;
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDLoc DL(Op);
  const GlobalValue *GV = GA->getGlobal();

  SDValue ThreadBase = DAG.getNode(AArch64ISD::THREAD_POINTER, DL, PtrVT);

  if (Model == TLSModel::LocalExec) {
    SDValue HiVar = DAG.getTargetGlobalAddress(
        GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_HI12);
    SDValue LoVar = DAG.getTargetGlobalAddress(
        GV, DL, PtrVT, 0,
        AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);

    SDValue TPWithOff_lo =
        SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, ThreadBase,
                                   HiVar,
                                   DAG.getTargetConstant(0, DL, MVT::i32)),
                0);
    SDValue TPWithOff =
        SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TPWithOff_lo,
                                   LoVar,
                                   DAG.getTargetConstant(0, DL, MVT::i32)),
                0);
    return TPWithOff;
  } else if (Model == TLSModel::InitialExec) {
    TPOff = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS);
    TPOff = DAG.getNode(AArch64ISD::LOADgot, DL, PtrVT, TPOff);
  } else if (Model == TLSModel::LocalDynamic) {
    // Local-dynamic accesses proceed in two phases. First, a general-dynamic
    // TLS descriptor call against the special symbol _TLS_MODULE_BASE_
    // calculates the beginning of the module's TLS region; a DTPREL offset
    // calculation then locates the variable within that region.

    // These accesses will need deduplicating if there's more than one.
    AArch64FunctionInfo *MFI =
        DAG.getMachineFunction().getInfo<AArch64FunctionInfo>();
    MFI->incNumLocalDynamicTLSAccesses();

    // The call needs a relocation too for linker relaxation. It doesn't make
    // sense to call it MO_PAGE or MO_PAGEOFF though, so we need another copy
    // of the address.
    SDValue SymAddr = DAG.getTargetExternalSymbol("_TLS_MODULE_BASE_", PtrVT,
                                                  AArch64II::MO_TLS);

    // Now we can calculate the offset from TPIDR_EL0 to this module's
    // thread-local area.
    TPOff = LowerELFTLSDescCallSeq(SymAddr, DL, DAG);

    // Now use :dtprel_whatever: operations to calculate this variable's
    // offset in its thread-storage area.
    SDValue HiVar = DAG.getTargetGlobalAddress(
        GV, DL, MVT::i64, 0, AArch64II::MO_TLS | AArch64II::MO_HI12);
    SDValue LoVar = DAG.getTargetGlobalAddress(
        GV, DL, MVT::i64, 0,
        AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);

    TPOff = SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TPOff, HiVar,
                                       DAG.getTargetConstant(0, DL, MVT::i32)),
                    0);
    TPOff = SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TPOff, LoVar,
                                       DAG.getTargetConstant(0, DL, MVT::i32)),
                    0);
  } else if (Model == TLSModel::GeneralDynamic) {
    // The call needs a relocation too for linker relaxation. It doesn't make
    // sense to call it MO_PAGE or MO_PAGEOFF though, so we need another copy
    // of the address.
    SDValue SymAddr =
        DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, AArch64II::MO_TLS);

    // Finally we can make a call to calculate the offset from tpidr_el0.
    TPOff = LowerELFTLSDescCallSeq(SymAddr, DL, DAG);
  } else
    llvm_unreachable("Unsupported ELF TLS access model");

  return DAG.getNode(ISD::ADD, DL, PtrVT, ThreadBase, TPOff);
}

SDValue
AArch64TargetLowering::LowerWindowsGlobalTLSAddress(SDValue Op,
                                                    SelectionDAG &DAG) const {
  assert(Subtarget->isTargetWindows() && "Windows specific TLS lowering");

  SDValue Chain = DAG.getEntryNode();
  EVT PtrVT = getPointerTy(DAG.getDataLayout());
  SDLoc DL(Op);

  SDValue TEB = DAG.getRegister(AArch64::X18, MVT::i64);

  // Load the ThreadLocalStoragePointer from the TEB.
  // A pointer to the TLS array is located at offset 0x58 from the TEB.
  SDValue TLSArray =
      DAG.getNode(ISD::ADD, DL, PtrVT, TEB, DAG.getIntPtrConstant(0x58, DL));
  TLSArray = DAG.getLoad(PtrVT, DL, Chain, TLSArray, MachinePointerInfo());
  Chain = TLSArray.getValue(1);

  // Load the TLS index from the C runtime.
  // This does the same as getAddr(), but without having a GlobalAddressSDNode.
  // This also does the same as LOADgot, but using a generic i32 load,
  // while LOADgot only loads i64.
  SDValue TLSIndexHi =
      DAG.getTargetExternalSymbol("_tls_index", PtrVT, AArch64II::MO_PAGE);
  SDValue TLSIndexLo = DAG.getTargetExternalSymbol(
      "_tls_index", PtrVT, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
  SDValue ADRP = DAG.getNode(AArch64ISD::ADRP, DL, PtrVT, TLSIndexHi);
  SDValue TLSIndex =
      DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, ADRP, TLSIndexLo);
  TLSIndex = DAG.getLoad(MVT::i32, DL, Chain, TLSIndex, MachinePointerInfo());
  Chain = TLSIndex.getValue(1);

  // The pointer to the thread's TLS data area lives at TLSIndex * 8 within
  // the TLS array.
  TLSIndex = DAG.getNode(ISD::ZERO_EXTEND, DL, PtrVT, TLSIndex);
  SDValue Slot = DAG.getNode(ISD::SHL, DL, PtrVT, TLSIndex,
                             DAG.getConstant(3, DL, PtrVT));
  SDValue TLS = DAG.getLoad(PtrVT, DL, Chain,
                            DAG.getNode(ISD::ADD, DL, PtrVT, TLSArray, Slot),
                            MachinePointerInfo());
  Chain = TLS.getValue(1);

  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  const GlobalValue *GV = GA->getGlobal();
  SDValue TGAHi = DAG.getTargetGlobalAddress(
      GV, DL, PtrVT, 0, AArch64II::MO_TLS | AArch64II::MO_HI12);
  SDValue TGALo = DAG.getTargetGlobalAddress(
      GV, DL, PtrVT, 0,
      AArch64II::MO_TLS | AArch64II::MO_PAGEOFF | AArch64II::MO_NC);

  // Add the offset from the start of the .tls section (section base).
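  // The offset is applied as a hi12/lo12 immediate pair, roughly (assuming
  // the COFF secrel relocation spellings; register choice is illustrative):
  //    add x0, <TLS base>, #:secrel_hi12:var
  //    add x0, x0, #:secrel_lo12:var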
  SDValue Addr =
      SDValue(DAG.getMachineNode(AArch64::ADDXri, DL, PtrVT, TLS, TGAHi,
                                 DAG.getTargetConstant(0, DL, MVT::i32)),
              0);
  Addr = DAG.getNode(AArch64ISD::ADDlow, DL, PtrVT, Addr, TGALo);
  return Addr;
}

SDValue AArch64TargetLowering::LowerGlobalTLSAddress(SDValue Op,
                                                     SelectionDAG &DAG) const {
  const GlobalAddressSDNode *GA = cast<GlobalAddressSDNode>(Op);
  if (DAG.getTarget().useEmulatedTLS())
    return LowerToTLSEmulatedModel(GA, DAG);

  if (Subtarget->isTargetDarwin())
    return LowerDarwinGlobalTLSAddress(Op, DAG);
  if (Subtarget->isTargetELF())
    return LowerELFGlobalTLSAddress(Op, DAG);
  if (Subtarget->isTargetWindows())
    return LowerWindowsGlobalTLSAddress(Op, DAG);

  llvm_unreachable("Unexpected platform trying to use TLS");
}

SDValue AArch64TargetLowering::LowerBR_CC(SDValue Op, SelectionDAG &DAG) const {
  SDValue Chain = Op.getOperand(0);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(1))->get();
  SDValue LHS = Op.getOperand(2);
  SDValue RHS = Op.getOperand(3);
  SDValue Dest = Op.getOperand(4);
  SDLoc dl(Op);

  // Handle f128 first, since lowering it will result in comparing the return
  // value of a libcall against zero, which is just what the rest of LowerBR_CC
  // is expecting to deal with.
  if (LHS.getValueType() == MVT::f128) {
    softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);

    // If softenSetCCOperands returned a scalar, we need to compare the result
    // against zero to select between true and false values.
    if (!RHS.getNode()) {
      RHS = DAG.getConstant(0, dl, LHS.getValueType());
      CC = ISD::SETNE;
    }
  }

  // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a branch
  // instruction.
  if (isOverflowIntrOpRes(LHS) && isOneConstant(RHS) &&
      (CC == ISD::SETEQ || CC == ISD::SETNE)) {
    // Only lower legal XALUO ops.
    if (!DAG.getTargetLoweringInfo().isTypeLegal(LHS->getValueType(0)))
      return SDValue();

    // The actual operation with overflow check.
    AArch64CC::CondCode OFCC;
    SDValue Value, Overflow;
    std::tie(Value, Overflow) = getAArch64XALUOOp(OFCC, LHS.getValue(0), DAG);

    if (CC == ISD::SETNE)
      OFCC = getInvertedCondCode(OFCC);
    SDValue CCVal = DAG.getConstant(OFCC, dl, MVT::i32);

    return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
                       Overflow);
  }

  if (LHS.getValueType().isInteger()) {
    assert((LHS.getValueType() == RHS.getValueType()) &&
           (LHS.getValueType() == MVT::i32 || LHS.getValueType() == MVT::i64));

    // If the RHS of the comparison is zero, we can potentially fold this
    // to a specialized branch.
    const ConstantSDNode *RHSC = dyn_cast<ConstantSDNode>(RHS);
    if (RHSC && RHSC->getZExtValue() == 0) {
      if (CC == ISD::SETEQ) {
        // See if we can use a TBZ to fold in an AND as well.
        // TBZ has a smaller branch displacement than CBZ. If the offset is
        // out of bounds, a late MI-layer pass rewrites branches.
        // 403.gcc is an example that hits this case.
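        // e.g. "(x & 8) == 0" becomes "tbz x, #3, dest" here.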
        if (LHS.getOpcode() == ISD::AND &&
            isa<ConstantSDNode>(LHS.getOperand(1)) &&
            isPowerOf2_64(LHS.getConstantOperandVal(1))) {
          SDValue Test = LHS.getOperand(0);
          uint64_t Mask = LHS.getConstantOperandVal(1);
          return DAG.getNode(AArch64ISD::TBZ, dl, MVT::Other, Chain, Test,
                             DAG.getConstant(Log2_64(Mask), dl, MVT::i64),
                             Dest);
        }

        return DAG.getNode(AArch64ISD::CBZ, dl, MVT::Other, Chain, LHS, Dest);
      } else if (CC == ISD::SETNE) {
        // See if we can use a TBZ to fold in an AND as well.
        // TBZ has a smaller branch displacement than CBZ. If the offset is
        // out of bounds, a late MI-layer pass rewrites branches.
        // 403.gcc is an example that hits this case.
        if (LHS.getOpcode() == ISD::AND &&
            isa<ConstantSDNode>(LHS.getOperand(1)) &&
            isPowerOf2_64(LHS.getConstantOperandVal(1))) {
          SDValue Test = LHS.getOperand(0);
          uint64_t Mask = LHS.getConstantOperandVal(1);
          return DAG.getNode(AArch64ISD::TBNZ, dl, MVT::Other, Chain, Test,
                             DAG.getConstant(Log2_64(Mask), dl, MVT::i64),
                             Dest);
        }

        return DAG.getNode(AArch64ISD::CBNZ, dl, MVT::Other, Chain, LHS, Dest);
      } else if (CC == ISD::SETLT && LHS.getOpcode() != ISD::AND) {
        // Don't combine AND since emitComparison converts the AND to an ANDS
        // (a.k.a. TST) and the test in the test bit and branch instruction
        // becomes redundant. This would also increase register pressure.
        uint64_t Mask = LHS.getValueSizeInBits() - 1;
        return DAG.getNode(AArch64ISD::TBNZ, dl, MVT::Other, Chain, LHS,
                           DAG.getConstant(Mask, dl, MVT::i64), Dest);
      }
    }
    if (RHSC && RHSC->getSExtValue() == -1 && CC == ISD::SETGT &&
        LHS.getOpcode() != ISD::AND) {
      // Don't combine AND since emitComparison converts the AND to an ANDS
      // (a.k.a. TST) and the test in the test bit and branch instruction
      // becomes redundant. This would also increase register pressure.
      uint64_t Mask = LHS.getValueSizeInBits() - 1;
      return DAG.getNode(AArch64ISD::TBZ, dl, MVT::Other, Chain, LHS,
                         DAG.getConstant(Mask, dl, MVT::i64), Dest);
    }

    SDValue CCVal;
    SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl);
    return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CCVal,
                       Cmp);
  }

  assert(LHS.getValueType() == MVT::f16 || LHS.getValueType() == MVT::f32 ||
         LHS.getValueType() == MVT::f64);

  // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally
  // clean. Some of them require two branches to implement.
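  // For example, SETONE ("ordered and not equal") has no single AArch64
  // condition code; changeFPCCToAArch64CC expands it into two conditions,
  // which produces the second conditional branch below.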
  SDValue Cmp = emitComparison(LHS, RHS, CC, dl, DAG);
  AArch64CC::CondCode CC1, CC2;
  changeFPCCToAArch64CC(CC, CC1, CC2);
  SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);
  SDValue BR1 =
      DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, Chain, Dest, CC1Val, Cmp);
  if (CC2 != AArch64CC::AL) {
    SDValue CC2Val = DAG.getConstant(CC2, dl, MVT::i32);
    return DAG.getNode(AArch64ISD::BRCOND, dl, MVT::Other, BR1, Dest, CC2Val,
                       Cmp);
  }

  return BR1;
}

SDValue AArch64TargetLowering::LowerFCOPYSIGN(SDValue Op,
                                              SelectionDAG &DAG) const {
  EVT VT = Op.getValueType();
  SDLoc DL(Op);

  SDValue In1 = Op.getOperand(0);
  SDValue In2 = Op.getOperand(1);
  EVT SrcVT = In2.getValueType();

  if (SrcVT.bitsLT(VT))
    In2 = DAG.getNode(ISD::FP_EXTEND, DL, VT, In2);
  else if (SrcVT.bitsGT(VT))
    In2 = DAG.getNode(ISD::FP_ROUND, DL, VT, In2, DAG.getIntPtrConstant(0, DL));

  EVT VecVT;
  uint64_t EltMask;
  SDValue VecVal1, VecVal2;

  auto setVecVal = [&] (int Idx) {
    if (!VT.isVector()) {
      VecVal1 = DAG.getTargetInsertSubreg(Idx, DL, VecVT,
                                          DAG.getUNDEF(VecVT), In1);
      VecVal2 = DAG.getTargetInsertSubreg(Idx, DL, VecVT,
                                          DAG.getUNDEF(VecVT), In2);
    } else {
      VecVal1 = DAG.getNode(ISD::BITCAST, DL, VecVT, In1);
      VecVal2 = DAG.getNode(ISD::BITCAST, DL, VecVT, In2);
    }
  };

  if (VT == MVT::f32 || VT == MVT::v2f32 || VT == MVT::v4f32) {
    VecVT = (VT == MVT::v2f32 ? MVT::v2i32 : MVT::v4i32);
    EltMask = 0x80000000ULL;
    setVecVal(AArch64::ssub);
  } else if (VT == MVT::f64 || VT == MVT::v2f64) {
    VecVT = MVT::v2i64;

    // We want to materialize a mask with the high bit set, but the AdvSIMD
    // immediate moves cannot materialize that in a single instruction for
    // 64-bit elements. Instead, materialize zero and then negate it.
    EltMask = 0;

    setVecVal(AArch64::dsub);
  } else if (VT == MVT::f16 || VT == MVT::v4f16 || VT == MVT::v8f16) {
    VecVT = (VT == MVT::v4f16 ? MVT::v4i16 : MVT::v8i16);
    EltMask = 0x8000ULL;
    setVecVal(AArch64::hsub);
  } else {
    llvm_unreachable("Invalid type for copysign!");
  }

  SDValue BuildVec = DAG.getConstant(EltMask, DL, VecVT);

  // If we couldn't materialize the mask above, then the mask vector will be
  // the zero vector, and we need to negate it here.
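  // FNEG of +0.0 flips only the sign bit, so negating the all-zero vector
  // yields 0x8000000000000000 in each 64-bit lane, exactly the mask we need.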
  if (VT == MVT::f64 || VT == MVT::v2f64) {
    BuildVec = DAG.getNode(ISD::BITCAST, DL, MVT::v2f64, BuildVec);
    BuildVec = DAG.getNode(ISD::FNEG, DL, MVT::v2f64, BuildVec);
    BuildVec = DAG.getNode(ISD::BITCAST, DL, MVT::v2i64, BuildVec);
  }

  SDValue Sel =
      DAG.getNode(AArch64ISD::BIT, DL, VecVT, VecVal1, VecVal2, BuildVec);

  if (VT == MVT::f16)
    return DAG.getTargetExtractSubreg(AArch64::hsub, DL, VT, Sel);
  if (VT == MVT::f32)
    return DAG.getTargetExtractSubreg(AArch64::ssub, DL, VT, Sel);
  else if (VT == MVT::f64)
    return DAG.getTargetExtractSubreg(AArch64::dsub, DL, VT, Sel);
  else
    return DAG.getNode(ISD::BITCAST, DL, VT, Sel);
}

SDValue AArch64TargetLowering::LowerCTPOP(SDValue Op, SelectionDAG &DAG) const {
  if (DAG.getMachineFunction().getFunction().hasFnAttribute(
          Attribute::NoImplicitFloat))
    return SDValue();

  if (!Subtarget->hasNEON())
    return SDValue();

  // While there is no integer popcount instruction, it can
  // be more efficiently lowered to the following sequence that uses
  // AdvSIMD registers/instructions as long as the copies to/from
  // the AdvSIMD registers are cheap.
  //  FMOV    D0, X0        // copy 64-bit int to vector, high bits zero'd
  //  CNT     V0.8B, V0.8B  // 8xbyte pop-counts
  //  ADDV    B0, V0.8B     // sum 8xbyte pop-counts
  //  UMOV    X0, V0.B[0]   // copy byte result back to integer reg
  SDValue Val = Op.getOperand(0);
  SDLoc DL(Op);
  EVT VT = Op.getValueType();

  if (VT == MVT::i32)
    Val = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, Val);
  Val = DAG.getNode(ISD::BITCAST, DL, MVT::v8i8, Val);

  SDValue CtPop = DAG.getNode(ISD::CTPOP, DL, MVT::v8i8, Val);
  SDValue UaddLV = DAG.getNode(
      ISD::INTRINSIC_WO_CHAIN, DL, MVT::i32,
      DAG.getConstant(Intrinsic::aarch64_neon_uaddlv, DL, MVT::i32), CtPop);

  if (VT == MVT::i64)
    UaddLV = DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, UaddLV);
  return UaddLV;
}

SDValue AArch64TargetLowering::LowerSETCC(SDValue Op, SelectionDAG &DAG) const {

  if (Op.getValueType().isVector())
    return LowerVSETCC(Op, DAG);

  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
  SDLoc dl(Op);

  // We chose ZeroOrOneBooleanContents, so use zero and one.
  EVT VT = Op.getValueType();
  SDValue TVal = DAG.getConstant(1, dl, VT);
  SDValue FVal = DAG.getConstant(0, dl, VT);

  // Handle f128 first, since one possible outcome is a normal integer
  // comparison which gets picked up by the next if statement.
  if (LHS.getValueType() == MVT::f128) {
    softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);

    // If softenSetCCOperands returned a scalar, use it.
    if (!RHS.getNode()) {
      assert(LHS.getValueType() == Op.getValueType() &&
             "Unexpected setcc expansion!");
      return LHS;
    }
  }

  if (LHS.getValueType().isInteger()) {
    SDValue CCVal;
    SDValue Cmp =
        getAArch64Cmp(LHS, RHS, ISD::getSetCCInverse(CC, true), CCVal, DAG, dl);

    // Note that we inverted the condition above, so we reverse the order of
    // the true and false operands here. This will allow the setcc to be
    // matched to a single CSINC instruction.
    return DAG.getNode(AArch64ISD::CSEL, dl, VT, FVal, TVal, CCVal, Cmp);
  }

  // Now we know we're dealing with FP values.
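  // (Vector comparisons were diverted to LowerVSETCC above and f128 was
  // softened to a libcall, so only f16/f32/f64 can reach this point.)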
  assert(LHS.getValueType() == MVT::f16 || LHS.getValueType() == MVT::f32 ||
         LHS.getValueType() == MVT::f64);

  // If that fails, we'll need to perform an FCMP + CSEL sequence. Go ahead
  // and do the comparison.
  SDValue Cmp = emitComparison(LHS, RHS, CC, dl, DAG);

  AArch64CC::CondCode CC1, CC2;
  changeFPCCToAArch64CC(CC, CC1, CC2);
  if (CC2 == AArch64CC::AL) {
    changeFPCCToAArch64CC(ISD::getSetCCInverse(CC, false), CC1, CC2);
    SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);

    // Note that we inverted the condition above, so we reverse the order of
    // the true and false operands here. This will allow the setcc to be
    // matched to a single CSINC instruction.
    return DAG.getNode(AArch64ISD::CSEL, dl, VT, FVal, TVal, CC1Val, Cmp);
  } else {
    // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't
    // totally clean. Some of them require two CSELs to implement. As in
    // this case, we emit the first CSEL and then emit a second using the
    // output of the first as the RHS. We're effectively OR'ing the two CC's
    // together.

    // FIXME: It would be nice if we could match the two CSELs to two CSINCs.
    SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);
    SDValue CS1 =
        DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, FVal, CC1Val, Cmp);

    SDValue CC2Val = DAG.getConstant(CC2, dl, MVT::i32);
    return DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, CS1, CC2Val, Cmp);
  }
}

SDValue AArch64TargetLowering::LowerSELECT_CC(ISD::CondCode CC, SDValue LHS,
                                              SDValue RHS, SDValue TVal,
                                              SDValue FVal, const SDLoc &dl,
                                              SelectionDAG &DAG) const {
  // Handle f128 first, because it will result in a comparison of some RTLIB
  // call result against zero.
  if (LHS.getValueType() == MVT::f128) {
    softenSetCCOperands(DAG, MVT::f128, LHS, RHS, CC, dl);

    // If softenSetCCOperands returned a scalar, we need to compare the result
    // against zero to select between true and false values.
    if (!RHS.getNode()) {
      RHS = DAG.getConstant(0, dl, LHS.getValueType());
      CC = ISD::SETNE;
    }
  }

  // Also handle f16, for which we need to do a f32 comparison.
  if (LHS.getValueType() == MVT::f16 && !Subtarget->hasFullFP16()) {
    LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, LHS);
    RHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::f32, RHS);
  }

  // Next, handle integers.
  if (LHS.getValueType().isInteger()) {
    assert((LHS.getValueType() == RHS.getValueType()) &&
           (LHS.getValueType() == MVT::i32 || LHS.getValueType() == MVT::i64));

    unsigned Opcode = AArch64ISD::CSEL;

    // If both the TVal and the FVal are constants, see if we can swap them in
    // order to form a CSINV or CSINC out of them.
    ConstantSDNode *CFVal = dyn_cast<ConstantSDNode>(FVal);
    ConstantSDNode *CTVal = dyn_cast<ConstantSDNode>(TVal);

    if (CTVal && CFVal && CTVal->isAllOnesValue() && CFVal->isNullValue()) {
      std::swap(TVal, FVal);
      std::swap(CTVal, CFVal);
      CC = ISD::getSetCCInverse(CC, true);
    } else if (CTVal && CFVal && CTVal->isOne() && CFVal->isNullValue()) {
      std::swap(TVal, FVal);
      std::swap(CTVal, CFVal);
      CC = ISD::getSetCCInverse(CC, true);
    } else if (TVal.getOpcode() == ISD::XOR) {
      // If TVal is a NOT we want to swap TVal and FVal so that we can match
      // with a CSINV rather than a CSEL.
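      // For example, "select c, (xor x, -1), y" becomes "select !c, y, ~x":
      // CSINV returns either its first operand or the bitwise NOT of its
      // second, so the inversion comes for free.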
      if (isAllOnesConstant(TVal.getOperand(1))) {
        std::swap(TVal, FVal);
        std::swap(CTVal, CFVal);
        CC = ISD::getSetCCInverse(CC, true);
      }
    } else if (TVal.getOpcode() == ISD::SUB) {
      // If TVal is a negation (SUB from 0) we want to swap TVal and FVal so
      // that we can match with a CSNEG rather than a CSEL.
      if (isNullConstant(TVal.getOperand(0))) {
        std::swap(TVal, FVal);
        std::swap(CTVal, CFVal);
        CC = ISD::getSetCCInverse(CC, true);
      }
    } else if (CTVal && CFVal) {
      const int64_t TrueVal = CTVal->getSExtValue();
      const int64_t FalseVal = CFVal->getSExtValue();
      bool Swap = false;

      // If both TVal and FVal are constants, see if FVal is the
      // inverse/negation/increment of TVal and generate a CSINV/CSNEG/CSINC
      // instead of a CSEL in that case.
      if (TrueVal == ~FalseVal) {
        Opcode = AArch64ISD::CSINV;
      } else if (TrueVal == -FalseVal) {
        Opcode = AArch64ISD::CSNEG;
      } else if (TVal.getValueType() == MVT::i32) {
        // If our operands are only 32-bit wide, make sure we use 32-bit
        // arithmetic for the check whether we can use CSINC. This ensures that
        // the addition in the check will wrap around properly in case there is
        // an overflow (which would not be the case if we do the check with
        // 64-bit arithmetic).
        const uint32_t TrueVal32 = CTVal->getZExtValue();
        const uint32_t FalseVal32 = CFVal->getZExtValue();

        if ((TrueVal32 == FalseVal32 + 1) || (TrueVal32 + 1 == FalseVal32)) {
          Opcode = AArch64ISD::CSINC;

          if (TrueVal32 > FalseVal32) {
            Swap = true;
          }
        }
        // 64-bit check whether we can use CSINC.
      } else if ((TrueVal == FalseVal + 1) || (TrueVal + 1 == FalseVal)) {
        Opcode = AArch64ISD::CSINC;

        if (TrueVal > FalseVal) {
          Swap = true;
        }
      }

      // Swap TVal and FVal if necessary.
      if (Swap) {
        std::swap(TVal, FVal);
        std::swap(CTVal, CFVal);
        CC = ISD::getSetCCInverse(CC, true);
      }

      if (Opcode != AArch64ISD::CSEL) {
        // Drop FVal since we can get its value by simply inverting/negating
        // TVal.
        FVal = TVal;
      }
    }

    // Avoid materializing a constant when possible by reusing a known value in
    // a register. However, don't perform this optimization if the known value
    // is one, zero or negative one in the case of a CSEL. We can always
    // materialize these values using CSINC, CSEL and CSINV with wzr/xzr as the
    // FVal, respectively.
    ConstantSDNode *RHSVal = dyn_cast<ConstantSDNode>(RHS);
    if (Opcode == AArch64ISD::CSEL && RHSVal && !RHSVal->isOne() &&
        !RHSVal->isNullValue() && !RHSVal->isAllOnesValue()) {
      AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC);
      // Transform "a == C ? C : x" to "a == C ? a : x" and "a != C ? x : C" to
      // "a != C ? x : a" to avoid materializing C.
      if (CTVal && CTVal == RHSVal && AArch64CC == AArch64CC::EQ)
        TVal = LHS;
      else if (CFVal && CFVal == RHSVal && AArch64CC == AArch64CC::NE)
        FVal = LHS;
    } else if (Opcode == AArch64ISD::CSNEG && RHSVal && RHSVal->isOne()) {
      assert(CTVal && CFVal && "Expected constant operands for CSNEG.");
      // Use a CSINV to transform "a == C ? 1 : -1" to "a == C ? a : -1" to
      // avoid materializing C.
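      // C is known to be 1 here (checked above), so the true value is just
      // the LHS itself, and CSINV produces the -1 as the NOT of a zero FVal.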
      AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC);
      if (CTVal == RHSVal && AArch64CC == AArch64CC::EQ) {
        Opcode = AArch64ISD::CSINV;
        TVal = LHS;
        FVal = DAG.getConstant(0, dl, FVal.getValueType());
      }
    }

    SDValue CCVal;
    SDValue Cmp = getAArch64Cmp(LHS, RHS, CC, CCVal, DAG, dl);
    EVT VT = TVal.getValueType();
    return DAG.getNode(Opcode, dl, VT, TVal, FVal, CCVal, Cmp);
  }

  // Now we know we're dealing with FP values.
  assert(LHS.getValueType() == MVT::f16 || LHS.getValueType() == MVT::f32 ||
         LHS.getValueType() == MVT::f64);
  assert(LHS.getValueType() == RHS.getValueType());
  EVT VT = TVal.getValueType();
  SDValue Cmp = emitComparison(LHS, RHS, CC, dl, DAG);

  // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally
  // clean. Some of them require two CSELs to implement.
  AArch64CC::CondCode CC1, CC2;
  changeFPCCToAArch64CC(CC, CC1, CC2);

  if (DAG.getTarget().Options.UnsafeFPMath) {
    // Transform "a == 0.0 ? 0.0 : x" to "a == 0.0 ? a : x" and
    // "a != 0.0 ? x : 0.0" to "a != 0.0 ? x : a" to avoid materializing 0.0.
    ConstantFPSDNode *RHSVal = dyn_cast<ConstantFPSDNode>(RHS);
    if (RHSVal && RHSVal->isZero()) {
      ConstantFPSDNode *CFVal = dyn_cast<ConstantFPSDNode>(FVal);
      ConstantFPSDNode *CTVal = dyn_cast<ConstantFPSDNode>(TVal);

      if ((CC == ISD::SETEQ || CC == ISD::SETOEQ || CC == ISD::SETUEQ) &&
          CTVal && CTVal->isZero() && TVal.getValueType() == LHS.getValueType())
        TVal = LHS;
      else if ((CC == ISD::SETNE || CC == ISD::SETONE || CC == ISD::SETUNE) &&
               CFVal && CFVal->isZero() &&
               FVal.getValueType() == LHS.getValueType())
        FVal = LHS;
    }
  }

  // Emit first, and possibly only, CSEL.
  SDValue CC1Val = DAG.getConstant(CC1, dl, MVT::i32);
  SDValue CS1 = DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, FVal, CC1Val, Cmp);

  // If we need a second CSEL, emit it, using the output of the first as the
  // RHS. We're effectively OR'ing the two CC's together.
  if (CC2 != AArch64CC::AL) {
    SDValue CC2Val = DAG.getConstant(CC2, dl, MVT::i32);
    return DAG.getNode(AArch64ISD::CSEL, dl, VT, TVal, CS1, CC2Val, Cmp);
  }

  // Otherwise, return the output of the first CSEL.
  return CS1;
}

SDValue AArch64TargetLowering::LowerSELECT_CC(SDValue Op,
                                              SelectionDAG &DAG) const {
  ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(4))->get();
  SDValue LHS = Op.getOperand(0);
  SDValue RHS = Op.getOperand(1);
  SDValue TVal = Op.getOperand(2);
  SDValue FVal = Op.getOperand(3);
  SDLoc DL(Op);
  return LowerSELECT_CC(CC, LHS, RHS, TVal, FVal, DL, DAG);
}

SDValue AArch64TargetLowering::LowerSELECT(SDValue Op,
                                           SelectionDAG &DAG) const {
  SDValue CCVal = Op->getOperand(0);
  SDValue TVal = Op->getOperand(1);
  SDValue FVal = Op->getOperand(2);
  SDLoc DL(Op);

  // Optimize {s|u}{add|sub|mul}.with.overflow feeding into a select
  // instruction.
  if (isOverflowIntrOpRes(CCVal)) {
    // Only lower legal XALUO ops.
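    // (XALUO refers to the ISD::{S,U}{ADD,SUB,MUL}O nodes, which yield a
    // result value plus a boolean overflow flag.)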
    if (!DAG.getTargetLoweringInfo().isTypeLegal(CCVal->getValueType(0)))
      return SDValue();

    AArch64CC::CondCode OFCC;
    SDValue Value, Overflow;
    std::tie(Value, Overflow) = getAArch64XALUOOp(OFCC, CCVal.getValue(0), DAG);
    SDValue CCVal = DAG.getConstant(OFCC, DL, MVT::i32);

    return DAG.getNode(AArch64ISD::CSEL, DL, Op.getValueType(), TVal, FVal,
                       CCVal, Overflow);
  }

  // Lower it the same way as we would lower a SELECT_CC node.
  ISD::CondCode CC;
  SDValue LHS, RHS;
  if (CCVal.getOpcode() == ISD::SETCC) {
    LHS = CCVal.getOperand(0);
    RHS = CCVal.getOperand(1);
    CC = cast<CondCodeSDNode>(CCVal->getOperand(2))->get();
  } else {
    LHS = CCVal;
    RHS = DAG.getConstant(0, DL, CCVal.getValueType());
    CC = ISD::SETNE;
  }
  return LowerSELECT_CC(CC, LHS, RHS, TVal, FVal, DL, DAG);
}

SDValue AArch64TargetLowering::LowerJumpTable(SDValue Op,
                                              SelectionDAG &DAG) const {
  // Jump table entries are PC-relative offsets. No additional tweaking
  // is necessary here. Just get the address of the jump table.
  JumpTableSDNode *JT = cast<JumpTableSDNode>(Op);

  if (getTargetMachine().getCodeModel() == CodeModel::Large &&
      !Subtarget->isTargetMachO()) {
    return getAddrLarge(JT, DAG);
  }
  return getAddr(JT, DAG);
}

SDValue AArch64TargetLowering::LowerConstantPool(SDValue Op,
                                                 SelectionDAG &DAG) const {
  ConstantPoolSDNode *CP = cast<ConstantPoolSDNode>(Op);

  if (getTargetMachine().getCodeModel() == CodeModel::Large) {
    // Use the GOT for the large code model on iOS.
    if (Subtarget->isTargetMachO()) {
      return getGOT(CP, DAG);
    }
    return getAddrLarge(CP, DAG);
  } else {
    return getAddr(CP, DAG);
  }
}

SDValue AArch64TargetLowering::LowerBlockAddress(SDValue Op,
                                                 SelectionDAG &DAG) const {
  BlockAddressSDNode *BA = cast<BlockAddressSDNode>(Op);
  if (getTargetMachine().getCodeModel() == CodeModel::Large &&
      !Subtarget->isTargetMachO()) {
    return getAddrLarge(BA, DAG);
  } else {
    return getAddr(BA, DAG);
  }
}

SDValue AArch64TargetLowering::LowerDarwin_VASTART(SDValue Op,
                                                   SelectionDAG &DAG) const {
  AArch64FunctionInfo *FuncInfo =
      DAG.getMachineFunction().getInfo<AArch64FunctionInfo>();

  SDLoc DL(Op);
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsStackIndex(),
                                 getPointerTy(DAG.getDataLayout()));
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

SDValue AArch64TargetLowering::LowerWin64_VASTART(SDValue Op,
                                                  SelectionDAG &DAG) const {
  AArch64FunctionInfo *FuncInfo =
      DAG.getMachineFunction().getInfo<AArch64FunctionInfo>();

  SDLoc DL(Op);
  SDValue FR = DAG.getFrameIndex(FuncInfo->getVarArgsGPRSize() > 0
                                     ? FuncInfo->getVarArgsGPRIndex()
                                     : FuncInfo->getVarArgsStackIndex(),
                                 getPointerTy(DAG.getDataLayout()));
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, FR, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

SDValue AArch64TargetLowering::LowerAAPCS_VASTART(SDValue Op,
                                                  SelectionDAG &DAG) const {
  // The layout of the va_list struct is specified in the AArch64 Procedure
  // Call Standard, section B.3.
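  // In C terms, the structure being initialized here is:
  //   struct va_list {
  //     void *__stack;   // offset 0: next stacked argument
  //     void *__gr_top;  // offset 8: end of the GP register save area
  //     void *__vr_top;  // offset 16: end of the FP/SIMD register save area
  //     int   __gr_offs; // offset 24: negative offset from __gr_top
  //     int   __vr_offs; // offset 28: negative offset from __vr_top
  //   };
  // which is what the five stores below fill in.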
  MachineFunction &MF = DAG.getMachineFunction();
  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  auto PtrVT = getPointerTy(DAG.getDataLayout());
  SDLoc DL(Op);

  SDValue Chain = Op.getOperand(0);
  SDValue VAList = Op.getOperand(1);
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  SmallVector<SDValue, 4> MemOps;

  // void *__stack at offset 0
  SDValue Stack = DAG.getFrameIndex(FuncInfo->getVarArgsStackIndex(), PtrVT);
  MemOps.push_back(DAG.getStore(Chain, DL, Stack, VAList,
                                MachinePointerInfo(SV), /* Alignment = */ 8));

  // void *__gr_top at offset 8
  int GPRSize = FuncInfo->getVarArgsGPRSize();
  if (GPRSize > 0) {
    SDValue GRTop, GRTopAddr;

    GRTopAddr =
        DAG.getNode(ISD::ADD, DL, PtrVT, VAList, DAG.getConstant(8, DL, PtrVT));

    GRTop = DAG.getFrameIndex(FuncInfo->getVarArgsGPRIndex(), PtrVT);
    GRTop = DAG.getNode(ISD::ADD, DL, PtrVT, GRTop,
                        DAG.getConstant(GPRSize, DL, PtrVT));

    MemOps.push_back(DAG.getStore(Chain, DL, GRTop, GRTopAddr,
                                  MachinePointerInfo(SV, 8),
                                  /* Alignment = */ 8));
  }

  // void *__vr_top at offset 16
  int FPRSize = FuncInfo->getVarArgsFPRSize();
  if (FPRSize > 0) {
    SDValue VRTop, VRTopAddr;
    VRTopAddr = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
                            DAG.getConstant(16, DL, PtrVT));

    VRTop = DAG.getFrameIndex(FuncInfo->getVarArgsFPRIndex(), PtrVT);
    VRTop = DAG.getNode(ISD::ADD, DL, PtrVT, VRTop,
                        DAG.getConstant(FPRSize, DL, PtrVT));

    MemOps.push_back(DAG.getStore(Chain, DL, VRTop, VRTopAddr,
                                  MachinePointerInfo(SV, 16),
                                  /* Alignment = */ 8));
  }

  // int __gr_offs at offset 24
  SDValue GROffsAddr =
      DAG.getNode(ISD::ADD, DL, PtrVT, VAList, DAG.getConstant(24, DL, PtrVT));
  MemOps.push_back(DAG.getStore(
      Chain, DL, DAG.getConstant(-GPRSize, DL, MVT::i32), GROffsAddr,
      MachinePointerInfo(SV, 24), /* Alignment = */ 4));

  // int __vr_offs at offset 28
  SDValue VROffsAddr =
      DAG.getNode(ISD::ADD, DL, PtrVT, VAList, DAG.getConstant(28, DL, PtrVT));
  MemOps.push_back(DAG.getStore(
      Chain, DL, DAG.getConstant(-FPRSize, DL, MVT::i32), VROffsAddr,
      MachinePointerInfo(SV, 28), /* Alignment = */ 4));

  return DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOps);
}

SDValue AArch64TargetLowering::LowerVASTART(SDValue Op,
                                            SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();

  if (Subtarget->isCallingConvWin64(MF.getFunction().getCallingConv()))
    return LowerWin64_VASTART(Op, DAG);
  else if (Subtarget->isTargetDarwin())
    return LowerDarwin_VASTART(Op, DAG);
  else
    return LowerAAPCS_VASTART(Op, DAG);
}

SDValue AArch64TargetLowering::LowerVACOPY(SDValue Op,
                                           SelectionDAG &DAG) const {
  // AAPCS has three pointers and two ints (= 32 bytes), while Darwin has a
  // single pointer.
  SDLoc DL(Op);
  unsigned VaListSize =
      (Subtarget->isTargetDarwin() || Subtarget->isTargetWindows()) ? 8 : 32;
  const Value *DestSV = cast<SrcValueSDNode>(Op.getOperand(3))->getValue();
  const Value *SrcSV = cast<SrcValueSDNode>(Op.getOperand(4))->getValue();

  return DAG.getMemcpy(Op.getOperand(0), DL, Op.getOperand(1),
                       Op.getOperand(2),
                       DAG.getConstant(VaListSize, DL, MVT::i32),
                       8, false, false, false, MachinePointerInfo(DestSV),
                       MachinePointerInfo(SrcSV));
}

SDValue AArch64TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
  assert(Subtarget->isTargetDarwin() &&
         "automatic va_arg instruction only works on Darwin");

  const Value *V = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue Chain = Op.getOperand(0);
  SDValue Addr = Op.getOperand(1);
  unsigned Align = Op.getConstantOperandVal(3);
  auto PtrVT = getPointerTy(DAG.getDataLayout());

  SDValue VAList = DAG.getLoad(PtrVT, DL, Chain, Addr, MachinePointerInfo(V));
  Chain = VAList.getValue(1);

  if (Align > 8) {
    assert(((Align & (Align - 1)) == 0) && "Expected Align to be a power of 2");
    VAList = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
                         DAG.getConstant(Align - 1, DL, PtrVT));
    VAList = DAG.getNode(ISD::AND, DL, PtrVT, VAList,
                         DAG.getConstant(-(int64_t)Align, DL, PtrVT));
  }

  Type *ArgTy = VT.getTypeForEVT(*DAG.getContext());
  uint64_t ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy);

  // Scalar integer and FP values smaller than 64 bits are implicitly extended
  // up to 64 bits. At the very least, we have to increase the striding of the
  // vaargs list to match this, and for FP values we need to introduce
  // FP_ROUND nodes as well.
  if (VT.isInteger() && !VT.isVector())
    ArgSize = 8;
  bool NeedFPTrunc = false;
  if (VT.isFloatingPoint() && !VT.isVector() && VT != MVT::f64) {
    ArgSize = 8;
    NeedFPTrunc = true;
  }

  // Increment the pointer, VAList, to the next vaarg
  SDValue VANext = DAG.getNode(ISD::ADD, DL, PtrVT, VAList,
                               DAG.getConstant(ArgSize, DL, PtrVT));
  // Store the incremented VAList to the legalized pointer
  SDValue APStore =
      DAG.getStore(Chain, DL, VANext, Addr, MachinePointerInfo(V));

  // Load the actual argument out of the pointer VAList
  if (NeedFPTrunc) {
    // Load the value as an f64.
    SDValue WideFP =
        DAG.getLoad(MVT::f64, DL, APStore, VAList, MachinePointerInfo());
    // Round the value down to an f32.
    SDValue NarrowFP = DAG.getNode(ISD::FP_ROUND, DL, VT, WideFP.getValue(0),
                                   DAG.getIntPtrConstant(1, DL));
    SDValue Ops[] = { NarrowFP, WideFP.getValue(1) };
    // Merge the rounded value with the chain output of the load.
    return DAG.getMergeValues(Ops, DL);
  }

  return DAG.getLoad(VT, DL, APStore, VAList, MachinePointerInfo());
}

SDValue AArch64TargetLowering::LowerFRAMEADDR(SDValue Op,
                                              SelectionDAG &DAG) const {
  MachineFrameInfo &MFI = DAG.getMachineFunction().getFrameInfo();
  MFI.setFrameAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDValue FrameAddr =
      DAG.getCopyFromReg(DAG.getEntryNode(), DL, AArch64::FP, VT);
  while (Depth--)
    FrameAddr = DAG.getLoad(VT, DL, DAG.getEntryNode(), FrameAddr,
                            MachinePointerInfo());
  return FrameAddr;
}

// FIXME? Maybe this could be a TableGen attribute on some registers and
// this table could be generated automatically from RegInfo.
unsigned AArch64TargetLowering::getRegisterByName(const char* RegName, EVT VT,
                                                  SelectionDAG &DAG) const {
  unsigned Reg = StringSwitch<unsigned>(RegName)
                     .Case("sp", AArch64::SP)
                     .Case("x18", AArch64::X18)
                     .Case("w18", AArch64::W18)
                     .Case("x20", AArch64::X20)
                     .Case("w20", AArch64::W20)
                     .Default(0);
  if (((Reg == AArch64::X18 || Reg == AArch64::W18) &&
       !Subtarget->isX18Reserved()) ||
      ((Reg == AArch64::X20 || Reg == AArch64::W20) &&
       !Subtarget->isX20Reserved()))
    Reg = 0;
  if (Reg)
    return Reg;
  report_fatal_error(Twine("Invalid register name \""
                           + StringRef(RegName) + "\"."));
}

SDValue AArch64TargetLowering::LowerRETURNADDR(SDValue Op,
                                               SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  if (Depth) {
    SDValue FrameAddr = LowerFRAMEADDR(Op, DAG);
    SDValue Offset = DAG.getConstant(8, DL, getPointerTy(DAG.getDataLayout()));
    return DAG.getLoad(VT, DL, DAG.getEntryNode(),
                       DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
                       MachinePointerInfo());
  }

  // Return LR, which contains the return address. Mark it an implicit live-in.
  unsigned Reg = MF.addLiveIn(AArch64::LR, &AArch64::GPR64RegClass);
  return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT);
}

/// LowerShiftRightParts - Lower SRA_PARTS, which returns two
/// i64 values and takes a 2 x i64 value to shift plus a shift amount.
SDValue AArch64TargetLowering::LowerShiftRightParts(SDValue Op,
                                                    SelectionDAG &DAG) const {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getSizeInBits();
  SDLoc dl(Op);
  SDValue ShOpLo = Op.getOperand(0);
  SDValue ShOpHi = Op.getOperand(1);
  SDValue ShAmt = Op.getOperand(2);
  unsigned Opc = (Op.getOpcode() == ISD::SRA_PARTS) ? ISD::SRA : ISD::SRL;

  assert(Op.getOpcode() == ISD::SRA_PARTS || Op.getOpcode() == ISD::SRL_PARTS);

  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64,
                                 DAG.getConstant(VTBits, dl, MVT::i64), ShAmt);
  SDValue HiBitsForLo = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, RevShAmt);

  // Unfortunately, if ShAmt == 0, we just calculated "(SHL ShOpHi, 64)" which
  // is "undef". We wanted 0, so CSEL it directly.
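  // (A DAG-level shift by the full bit width is undefined rather than zero,
  // so we cannot rely on the hardware's modulo-64 behaviour here.)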
  SDValue Cmp = emitComparison(ShAmt, DAG.getConstant(0, dl, MVT::i64),
                               ISD::SETEQ, dl, DAG);
  SDValue CCVal = DAG.getConstant(AArch64CC::EQ, dl, MVT::i32);
  HiBitsForLo =
      DAG.getNode(AArch64ISD::CSEL, dl, VT, DAG.getConstant(0, dl, MVT::i64),
                  HiBitsForLo, CCVal, Cmp);

  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64, ShAmt,
                                   DAG.getConstant(VTBits, dl, MVT::i64));

  SDValue LoBitsForLo = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, ShAmt);
  SDValue LoForNormalShift =
      DAG.getNode(ISD::OR, dl, VT, LoBitsForLo, HiBitsForLo);

  Cmp = emitComparison(ExtraShAmt, DAG.getConstant(0, dl, MVT::i64), ISD::SETGE,
                       dl, DAG);
  CCVal = DAG.getConstant(AArch64CC::GE, dl, MVT::i32);
  SDValue LoForBigShift = DAG.getNode(Opc, dl, VT, ShOpHi, ExtraShAmt);
  SDValue Lo = DAG.getNode(AArch64ISD::CSEL, dl, VT, LoForBigShift,
                           LoForNormalShift, CCVal, Cmp);

  // AArch64 shifts larger than the register width are wrapped rather than
  // clamped, so we can't just emit "hi >> x".
  SDValue HiForNormalShift = DAG.getNode(Opc, dl, VT, ShOpHi, ShAmt);
  SDValue HiForBigShift =
      Opc == ISD::SRA
          ? DAG.getNode(Opc, dl, VT, ShOpHi,
                        DAG.getConstant(VTBits - 1, dl, MVT::i64))
          : DAG.getConstant(0, dl, VT);
  SDValue Hi = DAG.getNode(AArch64ISD::CSEL, dl, VT, HiForBigShift,
                           HiForNormalShift, CCVal, Cmp);

  SDValue Ops[2] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}

/// LowerShiftLeftParts - Lower SHL_PARTS, which returns two
/// i64 values and takes a 2 x i64 value to shift plus a shift amount.
SDValue AArch64TargetLowering::LowerShiftLeftParts(SDValue Op,
                                                   SelectionDAG &DAG) const {
  assert(Op.getNumOperands() == 3 && "Not a double-shift!");
  EVT VT = Op.getValueType();
  unsigned VTBits = VT.getSizeInBits();
  SDLoc dl(Op);
  SDValue ShOpLo = Op.getOperand(0);
  SDValue ShOpHi = Op.getOperand(1);
  SDValue ShAmt = Op.getOperand(2);

  assert(Op.getOpcode() == ISD::SHL_PARTS);
  SDValue RevShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64,
                                 DAG.getConstant(VTBits, dl, MVT::i64), ShAmt);
  SDValue LoBitsForHi = DAG.getNode(ISD::SRL, dl, VT, ShOpLo, RevShAmt);

  // Unfortunately, if ShAmt == 0, we just calculated "(SRL ShOpLo, 64)" which
  // is "undef". We wanted 0, so CSEL it directly.
  SDValue Cmp = emitComparison(ShAmt, DAG.getConstant(0, dl, MVT::i64),
                               ISD::SETEQ, dl, DAG);
  SDValue CCVal = DAG.getConstant(AArch64CC::EQ, dl, MVT::i32);
  LoBitsForHi =
      DAG.getNode(AArch64ISD::CSEL, dl, VT, DAG.getConstant(0, dl, MVT::i64),
                  LoBitsForHi, CCVal, Cmp);

  SDValue ExtraShAmt = DAG.getNode(ISD::SUB, dl, MVT::i64, ShAmt,
                                   DAG.getConstant(VTBits, dl, MVT::i64));
  SDValue HiBitsForHi = DAG.getNode(ISD::SHL, dl, VT, ShOpHi, ShAmt);
  SDValue HiForNormalShift =
      DAG.getNode(ISD::OR, dl, VT, LoBitsForHi, HiBitsForHi);

  SDValue HiForBigShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ExtraShAmt);

  Cmp = emitComparison(ExtraShAmt, DAG.getConstant(0, dl, MVT::i64), ISD::SETGE,
                       dl, DAG);
  CCVal = DAG.getConstant(AArch64CC::GE, dl, MVT::i32);
  SDValue Hi = DAG.getNode(AArch64ISD::CSEL, dl, VT, HiForBigShift,
                           HiForNormalShift, CCVal, Cmp);

  // AArch64 shifts larger than the register width are wrapped rather than
  // clamped, so we can't just emit "lo << a" if a is too big.
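  // (The variable shift instructions use the amount modulo 64, so "lo << 64+n"
  // would really shift by n; the low result must be forced to zero instead.)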
  SDValue LoForBigShift = DAG.getConstant(0, dl, VT);
  SDValue LoForNormalShift = DAG.getNode(ISD::SHL, dl, VT, ShOpLo, ShAmt);
  SDValue Lo = DAG.getNode(AArch64ISD::CSEL, dl, VT, LoForBigShift,
                           LoForNormalShift, CCVal, Cmp);

  SDValue Ops[2] = { Lo, Hi };
  return DAG.getMergeValues(Ops, dl);
}

bool AArch64TargetLowering::isOffsetFoldingLegal(
    const GlobalAddressSDNode *GA) const {
  // Offsets are folded in the DAG combine rather than here so that we can
  // intelligently choose an offset based on the uses.
  return false;
}

bool AArch64TargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT) const {
  // We can materialize #0.0 as fmov $Rd, XZR for 64-bit and 32-bit cases.
  // FIXME: We should be able to handle f128 as well with a clever lowering.
  if (Imm.isPosZero() && (VT == MVT::f64 || VT == MVT::f32 ||
                          (VT == MVT::f16 && Subtarget->hasFullFP16()))) {
    LLVM_DEBUG(
        dbgs() << "Legal fp imm: materialize 0 using the zero register\n");
    return true;
  }

  StringRef FPType;
  bool IsLegal = false;
  SmallString<128> ImmStrVal;
  Imm.toString(ImmStrVal);

  if (VT == MVT::f64) {
    FPType = "f64";
    IsLegal = AArch64_AM::getFP64Imm(Imm) != -1;
  } else if (VT == MVT::f32) {
    FPType = "f32";
    IsLegal = AArch64_AM::getFP32Imm(Imm) != -1;
  } else if (VT == MVT::f16 && Subtarget->hasFullFP16()) {
    FPType = "f16";
    IsLegal = AArch64_AM::getFP16Imm(Imm) != -1;
  }

  if (IsLegal) {
    LLVM_DEBUG(dbgs() << "Legal " << FPType << " imm value: " << ImmStrVal
                      << "\n");
    return true;
  }

  if (!FPType.empty())
    LLVM_DEBUG(dbgs() << "Illegal " << FPType << " imm value: " << ImmStrVal
                      << "\n");
  else
    LLVM_DEBUG(dbgs() << "Illegal fp imm " << ImmStrVal
                      << ": unsupported fp type\n");

  return false;
}

//===----------------------------------------------------------------------===//
//                          AArch64 Optimization Hooks
//===----------------------------------------------------------------------===//

static SDValue getEstimate(const AArch64Subtarget *ST, unsigned Opcode,
                           SDValue Operand, SelectionDAG &DAG,
                           int &ExtraSteps) {
  EVT VT = Operand.getValueType();
  if (ST->hasNEON() &&
      (VT == MVT::f64 || VT == MVT::v1f64 || VT == MVT::v2f64 ||
       VT == MVT::f32 || VT == MVT::v1f32 ||
       VT == MVT::v2f32 || VT == MVT::v4f32)) {
    if (ExtraSteps == TargetLoweringBase::ReciprocalEstimate::Unspecified)
      // For the reciprocal estimates, convergence is quadratic, so the number
      // of digits is doubled after each iteration. In ARMv8, the accuracy of
      // the initial estimate is 2^-8. Thus the number of extra steps to refine
      // the result for float (23 mantissa bits) is 2 and for double (52
      // mantissa bits) is 3.
      ExtraSteps = VT.getScalarType() == MVT::f64 ? 3 : 2;

    return DAG.getNode(Opcode, SDLoc(Operand), VT, Operand);
  }

  return SDValue();
}

SDValue AArch64TargetLowering::getSqrtEstimate(SDValue Operand,
                                               SelectionDAG &DAG, int Enabled,
                                               int &ExtraSteps,
                                               bool &UseOneConst,
                                               bool Reciprocal) const {
  if (Enabled == ReciprocalEstimate::Enabled ||
      (Enabled == ReciprocalEstimate::Unspecified && Subtarget->useRSqrt()))
    if (SDValue Estimate = getEstimate(Subtarget, AArch64ISD::FRSQRTE, Operand,
                                       DAG, ExtraSteps)) {
      SDLoc DL(Operand);
      EVT VT = Operand.getValueType();

      SDNodeFlags Flags;
      Flags.setAllowReassociation(true);

      // Newton reciprocal square root iteration: E * 0.5 * (3 - X * E^2)
      // AArch64 reciprocal square root iteration instruction: 0.5 * (3 - M * N)
      for (int i = ExtraSteps; i > 0; --i) {
        SDValue Step = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Estimate,
                                   Flags);
        Step = DAG.getNode(AArch64ISD::FRSQRTS, DL, VT, Operand, Step, Flags);
        Estimate = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Step, Flags);
      }
      if (!Reciprocal) {
        EVT CCVT = getSetCCResultType(DAG.getDataLayout(), *DAG.getContext(),
                                      VT);
        SDValue FPZero = DAG.getConstantFP(0.0, DL, VT);
        SDValue Eq = DAG.getSetCC(DL, CCVT, Operand, FPZero, ISD::SETEQ);

        Estimate = DAG.getNode(ISD::FMUL, DL, VT, Operand, Estimate, Flags);
        // Correct the result if the operand is 0.0.
        Estimate = DAG.getNode(VT.isVector() ? ISD::VSELECT : ISD::SELECT, DL,
                               VT, Eq, Operand, Estimate);
      }

      ExtraSteps = 0;
      return Estimate;
    }

  return SDValue();
}

SDValue AArch64TargetLowering::getRecipEstimate(SDValue Operand,
                                                SelectionDAG &DAG, int Enabled,
                                                int &ExtraSteps) const {
  if (Enabled == ReciprocalEstimate::Enabled)
    if (SDValue Estimate = getEstimate(Subtarget, AArch64ISD::FRECPE, Operand,
                                       DAG, ExtraSteps)) {
      SDLoc DL(Operand);
      EVT VT = Operand.getValueType();

      SDNodeFlags Flags;
      Flags.setAllowReassociation(true);

      // Newton reciprocal iteration: E * (2 - X * E)
      // AArch64 reciprocal iteration instruction: (2 - M * N)
      for (int i = ExtraSteps; i > 0; --i) {
        SDValue Step = DAG.getNode(AArch64ISD::FRECPS, DL, VT, Operand,
                                   Estimate, Flags);
        Estimate = DAG.getNode(ISD::FMUL, DL, VT, Estimate, Step, Flags);
      }

      ExtraSteps = 0;
      return Estimate;
    }

  return SDValue();
}

//===----------------------------------------------------------------------===//
//                      AArch64 Inline Assembly Support
//===----------------------------------------------------------------------===//

// Table of Constraints
// TODO: This is the current set of constraints supported by ARM for the
// compiler; not all of them may make sense.
//
// r - A general register
// w - An FP/SIMD register of some size in the range v0-v31
// x - An FP/SIMD register of some size in the range v0-v15
// I - Constant that can be used with an ADD instruction
// J - Constant that can be used with a SUB instruction
// K - Constant that can be used with a 32-bit logical instruction
// L - Constant that can be used with a 64-bit logical instruction
// M - Constant that can be used as a 32-bit MOV immediate
// N - Constant that can be used as a 64-bit MOV immediate
// Q - A memory reference with base register and no offset
// S - A symbolic address
// Y - Floating point constant zero
// Z - Integer constant zero
//
// Note that general register operands will be output using their 64-bit x
// register name, whatever the size of the variable, unless the asm operand
// is prefixed by the %w modifier. Floating-point and SIMD register operands
// will be output with the v prefix unless prefixed by the %b, %h, %s, %d or
// %q modifier.
const char *AArch64TargetLowering::LowerXConstraint(EVT ConstraintVT) const {
  // At this point, we have to lower this constraint to something else, so we
  // lower it to an "r" or "w". However, by doing this we will force the result
  // to be in register, while the X constraint is much more permissive.
  //
  // Although we are correct (we are free to emit anything, without
  // constraints), we might break use cases that would expect us to be more
  // efficient and emit something else.
  if (!Subtarget->hasFPARMv8())
    return "r";

  if (ConstraintVT.isFloatingPoint())
    return "w";

  if (ConstraintVT.isVector() &&
      (ConstraintVT.getSizeInBits() == 64 ||
       ConstraintVT.getSizeInBits() == 128))
    return "w";

  return "r";
}

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
AArch64TargetLowering::ConstraintType
AArch64TargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:
      break;
    case 'z':
      return C_Other;
    case 'x':
    case 'w':
      return C_RegisterClass;
    // An address with a single base register. Due to the way we
    // currently handle addresses it is the same as 'r'.
    case 'Q':
      return C_Memory;
    case 'S': // A symbolic address
      return C_Other;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

/// Examine constraint type and operand type and determine a weight value.
/// This object must already have been set up with the operand type
/// and the current alternative constraint selected.
TargetLowering::ConstraintWeight
AArch64TargetLowering::getSingleConstraintMatchWeight(
    AsmOperandInfo &info, const char *constraint) const {
  ConstraintWeight weight = CW_Invalid;
  Value *CallOperandVal = info.CallOperandVal;
  // If we don't have a value, we can't do a match,
  // but allow it at the lowest weight.
  if (!CallOperandVal)
    return CW_Default;
  Type *type = CallOperandVal->getType();
  // Look at the constraint type.
  switch (*constraint) {
  default:
    weight = TargetLowering::getSingleConstraintMatchWeight(info, constraint);
    break;
  case 'x':
  case 'w':
    if (type->isFloatingPointTy() || type->isVectorTy())
      weight = CW_Register;
    break;
  case 'z':
    weight = CW_Constant;
    break;
  }
  return weight;
}

std::pair<unsigned, const TargetRegisterClass *>
AArch64TargetLowering::getRegForInlineAsmConstraint(
    const TargetRegisterInfo *TRI, StringRef Constraint, MVT VT) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      if (VT.getSizeInBits() == 64)
        return std::make_pair(0U, &AArch64::GPR64commonRegClass);
      return std::make_pair(0U, &AArch64::GPR32commonRegClass);
    case 'w':
      if (VT.getSizeInBits() == 16)
        return std::make_pair(0U, &AArch64::FPR16RegClass);
      if (VT.getSizeInBits() == 32)
        return std::make_pair(0U, &AArch64::FPR32RegClass);
      if (VT.getSizeInBits() == 64)
        return std::make_pair(0U, &AArch64::FPR64RegClass);
      if (VT.getSizeInBits() == 128)
        return std::make_pair(0U, &AArch64::FPR128RegClass);
      break;
    // The instructions that this constraint is designed for can
    // only take 128-bit registers so just use that regclass.
    case 'x':
      if (VT.getSizeInBits() == 128)
        return std::make_pair(0U, &AArch64::FPR128_loRegClass);
      break;
    }
  }
  if (StringRef("{cc}").equals_lower(Constraint))
    return std::make_pair(unsigned(AArch64::NZCV), &AArch64::CCRRegClass);

  // Use the default implementation in TargetLowering to convert the register
  // constraint into a member of a register class.
  std::pair<unsigned, const TargetRegisterClass *> Res;
  Res = TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);

  // Not found as a standard register?
  if (!Res.second) {
    unsigned Size = Constraint.size();
    if ((Size == 4 || Size == 5) && Constraint[0] == '{' &&
        tolower(Constraint[1]) == 'v' && Constraint[Size - 1] == '}') {
      int RegNo;
      bool Failed = Constraint.slice(2, Size - 1).getAsInteger(10, RegNo);
      if (!Failed && RegNo >= 0 && RegNo <= 31) {
        // v0 - v31 are aliases of q0 - q31 or d0 - d31 depending on size.
        // By default we'll emit v0-v31 for this unless there's a modifier where
        // we'll emit the correct register as well.
        if (VT != MVT::Other && VT.getSizeInBits() == 64) {
          Res.first = AArch64::FPR64RegClass.getRegister(RegNo);
          Res.second = &AArch64::FPR64RegClass;
        } else {
          Res.first = AArch64::FPR128RegClass.getRegister(RegNo);
          Res.second = &AArch64::FPR128RegClass;
        }
      }
    }
  }

  return Res;
}

/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops.
void AArch64TargetLowering::LowerAsmOperandForConstraint(
    SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
    SelectionDAG &DAG) const {
  SDValue Result;

  // Currently only support length 1 constraints.
  if (Constraint.length() != 1)
    return;

  char ConstraintLetter = Constraint[0];
  switch (ConstraintLetter) {
  default:
    break;

  // This set of constraints deals with valid constants for various
  // instructions. Validate and return a target constant for them if we can.
  case 'z': {
    // 'z' maps to xzr or wzr so it needs an input of 0.
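    // A zero operand under this constraint is then rendered directly as
    // xzr/wzr instead of forcing zero into a general register first.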
    if (!isNullConstant(Op))
      return;

    if (Op.getValueType() == MVT::i64)
      Result = DAG.getRegister(AArch64::XZR, MVT::i64);
    else
      Result = DAG.getRegister(AArch64::WZR, MVT::i32);
    break;
  }
  case 'S': {
    // An absolute symbolic address or label reference.
    if (const GlobalAddressSDNode *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
      Result = DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
                                          GA->getValueType(0));
    } else if (const BlockAddressSDNode *BA =
                   dyn_cast<BlockAddressSDNode>(Op)) {
      Result =
          DAG.getTargetBlockAddress(BA->getBlockAddress(), BA->getValueType(0));
    } else if (const ExternalSymbolSDNode *ES =
                   dyn_cast<ExternalSymbolSDNode>(Op)) {
      Result =
          DAG.getTargetExternalSymbol(ES->getSymbol(), ES->getValueType(0));
    } else
      return;
    break;
  }

  case 'I':
  case 'J':
  case 'K':
  case 'L':
  case 'M':
  case 'N':
    ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op);
    if (!C)
      return;

    // Grab the value and do some validation.
    uint64_t CVal = C->getZExtValue();
    switch (ConstraintLetter) {
    // The I constraint applies only to simple ADD or SUB immediate operands:
    // i.e. 0 to 4095 with optional shift by 12
    // The J constraint applies only to ADD or SUB immediates that would be
    // valid when negated, i.e. if [an add pattern] were to be output as a SUB
    // instruction [or vice versa], in other words -1 to -4095 with optional
    // left shift by 12.
    case 'I':
      if (isUInt<12>(CVal) || isShiftedUInt<12, 12>(CVal))
        break;
      return;
    case 'J': {
      uint64_t NVal = -C->getSExtValue();
      if (isUInt<12>(NVal) || isShiftedUInt<12, 12>(NVal)) {
        CVal = C->getSExtValue();
        break;
      }
      return;
    }
    // The K and L constraints apply *only* to logical immediates, including
    // what used to be the MOVI alias for ORR (though the MOVI alias has now
    // been removed and MOV should be used). So these constraints have to
    // distinguish between bit patterns that are valid 32-bit or 64-bit
    // "bitmask immediates": for example 0xaaaaaaaa is a valid bimm32 (K), but
    // not a valid bimm64 (L) where 0xaaaaaaaaaaaaaaaa would be valid, and vice
    // versa.
    case 'K':
      if (AArch64_AM::isLogicalImmediate(CVal, 32))
        break;
      return;
    case 'L':
      if (AArch64_AM::isLogicalImmediate(CVal, 64))
        break;
      return;
    // The M and N constraints are a superset of K and L respectively, for use
    // with the MOV (immediate) alias. As well as the logical immediates they
    // also match 32 or 64-bit immediates that can be loaded either using a
    // *single* MOVZ or MOVN, such as 32-bit 0x12340000, 0x00001234, 0xffffedca
    // (M) or 64-bit 0x1234000000000000 (N) etc.
    // As a note some of this code is liberally stolen from the asm parser.
    case 'M': {
      if (!isUInt<32>(CVal))
        return;
      if (AArch64_AM::isLogicalImmediate(CVal, 32))
        break;
      if ((CVal & 0xFFFF) == CVal)
        break;
      if ((CVal & 0xFFFF0000ULL) == CVal)
        break;
      uint64_t NCVal = ~(uint32_t)CVal;
      if ((NCVal & 0xFFFFULL) == NCVal)
        break;
      if ((NCVal & 0xFFFF0000ULL) == NCVal)
        break;
      return;
    }
    case 'N': {
      if (AArch64_AM::isLogicalImmediate(CVal, 64))
        break;
      if ((CVal & 0xFFFFULL) == CVal)
        break;
      if ((CVal & 0xFFFF0000ULL) == CVal)
        break;
      if ((CVal & 0xFFFF00000000ULL) == CVal)
        break;
      if ((CVal & 0xFFFF000000000000ULL) == CVal)
        break;
      uint64_t NCVal = ~CVal;
      if ((NCVal & 0xFFFFULL) == NCVal)
        break;
      if ((NCVal & 0xFFFF0000ULL) == NCVal)
        break;
      if ((NCVal & 0xFFFF00000000ULL) == NCVal)
        break;
      if ((NCVal & 0xFFFF000000000000ULL) == NCVal)
        break;
      return;
    }
    default:
      return;
    }

    // All assembler immediates are 64-bit integers.
    Result = DAG.getTargetConstant(CVal, SDLoc(Op), MVT::i64);
    break;
  }

  if (Result.getNode()) {
    Ops.push_back(Result);
    return;
  }

  return TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

//===----------------------------------------------------------------------===//
//                     AArch64 Advanced SIMD Support
//===----------------------------------------------------------------------===//

/// WidenVector - Given a value in the V64 register class, produce the
/// equivalent value in the V128 register class.
static SDValue WidenVector(SDValue V64Reg, SelectionDAG &DAG) {
  EVT VT = V64Reg.getValueType();
  unsigned NarrowSize = VT.getVectorNumElements();
  MVT EltTy = VT.getVectorElementType().getSimpleVT();
  MVT WideTy = MVT::getVectorVT(EltTy, 2 * NarrowSize);
  SDLoc DL(V64Reg);

  return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, WideTy, DAG.getUNDEF(WideTy),
                     V64Reg, DAG.getConstant(0, DL, MVT::i32));
}

/// getExtFactor - Determine the adjustment factor for the position when
/// generating an "extract from vector registers" instruction.
static unsigned getExtFactor(SDValue &V) {
  EVT EltType = V.getValueType().getVectorElementType();
  return EltType.getSizeInBits() / 8;
}

/// NarrowVector - Given a value in the V128 register class, produce the
/// equivalent value in the V64 register class.
static SDValue NarrowVector(SDValue V128Reg, SelectionDAG &DAG) {
  EVT VT = V128Reg.getValueType();
  unsigned WideSize = VT.getVectorNumElements();
  MVT EltTy = VT.getVectorElementType().getSimpleVT();
  MVT NarrowTy = MVT::getVectorVT(EltTy, WideSize / 2);
  SDLoc DL(V128Reg);

  return DAG.getTargetExtractSubreg(AArch64::dsub, DL, NarrowTy, V128Reg);
}

// Gather data to see if the operation can be modelled as a
// shuffle in combination with VEXTs.
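// For example, a BUILD_VECTOR whose operands are EXTRACT_VECTOR_ELTs drawn
// from at most two source vectors can often be rewritten as a single
// VECTOR_SHUFFLE of those sources, with EXT sliding a window over a source
// that is wider than the result.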
SDValue AArch64TargetLowering::ReconstructShuffle(SDValue Op,
                                                  SelectionDAG &DAG) const {
  assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!");
  LLVM_DEBUG(dbgs() << "AArch64TargetLowering::ReconstructShuffle\n");
  SDLoc dl(Op);
  EVT VT = Op.getValueType();
  unsigned NumElts = VT.getVectorNumElements();

  struct ShuffleSourceInfo {
    SDValue Vec;
    unsigned MinElt;
    unsigned MaxElt;

    // We may insert some combination of BITCASTs and VEXT nodes to force Vec to
    // be compatible with the shuffle we intend to construct. As a result
    // ShuffleVec will be some sliding window into the original Vec.
    SDValue ShuffleVec;

    // Code should guarantee that element i in Vec starts at element "WindowBase
    // + i * WindowScale in ShuffleVec".
    int WindowBase;
    int WindowScale;

    ShuffleSourceInfo(SDValue Vec)
      : Vec(Vec), MinElt(std::numeric_limits<unsigned>::max()), MaxElt(0),
        ShuffleVec(Vec), WindowBase(0), WindowScale(1) {}

    bool operator ==(SDValue OtherVec) { return Vec == OtherVec; }
  };

  // First gather all vectors used as an immediate source for this BUILD_VECTOR
  // node.
  SmallVector<ShuffleSourceInfo, 2> Sources;
  for (unsigned i = 0; i < NumElts; ++i) {
    SDValue V = Op.getOperand(i);
    if (V.isUndef())
      continue;
    else if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT ||
             !isa<ConstantSDNode>(V.getOperand(1))) {
      LLVM_DEBUG(
          dbgs() << "Reshuffle failed: "
                    "a shuffle can only come from building a vector from "
                    "various elements of other vectors, provided their "
                    "indices are constant\n");
      return SDValue();
    }

    // Add this element source to the list if it's not already there.
    SDValue SourceVec = V.getOperand(0);
    auto Source = find(Sources, SourceVec);
    if (Source == Sources.end())
      Source = Sources.insert(Sources.end(), ShuffleSourceInfo(SourceVec));

    // Update the minimum and maximum lane number seen.
    unsigned EltNo = cast<ConstantSDNode>(V.getOperand(1))->getZExtValue();
    Source->MinElt = std::min(Source->MinElt, EltNo);
    Source->MaxElt = std::max(Source->MaxElt, EltNo);
  }

  if (Sources.size() > 2) {
    LLVM_DEBUG(
        dbgs() << "Reshuffle failed: currently only do something sane when at "
                  "most two source vectors are involved\n");
    return SDValue();
  }

  // Find out the smallest element size among result and two sources, and use
  // it as element size to build the shuffle_vector.
  EVT SmallestEltTy = VT.getVectorElementType();
  for (auto &Source : Sources) {
    EVT SrcEltTy = Source.Vec.getValueType().getVectorElementType();
    if (SrcEltTy.bitsLT(SmallestEltTy)) {
      SmallestEltTy = SrcEltTy;
    }
  }
  unsigned ResMultiplier =
      VT.getScalarSizeInBits() / SmallestEltTy.getSizeInBits();
  NumElts = VT.getSizeInBits() / SmallestEltTy.getSizeInBits();
  EVT ShuffleVT = EVT::getVectorVT(*DAG.getContext(), SmallestEltTy, NumElts);

  // If the source vector is too wide or too narrow, we may nevertheless be
  // able to construct a compatible shuffle either by concatenating it with
  // UNDEF or extracting a suitable range of elements.
  for (auto &Src : Sources) {
    EVT SrcVT = Src.ShuffleVec.getValueType();

    if (SrcVT.getSizeInBits() == VT.getSizeInBits())
      continue;

    // This stage of the search produces a source with the same element type as
    // the original, but with a total width matching the BUILD_VECTOR output.
    EVT EltVT = SrcVT.getVectorElementType();
    unsigned NumSrcElts = VT.getSizeInBits() / EltVT.getSizeInBits();
    EVT DestVT = EVT::getVectorVT(*DAG.getContext(), EltVT, NumSrcElts);

    if (SrcVT.getSizeInBits() < VT.getSizeInBits()) {
      assert(2 * SrcVT.getSizeInBits() == VT.getSizeInBits());
      // We can pad out the smaller vector for free, so if it's part of a
      // shuffle...
      Src.ShuffleVec =
          DAG.getNode(ISD::CONCAT_VECTORS, dl, DestVT, Src.ShuffleVec,
                      DAG.getUNDEF(Src.ShuffleVec.getValueType()));
      continue;
    }

    assert(SrcVT.getSizeInBits() == 2 * VT.getSizeInBits());

    if (Src.MaxElt - Src.MinElt >= NumSrcElts) {
      LLVM_DEBUG(
          dbgs() << "Reshuffle failed: span too large for a VEXT to cope\n");
      return SDValue();
    }

    if (Src.MinElt >= NumSrcElts) {
      // The extraction can just take the second half
      Src.ShuffleVec =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
                      DAG.getConstant(NumSrcElts, dl, MVT::i64));
      Src.WindowBase = -NumSrcElts;
    } else if (Src.MaxElt < NumSrcElts) {
      // The extraction can just take the first half
      Src.ShuffleVec =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
                      DAG.getConstant(0, dl, MVT::i64));
    } else {
      // An actual VEXT is needed
      SDValue VEXTSrc1 =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
                      DAG.getConstant(0, dl, MVT::i64));
      SDValue VEXTSrc2 =
          DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, DestVT, Src.ShuffleVec,
                      DAG.getConstant(NumSrcElts, dl, MVT::i64));
      unsigned Imm = Src.MinElt * getExtFactor(VEXTSrc1);

      Src.ShuffleVec = DAG.getNode(AArch64ISD::EXT, dl, DestVT, VEXTSrc1,
                                   VEXTSrc2,
                                   DAG.getConstant(Imm, dl, MVT::i32));
      Src.WindowBase = -Src.MinElt;
    }
  }

  // Another possible incompatibility occurs from the vector element types. We
  // can fix this by bitcasting the source vectors to the same type we intend
  // for the shuffle.
  for (auto &Src : Sources) {
    EVT SrcEltTy = Src.ShuffleVec.getValueType().getVectorElementType();
    if (SrcEltTy == SmallestEltTy)
      continue;
    assert(ShuffleVT.getVectorElementType() == SmallestEltTy);
    Src.ShuffleVec = DAG.getNode(ISD::BITCAST, dl, ShuffleVT, Src.ShuffleVec);
    Src.WindowScale = SrcEltTy.getSizeInBits() / SmallestEltTy.getSizeInBits();
    Src.WindowBase *= Src.WindowScale;
  }

  // Final sanity check before we try to actually produce a shuffle.
  LLVM_DEBUG(for (auto Src
                  : Sources)
                 assert(Src.ShuffleVec.getValueType() == ShuffleVT););

  // The stars all align, our next step is to produce the mask for the shuffle.
  SmallVector<int, 8> Mask(ShuffleVT.getVectorNumElements(), -1);
  int BitsPerShuffleLane = ShuffleVT.getScalarSizeInBits();
  for (unsigned i = 0; i < VT.getVectorNumElements(); ++i) {
    SDValue Entry = Op.getOperand(i);
    if (Entry.isUndef())
      continue;

    auto Src = find(Sources, Entry.getOperand(0));
    int EltNo = cast<ConstantSDNode>(Entry.getOperand(1))->getSExtValue();

    // EXTRACT_VECTOR_ELT performs an implicit any_ext; BUILD_VECTOR an implicit
    // trunc. So only std::min(SrcBits, DestBits) actually get defined in this
    // segment.
    EVT OrigEltTy = Entry.getOperand(0).getValueType().getVectorElementType();
    int BitsDefined =
        std::min(OrigEltTy.getSizeInBits(), VT.getScalarSizeInBits());
    int LanesDefined = BitsDefined / BitsPerShuffleLane;

    // This source is expected to fill ResMultiplier lanes of the final shuffle,
    // starting at the appropriate offset.
    int *LaneMask = &Mask[i * ResMultiplier];

    int ExtractBase = EltNo * Src->WindowScale + Src->WindowBase;
    ExtractBase += NumElts * (Src - Sources.begin());
    for (int j = 0; j < LanesDefined; ++j)
      LaneMask[j] = ExtractBase + j;
  }

  // Final check before we try to produce nonsense...
  if (!isShuffleMaskLegal(Mask, ShuffleVT)) {
    LLVM_DEBUG(dbgs() << "Reshuffle failed: illegal shuffle mask\n");
    return SDValue();
  }

  SDValue ShuffleOps[] = { DAG.getUNDEF(ShuffleVT), DAG.getUNDEF(ShuffleVT) };
  for (unsigned i = 0; i < Sources.size(); ++i)
    ShuffleOps[i] = Sources[i].ShuffleVec;

  SDValue Shuffle = DAG.getVectorShuffle(ShuffleVT, dl, ShuffleOps[0],
                                         ShuffleOps[1], Mask);
  SDValue V = DAG.getNode(ISD::BITCAST, dl, VT, Shuffle);

  LLVM_DEBUG(dbgs() << "Reshuffle, creating node: "; Shuffle.dump();
             dbgs() << "Reshuffle, creating node: "; V.dump(););

  return V;
}

// Check if an EXT instruction can handle the shuffle mask when the
// vector sources of the shuffle are the same.
static bool isSingletonEXTMask(ArrayRef<int> M, EVT VT, unsigned &Imm) {
  unsigned NumElts = VT.getVectorNumElements();

  // Assume that the first shuffle index is not UNDEF. Fail if it is.
  if (M[0] < 0)
    return false;

  Imm = M[0];

  // If this is a VEXT shuffle, the immediate value is the index of the first
  // element. The other shuffle indices must be the successive elements after
  // the first one.
  unsigned ExpectedElt = Imm;
  for (unsigned i = 1; i < NumElts; ++i) {
    // Increment the expected index. If it wraps around, just follow it
    // back to index zero and keep going.
    ++ExpectedElt;
    if (ExpectedElt == NumElts)
      ExpectedElt = 0;

    if (M[i] < 0)
      continue; // ignore UNDEF indices
    if (ExpectedElt != static_cast<unsigned>(M[i]))
      return false;
  }

  return true;
}

// Check if an EXT instruction can handle the shuffle mask when the
// vector sources of the shuffle are different.
static bool isEXTMask(ArrayRef<int> M, EVT VT, bool &ReverseEXT,
                      unsigned &Imm) {
  // Look for the first non-undef element.
  const int *FirstRealElt = find_if(M, [](int Elt) { return Elt >= 0; });

  // Benefit from APInt to handle overflow when calculating expected element.
  unsigned NumElts = VT.getVectorNumElements();
  unsigned MaskBits = APInt(32, NumElts * 2).logBase2();
  APInt ExpectedElt = APInt(MaskBits, *FirstRealElt + 1);
  // The following shuffle indices must be the successive elements after the
  // first real element.
  const int *FirstWrongElt = std::find_if(FirstRealElt + 1, M.end(),
      [&](int Elt) {return Elt != ExpectedElt++ && Elt != -1;});
  if (FirstWrongElt != M.end())
    return false;

  // The index of an EXT is the first element if it is not UNDEF.
  // Watch out for the beginning UNDEFs. The EXT index should be the expected
  // value of the first element. E.g.
  // <-1, -1, 3, ...> is treated as <1, 2, 3, ...>.
  // <-1, -1, 0, 1, ...> is treated as <2*NumElts-2, 2*NumElts-1, 0, 1, ...>.
  // ExpectedElt is the last mask index plus 1.
  Imm = ExpectedElt.getZExtValue();

  // There are two different cases that require reversing the input vectors.
  // For example, for the vector <4 x i32> we have the following cases:
  // Case 1: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, -1, 0>)
  // Case 2: shufflevector(<4 x i32>,<4 x i32>,<-1, -1, 7, 0>)
  // For both cases, we finally use mask <5, 6, 7, 0>, which requires
  // reversing the two input vectors.
  if (Imm < NumElts)
    ReverseEXT = true;
  else
    Imm -= NumElts;

  return true;
}

/// isREVMask - Check if a vector shuffle corresponds to a REV
/// instruction with the specified blocksize. (The order of the elements
/// within each block of the vector is reversed.)
static bool isREVMask(ArrayRef<int> M, EVT VT, unsigned BlockSize) {
  assert((BlockSize == 16 || BlockSize == 32 || BlockSize == 64) &&
         "Only possible block sizes for REV are: 16, 32, 64");

  unsigned EltSz = VT.getScalarSizeInBits();
  if (EltSz == 64)
    return false;

  unsigned NumElts = VT.getVectorNumElements();
  unsigned BlockElts = M[0] + 1;
  // If the first shuffle index is UNDEF, be optimistic.
  if (M[0] < 0)
    BlockElts = BlockSize / EltSz;

  if (BlockSize <= EltSz || BlockSize != BlockElts * EltSz)
    return false;

  for (unsigned i = 0; i < NumElts; ++i) {
    if (M[i] < 0)
      continue; // ignore UNDEF indices
    if ((unsigned)M[i] != (i - i % BlockElts) + (BlockElts - 1 - i % BlockElts))
      return false;
  }

  return true;
}

static bool isZIPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
  unsigned NumElts = VT.getVectorNumElements();
  WhichResult = (M[0] == 0 ? 0 : 1);
  unsigned Idx = WhichResult * NumElts / 2;
  for (unsigned i = 0; i != NumElts; i += 2) {
    if ((M[i] >= 0 && (unsigned)M[i] != Idx) ||
        (M[i + 1] >= 0 && (unsigned)M[i + 1] != Idx + NumElts))
      return false;
    Idx += 1;
  }

  return true;
}

static bool isUZPMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
  unsigned NumElts = VT.getVectorNumElements();
  WhichResult = (M[0] == 0 ? 0 : 1);
  for (unsigned i = 0; i != NumElts; ++i) {
    if (M[i] < 0)
      continue; // ignore UNDEF indices
    if ((unsigned)M[i] != 2 * i + WhichResult)
      return false;
  }

  return true;
}

static bool isTRNMask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) {
  unsigned NumElts = VT.getVectorNumElements();
  WhichResult = (M[0] == 0 ?
0 : 1); 6037 for (unsigned i = 0; i < NumElts; i += 2) { 6038 if ((M[i] >= 0 && (unsigned)M[i] != i + WhichResult) || 6039 (M[i + 1] >= 0 && (unsigned)M[i + 1] != i + NumElts + WhichResult)) 6040 return false; 6041 } 6042 return true; 6043 } 6044 6045 /// isZIP_v_undef_Mask - Special case of isZIPMask for canonical form of 6046 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 6047 /// Mask is e.g., <0, 0, 1, 1> instead of <0, 4, 1, 5>. 6048 static bool isZIP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { 6049 unsigned NumElts = VT.getVectorNumElements(); 6050 WhichResult = (M[0] == 0 ? 0 : 1); 6051 unsigned Idx = WhichResult * NumElts / 2; 6052 for (unsigned i = 0; i != NumElts; i += 2) { 6053 if ((M[i] >= 0 && (unsigned)M[i] != Idx) || 6054 (M[i + 1] >= 0 && (unsigned)M[i + 1] != Idx)) 6055 return false; 6056 Idx += 1; 6057 } 6058 6059 return true; 6060 } 6061 6062 /// isUZP_v_undef_Mask - Special case of isUZPMask for canonical form of 6063 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 6064 /// Mask is e.g., <0, 2, 0, 2> instead of <0, 2, 4, 6>, 6065 static bool isUZP_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { 6066 unsigned Half = VT.getVectorNumElements() / 2; 6067 WhichResult = (M[0] == 0 ? 0 : 1); 6068 for (unsigned j = 0; j != 2; ++j) { 6069 unsigned Idx = WhichResult; 6070 for (unsigned i = 0; i != Half; ++i) { 6071 int MIdx = M[i + j * Half]; 6072 if (MIdx >= 0 && (unsigned)MIdx != Idx) 6073 return false; 6074 Idx += 2; 6075 } 6076 } 6077 6078 return true; 6079 } 6080 6081 /// isTRN_v_undef_Mask - Special case of isTRNMask for canonical form of 6082 /// "vector_shuffle v, v", i.e., "vector_shuffle v, undef". 6083 /// Mask is e.g., <0, 0, 2, 2> instead of <0, 4, 2, 6>. 6084 static bool isTRN_v_undef_Mask(ArrayRef<int> M, EVT VT, unsigned &WhichResult) { 6085 unsigned NumElts = VT.getVectorNumElements(); 6086 WhichResult = (M[0] == 0 ? 
0 : 1); 6087 for (unsigned i = 0; i < NumElts; i += 2) { 6088 if ((M[i] >= 0 && (unsigned)M[i] != i + WhichResult) || 6089 (M[i + 1] >= 0 && (unsigned)M[i + 1] != i + WhichResult)) 6090 return false; 6091 } 6092 return true; 6093 } 6094 6095 static bool isINSMask(ArrayRef<int> M, int NumInputElements, 6096 bool &DstIsLeft, int &Anomaly) { 6097 if (M.size() != static_cast<size_t>(NumInputElements)) 6098 return false; 6099 6100 int NumLHSMatch = 0, NumRHSMatch = 0; 6101 int LastLHSMismatch = -1, LastRHSMismatch = -1; 6102 6103 for (int i = 0; i < NumInputElements; ++i) { 6104 if (M[i] == -1) { 6105 ++NumLHSMatch; 6106 ++NumRHSMatch; 6107 continue; 6108 } 6109 6110 if (M[i] == i) 6111 ++NumLHSMatch; 6112 else 6113 LastLHSMismatch = i; 6114 6115 if (M[i] == i + NumInputElements) 6116 ++NumRHSMatch; 6117 else 6118 LastRHSMismatch = i; 6119 } 6120 6121 if (NumLHSMatch == NumInputElements - 1) { 6122 DstIsLeft = true; 6123 Anomaly = LastLHSMismatch; 6124 return true; 6125 } else if (NumRHSMatch == NumInputElements - 1) { 6126 DstIsLeft = false; 6127 Anomaly = LastRHSMismatch; 6128 return true; 6129 } 6130 6131 return false; 6132 } 6133 6134 static bool isConcatMask(ArrayRef<int> Mask, EVT VT, bool SplitLHS) { 6135 if (VT.getSizeInBits() != 128) 6136 return false; 6137 6138 unsigned NumElts = VT.getVectorNumElements(); 6139 6140 for (int I = 0, E = NumElts / 2; I != E; I++) { 6141 if (Mask[I] != I) 6142 return false; 6143 } 6144 6145 int Offset = NumElts / 2; 6146 for (int I = NumElts / 2, E = NumElts; I != E; I++) { 6147 if (Mask[I] != I + SplitLHS * Offset) 6148 return false; 6149 } 6150 6151 return true; 6152 } 6153 6154 static SDValue tryFormConcatFromShuffle(SDValue Op, SelectionDAG &DAG) { 6155 SDLoc DL(Op); 6156 EVT VT = Op.getValueType(); 6157 SDValue V0 = Op.getOperand(0); 6158 SDValue V1 = Op.getOperand(1); 6159 ArrayRef<int> Mask = cast<ShuffleVectorSDNode>(Op)->getMask(); 6160 6161 if (VT.getVectorElementType() != V0.getValueType().getVectorElementType() || 6162 VT.getVectorElementType() != V1.getValueType().getVectorElementType()) 6163 return SDValue(); 6164 6165 bool SplitV0 = V0.getValueSizeInBits() == 128; 6166 6167 if (!isConcatMask(Mask, VT, SplitV0)) 6168 return SDValue(); 6169 6170 EVT CastVT = EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), 6171 VT.getVectorNumElements() / 2); 6172 if (SplitV0) { 6173 V0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V0, 6174 DAG.getConstant(0, DL, MVT::i64)); 6175 } 6176 if (V1.getValueSizeInBits() == 128) { 6177 V1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, CastVT, V1, 6178 DAG.getConstant(0, DL, MVT::i64)); 6179 } 6180 return DAG.getNode(ISD::CONCAT_VECTORS, DL, VT, V0, V1); 6181 } 6182 6183 /// GeneratePerfectShuffle - Given an entry in the perfect-shuffle table, emit 6184 /// the specified operations to build the shuffle. 
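/// Each 32-bit table entry packs the cost into bits [31:30], the opcode into
/// bits [29:26], and two 13-bit operand IDs into the remaining bits. Each ID
/// encodes four mask indices (0-8, with 8 meaning undef) in base 9, which is
/// why OP_COPY below compares LHSID against the base-9 encodings of the
/// identity masks <0,1,2,3> and <4,5,6,7>.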
6185 static SDValue GeneratePerfectShuffle(unsigned PFEntry, SDValue LHS, 6186 SDValue RHS, SelectionDAG &DAG, 6187 const SDLoc &dl) { 6188 unsigned OpNum = (PFEntry >> 26) & 0x0F; 6189 unsigned LHSID = (PFEntry >> 13) & ((1 << 13) - 1); 6190 unsigned RHSID = (PFEntry >> 0) & ((1 << 13) - 1); 6191 6192 enum { 6193 OP_COPY = 0, // Copy, used for things like <u,u,u,3> to say it is <0,1,2,3> 6194 OP_VREV, 6195 OP_VDUP0, 6196 OP_VDUP1, 6197 OP_VDUP2, 6198 OP_VDUP3, 6199 OP_VEXT1, 6200 OP_VEXT2, 6201 OP_VEXT3, 6202 OP_VUZPL, // VUZP, left result 6203 OP_VUZPR, // VUZP, right result 6204 OP_VZIPL, // VZIP, left result 6205 OP_VZIPR, // VZIP, right result 6206 OP_VTRNL, // VTRN, left result 6207 OP_VTRNR // VTRN, right result 6208 }; 6209 6210 if (OpNum == OP_COPY) { 6211 if (LHSID == (1 * 9 + 2) * 9 + 3) 6212 return LHS; 6213 assert(LHSID == ((4 * 9 + 5) * 9 + 6) * 9 + 7 && "Illegal OP_COPY!"); 6214 return RHS; 6215 } 6216 6217 SDValue OpLHS, OpRHS; 6218 OpLHS = GeneratePerfectShuffle(PerfectShuffleTable[LHSID], LHS, RHS, DAG, dl); 6219 OpRHS = GeneratePerfectShuffle(PerfectShuffleTable[RHSID], LHS, RHS, DAG, dl); 6220 EVT VT = OpLHS.getValueType(); 6221 6222 switch (OpNum) { 6223 default: 6224 llvm_unreachable("Unknown shuffle opcode!"); 6225 case OP_VREV: 6226 // VREV divides the vector in half and swaps within the half. 6227 if (VT.getVectorElementType() == MVT::i32 || 6228 VT.getVectorElementType() == MVT::f32) 6229 return DAG.getNode(AArch64ISD::REV64, dl, VT, OpLHS); 6230 // vrev <4 x i16> -> REV32 6231 if (VT.getVectorElementType() == MVT::i16 || 6232 VT.getVectorElementType() == MVT::f16) 6233 return DAG.getNode(AArch64ISD::REV32, dl, VT, OpLHS); 6234 // vrev <4 x i8> -> REV16 6235 assert(VT.getVectorElementType() == MVT::i8); 6236 return DAG.getNode(AArch64ISD::REV16, dl, VT, OpLHS); 6237 case OP_VDUP0: 6238 case OP_VDUP1: 6239 case OP_VDUP2: 6240 case OP_VDUP3: { 6241 EVT EltTy = VT.getVectorElementType(); 6242 unsigned Opcode; 6243 if (EltTy == MVT::i8) 6244 Opcode = AArch64ISD::DUPLANE8; 6245 else if (EltTy == MVT::i16 || EltTy == MVT::f16) 6246 Opcode = AArch64ISD::DUPLANE16; 6247 else if (EltTy == MVT::i32 || EltTy == MVT::f32) 6248 Opcode = AArch64ISD::DUPLANE32; 6249 else if (EltTy == MVT::i64 || EltTy == MVT::f64) 6250 Opcode = AArch64ISD::DUPLANE64; 6251 else 6252 llvm_unreachable("Invalid vector element type?"); 6253 6254 if (VT.getSizeInBits() == 64) 6255 OpLHS = WidenVector(OpLHS, DAG); 6256 SDValue Lane = DAG.getConstant(OpNum - OP_VDUP0, dl, MVT::i64); 6257 return DAG.getNode(Opcode, dl, VT, OpLHS, Lane); 6258 } 6259 case OP_VEXT1: 6260 case OP_VEXT2: 6261 case OP_VEXT3: { 6262 unsigned Imm = (OpNum - OP_VEXT1 + 1) * getExtFactor(OpLHS); 6263 return DAG.getNode(AArch64ISD::EXT, dl, VT, OpLHS, OpRHS, 6264 DAG.getConstant(Imm, dl, MVT::i32)); 6265 } 6266 case OP_VUZPL: 6267 return DAG.getNode(AArch64ISD::UZP1, dl, DAG.getVTList(VT, VT), OpLHS, 6268 OpRHS); 6269 case OP_VUZPR: 6270 return DAG.getNode(AArch64ISD::UZP2, dl, DAG.getVTList(VT, VT), OpLHS, 6271 OpRHS); 6272 case OP_VZIPL: 6273 return DAG.getNode(AArch64ISD::ZIP1, dl, DAG.getVTList(VT, VT), OpLHS, 6274 OpRHS); 6275 case OP_VZIPR: 6276 return DAG.getNode(AArch64ISD::ZIP2, dl, DAG.getVTList(VT, VT), OpLHS, 6277 OpRHS); 6278 case OP_VTRNL: 6279 return DAG.getNode(AArch64ISD::TRN1, dl, DAG.getVTList(VT, VT), OpLHS, 6280 OpRHS); 6281 case OP_VTRNR: 6282 return DAG.getNode(AArch64ISD::TRN2, dl, DAG.getVTList(VT, VT), OpLHS, 6283 OpRHS); 6284 } 6285 } 6286 6287 static SDValue GenerateTBL(SDValue Op, ArrayRef<int> 
ShuffleMask, 6288 SelectionDAG &DAG) { 6289 // Check to see if we can use the TBL instruction. 6290 SDValue V1 = Op.getOperand(0); 6291 SDValue V2 = Op.getOperand(1); 6292 SDLoc DL(Op); 6293 6294 EVT EltVT = Op.getValueType().getVectorElementType(); 6295 unsigned BytesPerElt = EltVT.getSizeInBits() / 8; 6296 6297 SmallVector<SDValue, 8> TBLMask; 6298 for (int Val : ShuffleMask) { 6299 for (unsigned Byte = 0; Byte < BytesPerElt; ++Byte) { 6300 unsigned Offset = Byte + Val * BytesPerElt; 6301 TBLMask.push_back(DAG.getConstant(Offset, DL, MVT::i32)); 6302 } 6303 } 6304 6305 MVT IndexVT = MVT::v8i8; 6306 unsigned IndexLen = 8; 6307 if (Op.getValueSizeInBits() == 128) { 6308 IndexVT = MVT::v16i8; 6309 IndexLen = 16; 6310 } 6311 6312 SDValue V1Cst = DAG.getNode(ISD::BITCAST, DL, IndexVT, V1); 6313 SDValue V2Cst = DAG.getNode(ISD::BITCAST, DL, IndexVT, V2); 6314 6315 SDValue Shuffle; 6316 if (V2.getNode()->isUndef()) { 6317 if (IndexLen == 8) 6318 V1Cst = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V1Cst, V1Cst); 6319 Shuffle = DAG.getNode( 6320 ISD::INTRINSIC_WO_CHAIN, DL, IndexVT, 6321 DAG.getConstant(Intrinsic::aarch64_neon_tbl1, DL, MVT::i32), V1Cst, 6322 DAG.getBuildVector(IndexVT, DL, 6323 makeArrayRef(TBLMask.data(), IndexLen))); 6324 } else { 6325 if (IndexLen == 8) { 6326 V1Cst = DAG.getNode(ISD::CONCAT_VECTORS, DL, MVT::v16i8, V1Cst, V2Cst); 6327 Shuffle = DAG.getNode( 6328 ISD::INTRINSIC_WO_CHAIN, DL, IndexVT, 6329 DAG.getConstant(Intrinsic::aarch64_neon_tbl1, DL, MVT::i32), V1Cst, 6330 DAG.getBuildVector(IndexVT, DL, 6331 makeArrayRef(TBLMask.data(), IndexLen))); 6332 } else { 6333 // FIXME: We cannot, for the moment, emit a TBL2 instruction because we 6334 // cannot currently represent the register constraints on the input 6335 // table registers. 6336 // Shuffle = DAG.getNode(AArch64ISD::TBL2, DL, IndexVT, V1Cst, V2Cst, 6337 // DAG.getBuildVector(IndexVT, DL, &TBLMask[0], 6338 // IndexLen)); 6339 Shuffle = DAG.getNode( 6340 ISD::INTRINSIC_WO_CHAIN, DL, IndexVT, 6341 DAG.getConstant(Intrinsic::aarch64_neon_tbl2, DL, MVT::i32), V1Cst, 6342 V2Cst, DAG.getBuildVector(IndexVT, DL, 6343 makeArrayRef(TBLMask.data(), IndexLen))); 6344 } 6345 } 6346 return DAG.getNode(ISD::BITCAST, DL, Op.getValueType(), Shuffle); 6347 } 6348 6349 static unsigned getDUPLANEOp(EVT EltType) { 6350 if (EltType == MVT::i8) 6351 return AArch64ISD::DUPLANE8; 6352 if (EltType == MVT::i16 || EltType == MVT::f16) 6353 return AArch64ISD::DUPLANE16; 6354 if (EltType == MVT::i32 || EltType == MVT::f32) 6355 return AArch64ISD::DUPLANE32; 6356 if (EltType == MVT::i64 || EltType == MVT::f64) 6357 return AArch64ISD::DUPLANE64; 6358 6359 llvm_unreachable("Invalid vector element type?"); 6360 } 6361 6362 SDValue AArch64TargetLowering::LowerVECTOR_SHUFFLE(SDValue Op, 6363 SelectionDAG &DAG) const { 6364 SDLoc dl(Op); 6365 EVT VT = Op.getValueType(); 6366 6367 ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode()); 6368 6369 // Convert shuffles that are directly supported on NEON to target-specific 6370 // DAG nodes, instead of keeping them as shuffles and matching them again 6371 // during code selection. This is more efficient and avoids the possibility 6372 // of inconsistencies between legalization and selection. 6373 ArrayRef<int> ShuffleMask = SVN->getMask(); 6374 6375 SDValue V1 = Op.getOperand(0); 6376 SDValue V2 = Op.getOperand(1); 6377 6378 if (SVN->isSplat()) { 6379 int Lane = SVN->getSplatIndex(); 6380 // If this is undef splat, generate it via "just" vdup, if possible. 
  if (Lane == -1)
    Lane = 0;

  if (Lane == 0 && V1.getOpcode() == ISD::SCALAR_TO_VECTOR)
    return DAG.getNode(AArch64ISD::DUP, dl, V1.getValueType(),
                       V1.getOperand(0));
  // Test if V1 is a BUILD_VECTOR and the lane being referenced is a non-
  // constant. If so, we can just reference the lane's definition directly.
  if (V1.getOpcode() == ISD::BUILD_VECTOR &&
      !isa<ConstantSDNode>(V1.getOperand(Lane)))
    return DAG.getNode(AArch64ISD::DUP, dl, VT, V1.getOperand(Lane));

  // Otherwise, duplicate from the lane of the input vector.
  unsigned Opcode = getDUPLANEOp(V1.getValueType().getVectorElementType());

  // SelectionDAGBuilder may have "helpfully" already extracted or concatenated
  // to make a vector of the same size as this SHUFFLE. We can ignore the
  // extract entirely, and canonicalise the concat using WidenVector.
  if (V1.getOpcode() == ISD::EXTRACT_SUBVECTOR) {
    Lane += cast<ConstantSDNode>(V1.getOperand(1))->getZExtValue();
    V1 = V1.getOperand(0);
  } else if (V1.getOpcode() == ISD::CONCAT_VECTORS) {
    unsigned Idx = Lane >= (int)VT.getVectorNumElements() / 2;
    Lane -= Idx * VT.getVectorNumElements() / 2;
    V1 = WidenVector(V1.getOperand(Idx), DAG);
  } else if (VT.getSizeInBits() == 64)
    V1 = WidenVector(V1, DAG);

  return DAG.getNode(Opcode, dl, VT, V1, DAG.getConstant(Lane, dl, MVT::i64));
}

  if (isREVMask(ShuffleMask, VT, 64))
    return DAG.getNode(AArch64ISD::REV64, dl, V1.getValueType(), V1, V2);
  if (isREVMask(ShuffleMask, VT, 32))
    return DAG.getNode(AArch64ISD::REV32, dl, V1.getValueType(), V1, V2);
  if (isREVMask(ShuffleMask, VT, 16))
    return DAG.getNode(AArch64ISD::REV16, dl, V1.getValueType(), V1, V2);

  bool ReverseEXT = false;
  unsigned Imm;
  if (isEXTMask(ShuffleMask, VT, ReverseEXT, Imm)) {
    if (ReverseEXT)
      std::swap(V1, V2);
    Imm *= getExtFactor(V1);
    return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V2,
                       DAG.getConstant(Imm, dl, MVT::i32));
  } else if (V2->isUndef() && isSingletonEXTMask(ShuffleMask, VT, Imm)) {
    Imm *= getExtFactor(V1);
    return DAG.getNode(AArch64ISD::EXT, dl, V1.getValueType(), V1, V1,
                       DAG.getConstant(Imm, dl, MVT::i32));
  }

  unsigned WhichResult;
  if (isZIPMask(ShuffleMask, VT, WhichResult)) {
    unsigned Opc = (WhichResult == 0) ? AArch64ISD::ZIP1 : AArch64ISD::ZIP2;
    return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2);
  }
  if (isUZPMask(ShuffleMask, VT, WhichResult)) {
    unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2;
    return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2);
  }
  if (isTRNMask(ShuffleMask, VT, WhichResult)) {
    unsigned Opc = (WhichResult == 0) ? AArch64ISD::TRN1 : AArch64ISD::TRN2;
    return DAG.getNode(Opc, dl, V1.getValueType(), V1, V2);
  }

  if (isZIP_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
    unsigned Opc = (WhichResult == 0) ? AArch64ISD::ZIP1 : AArch64ISD::ZIP2;
    return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1);
  }
  if (isUZP_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
    unsigned Opc = (WhichResult == 0) ? AArch64ISD::UZP1 : AArch64ISD::UZP2;
    return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1);
  }
  if (isTRN_v_undef_Mask(ShuffleMask, VT, WhichResult)) {
    unsigned Opc = (WhichResult == 0) ?
AArch64ISD::TRN1 : AArch64ISD::TRN2; 6457 return DAG.getNode(Opc, dl, V1.getValueType(), V1, V1); 6458 } 6459 6460 if (SDValue Concat = tryFormConcatFromShuffle(Op, DAG)) 6461 return Concat; 6462 6463 bool DstIsLeft; 6464 int Anomaly; 6465 int NumInputElements = V1.getValueType().getVectorNumElements(); 6466 if (isINSMask(ShuffleMask, NumInputElements, DstIsLeft, Anomaly)) { 6467 SDValue DstVec = DstIsLeft ? V1 : V2; 6468 SDValue DstLaneV = DAG.getConstant(Anomaly, dl, MVT::i64); 6469 6470 SDValue SrcVec = V1; 6471 int SrcLane = ShuffleMask[Anomaly]; 6472 if (SrcLane >= NumInputElements) { 6473 SrcVec = V2; 6474 SrcLane -= VT.getVectorNumElements(); 6475 } 6476 SDValue SrcLaneV = DAG.getConstant(SrcLane, dl, MVT::i64); 6477 6478 EVT ScalarVT = VT.getVectorElementType(); 6479 6480 if (ScalarVT.getSizeInBits() < 32 && ScalarVT.isInteger()) 6481 ScalarVT = MVT::i32; 6482 6483 return DAG.getNode( 6484 ISD::INSERT_VECTOR_ELT, dl, VT, DstVec, 6485 DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, ScalarVT, SrcVec, SrcLaneV), 6486 DstLaneV); 6487 } 6488 6489 // If the shuffle is not directly supported and it has 4 elements, use 6490 // the PerfectShuffle-generated table to synthesize it from other shuffles. 6491 unsigned NumElts = VT.getVectorNumElements(); 6492 if (NumElts == 4) { 6493 unsigned PFIndexes[4]; 6494 for (unsigned i = 0; i != 4; ++i) { 6495 if (ShuffleMask[i] < 0) 6496 PFIndexes[i] = 8; 6497 else 6498 PFIndexes[i] = ShuffleMask[i]; 6499 } 6500 6501 // Compute the index in the perfect shuffle table. 6502 unsigned PFTableIndex = PFIndexes[0] * 9 * 9 * 9 + PFIndexes[1] * 9 * 9 + 6503 PFIndexes[2] * 9 + PFIndexes[3]; 6504 unsigned PFEntry = PerfectShuffleTable[PFTableIndex]; 6505 unsigned Cost = (PFEntry >> 30); 6506 6507 if (Cost <= 4) 6508 return GeneratePerfectShuffle(PFEntry, V1, V2, DAG, dl); 6509 } 6510 6511 return GenerateTBL(Op, ShuffleMask, DAG); 6512 } 6513 6514 static bool resolveBuildVector(BuildVectorSDNode *BVN, APInt &CnstBits, 6515 APInt &UndefBits) { 6516 EVT VT = BVN->getValueType(0); 6517 APInt SplatBits, SplatUndef; 6518 unsigned SplatBitSize; 6519 bool HasAnyUndefs; 6520 if (BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize, HasAnyUndefs)) { 6521 unsigned NumSplats = VT.getSizeInBits() / SplatBitSize; 6522 6523 for (unsigned i = 0; i < NumSplats; ++i) { 6524 CnstBits <<= SplatBitSize; 6525 UndefBits <<= SplatBitSize; 6526 CnstBits |= SplatBits.zextOrTrunc(VT.getSizeInBits()); 6527 UndefBits |= (SplatBits ^ SplatUndef).zextOrTrunc(VT.getSizeInBits()); 6528 } 6529 6530 return true; 6531 } 6532 6533 return false; 6534 } 6535 6536 // Try 64-bit splatted SIMD immediate. 6537 static SDValue tryAdvSIMDModImm64(unsigned NewOp, SDValue Op, SelectionDAG &DAG, 6538 const APInt &Bits) { 6539 if (Bits.getHiBits(64) == Bits.getLoBits(64)) { 6540 uint64_t Value = Bits.zextOrTrunc(64).getZExtValue(); 6541 EVT VT = Op.getValueType(); 6542 MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v2i64 : MVT::f64; 6543 6544 if (AArch64_AM::isAdvSIMDModImmType10(Value)) { 6545 Value = AArch64_AM::encodeAdvSIMDModImmType10(Value); 6546 6547 SDLoc dl(Op); 6548 SDValue Mov = DAG.getNode(NewOp, dl, MovTy, 6549 DAG.getConstant(Value, dl, MVT::i32)); 6550 return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov); 6551 } 6552 } 6553 6554 return SDValue(); 6555 } 6556 6557 // Try 32-bit splatted SIMD immediate. 
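// The type1-type4 checks below correspond to an 8-bit value placed at each of
// the four byte positions of a 32-bit element; e.g. a v4i32 splat of
// 0x00AB0000 is representable as MOVI with imm8 = 0xAB, LSL #16 (type 3).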
6558 static SDValue tryAdvSIMDModImm32(unsigned NewOp, SDValue Op, SelectionDAG &DAG, 6559 const APInt &Bits, 6560 const SDValue *LHS = nullptr) { 6561 if (Bits.getHiBits(64) == Bits.getLoBits(64)) { 6562 uint64_t Value = Bits.zextOrTrunc(64).getZExtValue(); 6563 EVT VT = Op.getValueType(); 6564 MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v4i32 : MVT::v2i32; 6565 bool isAdvSIMDModImm = false; 6566 uint64_t Shift; 6567 6568 if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType1(Value))) { 6569 Value = AArch64_AM::encodeAdvSIMDModImmType1(Value); 6570 Shift = 0; 6571 } 6572 else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType2(Value))) { 6573 Value = AArch64_AM::encodeAdvSIMDModImmType2(Value); 6574 Shift = 8; 6575 } 6576 else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType3(Value))) { 6577 Value = AArch64_AM::encodeAdvSIMDModImmType3(Value); 6578 Shift = 16; 6579 } 6580 else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType4(Value))) { 6581 Value = AArch64_AM::encodeAdvSIMDModImmType4(Value); 6582 Shift = 24; 6583 } 6584 6585 if (isAdvSIMDModImm) { 6586 SDLoc dl(Op); 6587 SDValue Mov; 6588 6589 if (LHS) 6590 Mov = DAG.getNode(NewOp, dl, MovTy, *LHS, 6591 DAG.getConstant(Value, dl, MVT::i32), 6592 DAG.getConstant(Shift, dl, MVT::i32)); 6593 else 6594 Mov = DAG.getNode(NewOp, dl, MovTy, 6595 DAG.getConstant(Value, dl, MVT::i32), 6596 DAG.getConstant(Shift, dl, MVT::i32)); 6597 6598 return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov); 6599 } 6600 } 6601 6602 return SDValue(); 6603 } 6604 6605 // Try 16-bit splatted SIMD immediate. 6606 static SDValue tryAdvSIMDModImm16(unsigned NewOp, SDValue Op, SelectionDAG &DAG, 6607 const APInt &Bits, 6608 const SDValue *LHS = nullptr) { 6609 if (Bits.getHiBits(64) == Bits.getLoBits(64)) { 6610 uint64_t Value = Bits.zextOrTrunc(64).getZExtValue(); 6611 EVT VT = Op.getValueType(); 6612 MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v8i16 : MVT::v4i16; 6613 bool isAdvSIMDModImm = false; 6614 uint64_t Shift; 6615 6616 if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType5(Value))) { 6617 Value = AArch64_AM::encodeAdvSIMDModImmType5(Value); 6618 Shift = 0; 6619 } 6620 else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType6(Value))) { 6621 Value = AArch64_AM::encodeAdvSIMDModImmType6(Value); 6622 Shift = 8; 6623 } 6624 6625 if (isAdvSIMDModImm) { 6626 SDLoc dl(Op); 6627 SDValue Mov; 6628 6629 if (LHS) 6630 Mov = DAG.getNode(NewOp, dl, MovTy, *LHS, 6631 DAG.getConstant(Value, dl, MVT::i32), 6632 DAG.getConstant(Shift, dl, MVT::i32)); 6633 else 6634 Mov = DAG.getNode(NewOp, dl, MovTy, 6635 DAG.getConstant(Value, dl, MVT::i32), 6636 DAG.getConstant(Shift, dl, MVT::i32)); 6637 6638 return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov); 6639 } 6640 } 6641 6642 return SDValue(); 6643 } 6644 6645 // Try 32-bit splatted SIMD immediate with shifted ones. 6646 static SDValue tryAdvSIMDModImm321s(unsigned NewOp, SDValue Op, 6647 SelectionDAG &DAG, const APInt &Bits) { 6648 if (Bits.getHiBits(64) == Bits.getLoBits(64)) { 6649 uint64_t Value = Bits.zextOrTrunc(64).getZExtValue(); 6650 EVT VT = Op.getValueType(); 6651 MVT MovTy = (VT.getSizeInBits() == 128) ? 
MVT::v4i32 : MVT::v2i32; 6652 bool isAdvSIMDModImm = false; 6653 uint64_t Shift; 6654 6655 if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType7(Value))) { 6656 Value = AArch64_AM::encodeAdvSIMDModImmType7(Value); 6657 Shift = 264; 6658 } 6659 else if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType8(Value))) { 6660 Value = AArch64_AM::encodeAdvSIMDModImmType8(Value); 6661 Shift = 272; 6662 } 6663 6664 if (isAdvSIMDModImm) { 6665 SDLoc dl(Op); 6666 SDValue Mov = DAG.getNode(NewOp, dl, MovTy, 6667 DAG.getConstant(Value, dl, MVT::i32), 6668 DAG.getConstant(Shift, dl, MVT::i32)); 6669 return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov); 6670 } 6671 } 6672 6673 return SDValue(); 6674 } 6675 6676 // Try 8-bit splatted SIMD immediate. 6677 static SDValue tryAdvSIMDModImm8(unsigned NewOp, SDValue Op, SelectionDAG &DAG, 6678 const APInt &Bits) { 6679 if (Bits.getHiBits(64) == Bits.getLoBits(64)) { 6680 uint64_t Value = Bits.zextOrTrunc(64).getZExtValue(); 6681 EVT VT = Op.getValueType(); 6682 MVT MovTy = (VT.getSizeInBits() == 128) ? MVT::v16i8 : MVT::v8i8; 6683 6684 if (AArch64_AM::isAdvSIMDModImmType9(Value)) { 6685 Value = AArch64_AM::encodeAdvSIMDModImmType9(Value); 6686 6687 SDLoc dl(Op); 6688 SDValue Mov = DAG.getNode(NewOp, dl, MovTy, 6689 DAG.getConstant(Value, dl, MVT::i32)); 6690 return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov); 6691 } 6692 } 6693 6694 return SDValue(); 6695 } 6696 6697 // Try FP splatted SIMD immediate. 6698 static SDValue tryAdvSIMDModImmFP(unsigned NewOp, SDValue Op, SelectionDAG &DAG, 6699 const APInt &Bits) { 6700 if (Bits.getHiBits(64) == Bits.getLoBits(64)) { 6701 uint64_t Value = Bits.zextOrTrunc(64).getZExtValue(); 6702 EVT VT = Op.getValueType(); 6703 bool isWide = (VT.getSizeInBits() == 128); 6704 MVT MovTy; 6705 bool isAdvSIMDModImm = false; 6706 6707 if ((isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType11(Value))) { 6708 Value = AArch64_AM::encodeAdvSIMDModImmType11(Value); 6709 MovTy = isWide ? MVT::v4f32 : MVT::v2f32; 6710 } 6711 else if (isWide && 6712 (isAdvSIMDModImm = AArch64_AM::isAdvSIMDModImmType12(Value))) { 6713 Value = AArch64_AM::encodeAdvSIMDModImmType12(Value); 6714 MovTy = MVT::v2f64; 6715 } 6716 6717 if (isAdvSIMDModImm) { 6718 SDLoc dl(Op); 6719 SDValue Mov = DAG.getNode(NewOp, dl, MovTy, 6720 DAG.getConstant(Value, dl, MVT::i32)); 6721 return DAG.getNode(AArch64ISD::NVCAST, dl, VT, Mov); 6722 } 6723 } 6724 6725 return SDValue(); 6726 } 6727 6728 SDValue AArch64TargetLowering::LowerVectorAND(SDValue Op, 6729 SelectionDAG &DAG) const { 6730 SDValue LHS = Op.getOperand(0); 6731 EVT VT = Op.getValueType(); 6732 6733 BuildVectorSDNode *BVN = 6734 dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode()); 6735 if (!BVN) { 6736 // AND commutes, so try swapping the operands. 6737 LHS = Op.getOperand(1); 6738 BVN = dyn_cast<BuildVectorSDNode>(Op.getOperand(0).getNode()); 6739 } 6740 if (!BVN) 6741 return Op; 6742 6743 APInt DefBits(VT.getSizeInBits(), 0); 6744 APInt UndefBits(VT.getSizeInBits(), 0); 6745 if (resolveBuildVector(BVN, DefBits, UndefBits)) { 6746 SDValue NewOp; 6747 6748 // We only have BIC vector immediate instruction, which is and-not. 
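    // E.g. (and X, <0xFFFFFF00 splat>) is emitted as BIC X.4s, #0xFF:
    // inverting DefBits turns the bits to clear into the immediate BIC wants.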
    DefBits = ~DefBits;
    if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::BICi, Op, DAG,
                                    DefBits, &LHS)) ||
        (NewOp = tryAdvSIMDModImm16(AArch64ISD::BICi, Op, DAG,
                                    DefBits, &LHS)))
      return NewOp;

    UndefBits = ~UndefBits;
    if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::BICi, Op, DAG,
                                    UndefBits, &LHS)) ||
        (NewOp = tryAdvSIMDModImm16(AArch64ISD::BICi, Op, DAG,
                                    UndefBits, &LHS)))
      return NewOp;
  }

  // We can always fall back to a non-immediate AND.
  return Op;
}

// Specialized code to quickly find if PotentialBVec is a BuildVector that
// consists of only the same constant int value, returned in the reference
// argument ConstVal.
static bool isAllConstantBuildVector(const SDValue &PotentialBVec,
                                     uint64_t &ConstVal) {
  BuildVectorSDNode *Bvec = dyn_cast<BuildVectorSDNode>(PotentialBVec);
  if (!Bvec)
    return false;
  ConstantSDNode *FirstElt = dyn_cast<ConstantSDNode>(Bvec->getOperand(0));
  if (!FirstElt)
    return false;
  EVT VT = Bvec->getValueType(0);
  unsigned NumElts = VT.getVectorNumElements();
  for (unsigned i = 1; i < NumElts; ++i)
    if (dyn_cast<ConstantSDNode>(Bvec->getOperand(i)) != FirstElt)
      return false;
  ConstVal = FirstElt->getZExtValue();
  return true;
}

static unsigned getIntrinsicID(const SDNode *N) {
  unsigned Opcode = N->getOpcode();
  switch (Opcode) {
  default:
    return Intrinsic::not_intrinsic;
  case ISD::INTRINSIC_WO_CHAIN: {
    unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    if (IID < Intrinsic::num_intrinsics)
      return IID;
    return Intrinsic::not_intrinsic;
  }
  }
}

// Attempt to form a vector S[LR]I from (or (and X, BvecC1), (lsl Y, C2)),
// to (SLI X, Y, C2), where X and Y have matching vector types, BvecC1 is a
// BUILD_VECTOR with constant element C1, C2 is a constant, and C1 == ~C2.
// Also, logical shift right -> sri, with the same structure.
static SDValue tryLowerToSLI(SDNode *N, SelectionDAG &DAG) {
  EVT VT = N->getValueType(0);

  if (!VT.isVector())
    return SDValue();

  SDLoc DL(N);

  // Is the first op an AND?
  const SDValue And = N->getOperand(0);
  if (And.getOpcode() != ISD::AND)
    return SDValue();

  // Is the second op an shl or lshr?
  SDValue Shift = N->getOperand(1);
  // This will have been turned into: AArch64ISD::VSHL vector, #shift
  // or AArch64ISD::VLSHR vector, #shift
  unsigned ShiftOpc = Shift.getOpcode();
  if ((ShiftOpc != AArch64ISD::VSHL && ShiftOpc != AArch64ISD::VLSHR))
    return SDValue();
  bool IsShiftRight = ShiftOpc == AArch64ISD::VLSHR;

  // Is the shift amount constant?
  ConstantSDNode *C2node = dyn_cast<ConstantSDNode>(Shift.getOperand(1));
  if (!C2node)
    return SDValue();

  // Is the and mask vector all constant?
  uint64_t C1;
  if (!isAllConstantBuildVector(And.getOperand(1), C1))
    return SDValue();

  // Is C1 == ~C2, taking into account how much one can shift elements of a
  // particular size?
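  // (Note the check below compares the mask against the complement of the
  // shift amount itself: e.g. with i8 elements and C2 == 3, C1 must be 0xFC.)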
6840 uint64_t C2 = C2node->getZExtValue(); 6841 unsigned ElemSizeInBits = VT.getScalarSizeInBits(); 6842 if (C2 > ElemSizeInBits) 6843 return SDValue(); 6844 unsigned ElemMask = (1 << ElemSizeInBits) - 1; 6845 if ((C1 & ElemMask) != (~C2 & ElemMask)) 6846 return SDValue(); 6847 6848 SDValue X = And.getOperand(0); 6849 SDValue Y = Shift.getOperand(0); 6850 6851 unsigned Intrin = 6852 IsShiftRight ? Intrinsic::aarch64_neon_vsri : Intrinsic::aarch64_neon_vsli; 6853 SDValue ResultSLI = 6854 DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 6855 DAG.getConstant(Intrin, DL, MVT::i32), X, Y, 6856 Shift.getOperand(1)); 6857 6858 LLVM_DEBUG(dbgs() << "aarch64-lower: transformed: \n"); 6859 LLVM_DEBUG(N->dump(&DAG)); 6860 LLVM_DEBUG(dbgs() << "into: \n"); 6861 LLVM_DEBUG(ResultSLI->dump(&DAG)); 6862 6863 ++NumShiftInserts; 6864 return ResultSLI; 6865 } 6866 6867 SDValue AArch64TargetLowering::LowerVectorOR(SDValue Op, 6868 SelectionDAG &DAG) const { 6869 // Attempt to form a vector S[LR]I from (or (and X, C1), (lsl Y, C2)) 6870 if (EnableAArch64SlrGeneration) { 6871 if (SDValue Res = tryLowerToSLI(Op.getNode(), DAG)) 6872 return Res; 6873 } 6874 6875 EVT VT = Op.getValueType(); 6876 6877 SDValue LHS = Op.getOperand(0); 6878 BuildVectorSDNode *BVN = 6879 dyn_cast<BuildVectorSDNode>(Op.getOperand(1).getNode()); 6880 if (!BVN) { 6881 // OR commutes, so try swapping the operands. 6882 LHS = Op.getOperand(1); 6883 BVN = dyn_cast<BuildVectorSDNode>(Op.getOperand(0).getNode()); 6884 } 6885 if (!BVN) 6886 return Op; 6887 6888 APInt DefBits(VT.getSizeInBits(), 0); 6889 APInt UndefBits(VT.getSizeInBits(), 0); 6890 if (resolveBuildVector(BVN, DefBits, UndefBits)) { 6891 SDValue NewOp; 6892 6893 if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::ORRi, Op, DAG, 6894 DefBits, &LHS)) || 6895 (NewOp = tryAdvSIMDModImm16(AArch64ISD::ORRi, Op, DAG, 6896 DefBits, &LHS))) 6897 return NewOp; 6898 6899 if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::ORRi, Op, DAG, 6900 UndefBits, &LHS)) || 6901 (NewOp = tryAdvSIMDModImm16(AArch64ISD::ORRi, Op, DAG, 6902 UndefBits, &LHS))) 6903 return NewOp; 6904 } 6905 6906 // We can always fall back to a non-immediate OR. 6907 return Op; 6908 } 6909 6910 // Normalize the operands of BUILD_VECTOR. The value of constant operands will 6911 // be truncated to fit element width. 
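// E.g. a v8i8 BUILD_VECTOR whose lane is the i32 constant 0x1FF is rebuilt
// below with the i32 constant 0xFF, keeping only the low element-sized bits.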
6912 static SDValue NormalizeBuildVector(SDValue Op, 6913 SelectionDAG &DAG) { 6914 assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unknown opcode!"); 6915 SDLoc dl(Op); 6916 EVT VT = Op.getValueType(); 6917 EVT EltTy= VT.getVectorElementType(); 6918 6919 if (EltTy.isFloatingPoint() || EltTy.getSizeInBits() > 16) 6920 return Op; 6921 6922 SmallVector<SDValue, 16> Ops; 6923 for (SDValue Lane : Op->ops()) { 6924 if (auto *CstLane = dyn_cast<ConstantSDNode>(Lane)) { 6925 APInt LowBits(EltTy.getSizeInBits(), 6926 CstLane->getZExtValue()); 6927 Lane = DAG.getConstant(LowBits.getZExtValue(), dl, MVT::i32); 6928 } 6929 Ops.push_back(Lane); 6930 } 6931 return DAG.getBuildVector(VT, dl, Ops); 6932 } 6933 6934 static SDValue ConstantBuildVector(SDValue Op, SelectionDAG &DAG) { 6935 EVT VT = Op.getValueType(); 6936 6937 APInt DefBits(VT.getSizeInBits(), 0); 6938 APInt UndefBits(VT.getSizeInBits(), 0); 6939 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); 6940 if (resolveBuildVector(BVN, DefBits, UndefBits)) { 6941 SDValue NewOp; 6942 if ((NewOp = tryAdvSIMDModImm64(AArch64ISD::MOVIedit, Op, DAG, DefBits)) || 6943 (NewOp = tryAdvSIMDModImm32(AArch64ISD::MOVIshift, Op, DAG, DefBits)) || 6944 (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MOVImsl, Op, DAG, DefBits)) || 6945 (NewOp = tryAdvSIMDModImm16(AArch64ISD::MOVIshift, Op, DAG, DefBits)) || 6946 (NewOp = tryAdvSIMDModImm8(AArch64ISD::MOVI, Op, DAG, DefBits)) || 6947 (NewOp = tryAdvSIMDModImmFP(AArch64ISD::FMOV, Op, DAG, DefBits))) 6948 return NewOp; 6949 6950 DefBits = ~DefBits; 6951 if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::MVNIshift, Op, DAG, DefBits)) || 6952 (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MVNImsl, Op, DAG, DefBits)) || 6953 (NewOp = tryAdvSIMDModImm16(AArch64ISD::MVNIshift, Op, DAG, DefBits))) 6954 return NewOp; 6955 6956 DefBits = UndefBits; 6957 if ((NewOp = tryAdvSIMDModImm64(AArch64ISD::MOVIedit, Op, DAG, DefBits)) || 6958 (NewOp = tryAdvSIMDModImm32(AArch64ISD::MOVIshift, Op, DAG, DefBits)) || 6959 (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MOVImsl, Op, DAG, DefBits)) || 6960 (NewOp = tryAdvSIMDModImm16(AArch64ISD::MOVIshift, Op, DAG, DefBits)) || 6961 (NewOp = tryAdvSIMDModImm8(AArch64ISD::MOVI, Op, DAG, DefBits)) || 6962 (NewOp = tryAdvSIMDModImmFP(AArch64ISD::FMOV, Op, DAG, DefBits))) 6963 return NewOp; 6964 6965 DefBits = ~UndefBits; 6966 if ((NewOp = tryAdvSIMDModImm32(AArch64ISD::MVNIshift, Op, DAG, DefBits)) || 6967 (NewOp = tryAdvSIMDModImm321s(AArch64ISD::MVNImsl, Op, DAG, DefBits)) || 6968 (NewOp = tryAdvSIMDModImm16(AArch64ISD::MVNIshift, Op, DAG, DefBits))) 6969 return NewOp; 6970 } 6971 6972 return SDValue(); 6973 } 6974 6975 SDValue AArch64TargetLowering::LowerBUILD_VECTOR(SDValue Op, 6976 SelectionDAG &DAG) const { 6977 EVT VT = Op.getValueType(); 6978 6979 // Try to build a simple constant vector. 6980 Op = NormalizeBuildVector(Op, DAG); 6981 if (VT.isInteger()) { 6982 // Certain vector constants, used to express things like logical NOT and 6983 // arithmetic NEG, are passed through unmodified. This allows special 6984 // patterns for these operations to match, which will lower these constants 6985 // to whatever is proven necessary. 
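    // E.g. (xor X, <all-ones>) is a vector NOT; materializing the all-ones
    // splat early as a MOVI would stop that pattern from matching.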
6986 BuildVectorSDNode *BVN = cast<BuildVectorSDNode>(Op.getNode()); 6987 if (BVN->isConstant()) 6988 if (ConstantSDNode *Const = BVN->getConstantSplatNode()) { 6989 unsigned BitSize = VT.getVectorElementType().getSizeInBits(); 6990 APInt Val(BitSize, 6991 Const->getAPIntValue().zextOrTrunc(BitSize).getZExtValue()); 6992 if (Val.isNullValue() || Val.isAllOnesValue()) 6993 return Op; 6994 } 6995 } 6996 6997 if (SDValue V = ConstantBuildVector(Op, DAG)) 6998 return V; 6999 7000 // Scan through the operands to find some interesting properties we can 7001 // exploit: 7002 // 1) If only one value is used, we can use a DUP, or 7003 // 2) if only the low element is not undef, we can just insert that, or 7004 // 3) if only one constant value is used (w/ some non-constant lanes), 7005 // we can splat the constant value into the whole vector then fill 7006 // in the non-constant lanes. 7007 // 4) FIXME: If different constant values are used, but we can intelligently 7008 // select the values we'll be overwriting for the non-constant 7009 // lanes such that we can directly materialize the vector 7010 // some other way (MOVI, e.g.), we can be sneaky. 7011 // 5) if all operands are EXTRACT_VECTOR_ELT, check for VUZP. 7012 SDLoc dl(Op); 7013 unsigned NumElts = VT.getVectorNumElements(); 7014 bool isOnlyLowElement = true; 7015 bool usesOnlyOneValue = true; 7016 bool usesOnlyOneConstantValue = true; 7017 bool isConstant = true; 7018 bool AllLanesExtractElt = true; 7019 unsigned NumConstantLanes = 0; 7020 SDValue Value; 7021 SDValue ConstantValue; 7022 for (unsigned i = 0; i < NumElts; ++i) { 7023 SDValue V = Op.getOperand(i); 7024 if (V.getOpcode() != ISD::EXTRACT_VECTOR_ELT) 7025 AllLanesExtractElt = false; 7026 if (V.isUndef()) 7027 continue; 7028 if (i > 0) 7029 isOnlyLowElement = false; 7030 if (!isa<ConstantFPSDNode>(V) && !isa<ConstantSDNode>(V)) 7031 isConstant = false; 7032 7033 if (isa<ConstantSDNode>(V) || isa<ConstantFPSDNode>(V)) { 7034 ++NumConstantLanes; 7035 if (!ConstantValue.getNode()) 7036 ConstantValue = V; 7037 else if (ConstantValue != V) 7038 usesOnlyOneConstantValue = false; 7039 } 7040 7041 if (!Value.getNode()) 7042 Value = V; 7043 else if (V != Value) 7044 usesOnlyOneValue = false; 7045 } 7046 7047 if (!Value.getNode()) { 7048 LLVM_DEBUG( 7049 dbgs() << "LowerBUILD_VECTOR: value undefined, creating undef node\n"); 7050 return DAG.getUNDEF(VT); 7051 } 7052 7053 if (isOnlyLowElement) { 7054 LLVM_DEBUG(dbgs() << "LowerBUILD_VECTOR: only low element used, creating 1 " 7055 "SCALAR_TO_VECTOR node\n"); 7056 return DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Value); 7057 } 7058 7059 if (AllLanesExtractElt) { 7060 SDNode *Vector = nullptr; 7061 bool Even = false; 7062 bool Odd = false; 7063 // Check whether the extract elements match the Even pattern <0,2,4,...> or 7064 // the Odd pattern <1,3,5,...>. 7065 for (unsigned i = 0; i < NumElts; ++i) { 7066 SDValue V = Op.getOperand(i); 7067 const SDNode *N = V.getNode(); 7068 if (!isa<ConstantSDNode>(N->getOperand(1))) 7069 break; 7070 SDValue N0 = N->getOperand(0); 7071 7072 // All elements are extracted from the same vector. 7073 if (!Vector) { 7074 Vector = N0.getNode(); 7075 // Check that the type of EXTRACT_VECTOR_ELT matches the type of 7076 // BUILD_VECTOR. 
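        // (E.g. i8 lanes extracted into an i16 BUILD_VECTOR imply an implicit
        // extension that the UZP nodes formed below could not express.)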
7077 if (VT.getVectorElementType() != 7078 N0.getValueType().getVectorElementType()) 7079 break; 7080 } else if (Vector != N0.getNode()) { 7081 Odd = false; 7082 Even = false; 7083 break; 7084 } 7085 7086 // Extracted values are either at Even indices <0,2,4,...> or at Odd 7087 // indices <1,3,5,...>. 7088 uint64_t Val = N->getConstantOperandVal(1); 7089 if (Val == 2 * i) { 7090 Even = true; 7091 continue; 7092 } 7093 if (Val - 1 == 2 * i) { 7094 Odd = true; 7095 continue; 7096 } 7097 7098 // Something does not match: abort. 7099 Odd = false; 7100 Even = false; 7101 break; 7102 } 7103 if (Even || Odd) { 7104 SDValue LHS = 7105 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, SDValue(Vector, 0), 7106 DAG.getConstant(0, dl, MVT::i64)); 7107 SDValue RHS = 7108 DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, SDValue(Vector, 0), 7109 DAG.getConstant(NumElts, dl, MVT::i64)); 7110 7111 if (Even && !Odd) 7112 return DAG.getNode(AArch64ISD::UZP1, dl, DAG.getVTList(VT, VT), LHS, 7113 RHS); 7114 if (Odd && !Even) 7115 return DAG.getNode(AArch64ISD::UZP2, dl, DAG.getVTList(VT, VT), LHS, 7116 RHS); 7117 } 7118 } 7119 7120 // Use DUP for non-constant splats. For f32 constant splats, reduce to 7121 // i32 and try again. 7122 if (usesOnlyOneValue) { 7123 if (!isConstant) { 7124 if (Value.getOpcode() != ISD::EXTRACT_VECTOR_ELT || 7125 Value.getValueType() != VT) { 7126 LLVM_DEBUG( 7127 dbgs() << "LowerBUILD_VECTOR: use DUP for non-constant splats\n"); 7128 return DAG.getNode(AArch64ISD::DUP, dl, VT, Value); 7129 } 7130 7131 // This is actually a DUPLANExx operation, which keeps everything vectory. 7132 7133 SDValue Lane = Value.getOperand(1); 7134 Value = Value.getOperand(0); 7135 if (Value.getValueSizeInBits() == 64) { 7136 LLVM_DEBUG( 7137 dbgs() << "LowerBUILD_VECTOR: DUPLANE works on 128-bit vectors, " 7138 "widening it\n"); 7139 Value = WidenVector(Value, DAG); 7140 } 7141 7142 unsigned Opcode = getDUPLANEOp(VT.getVectorElementType()); 7143 return DAG.getNode(Opcode, dl, VT, Value, Lane); 7144 } 7145 7146 if (VT.getVectorElementType().isFloatingPoint()) { 7147 SmallVector<SDValue, 8> Ops; 7148 EVT EltTy = VT.getVectorElementType(); 7149 assert ((EltTy == MVT::f16 || EltTy == MVT::f32 || EltTy == MVT::f64) && 7150 "Unsupported floating-point vector type"); 7151 LLVM_DEBUG( 7152 dbgs() << "LowerBUILD_VECTOR: float constant splats, creating int " 7153 "BITCASTS, and try again\n"); 7154 MVT NewType = MVT::getIntegerVT(EltTy.getSizeInBits()); 7155 for (unsigned i = 0; i < NumElts; ++i) 7156 Ops.push_back(DAG.getNode(ISD::BITCAST, dl, NewType, Op.getOperand(i))); 7157 EVT VecVT = EVT::getVectorVT(*DAG.getContext(), NewType, NumElts); 7158 SDValue Val = DAG.getBuildVector(VecVT, dl, Ops); 7159 LLVM_DEBUG(dbgs() << "LowerBUILD_VECTOR: trying to lower new vector: "; 7160 Val.dump();); 7161 Val = LowerBUILD_VECTOR(Val, DAG); 7162 if (Val.getNode()) 7163 return DAG.getNode(ISD::BITCAST, dl, VT, Val); 7164 } 7165 } 7166 7167 // If there was only one constant value used and for more than one lane, 7168 // start by splatting that value, then replace the non-constant lanes. This 7169 // is better than the default, which will perform a separate initialization 7170 // for each lane. 7171 if (NumConstantLanes > 0 && usesOnlyOneConstantValue) { 7172 // Firstly, try to materialize the splat constant. 7173 SDValue Vec = DAG.getSplatBuildVector(VT, dl, ConstantValue), 7174 Val = ConstantBuildVector(Vec, DAG); 7175 if (!Val) { 7176 // Otherwise, materialize the constant and splat it. 
7177 Val = DAG.getNode(AArch64ISD::DUP, dl, VT, ConstantValue); 7178 DAG.ReplaceAllUsesWith(Vec.getNode(), &Val); 7179 } 7180 7181 // Now insert the non-constant lanes. 7182 for (unsigned i = 0; i < NumElts; ++i) { 7183 SDValue V = Op.getOperand(i); 7184 SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i64); 7185 if (!isa<ConstantSDNode>(V) && !isa<ConstantFPSDNode>(V)) 7186 // Note that type legalization likely mucked about with the VT of the 7187 // source operand, so we may have to convert it here before inserting. 7188 Val = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Val, V, LaneIdx); 7189 } 7190 return Val; 7191 } 7192 7193 // This will generate a load from the constant pool. 7194 if (isConstant) { 7195 LLVM_DEBUG( 7196 dbgs() << "LowerBUILD_VECTOR: all elements are constant, use default " 7197 "expansion\n"); 7198 return SDValue(); 7199 } 7200 7201 // Empirical tests suggest this is rarely worth it for vectors of length <= 2. 7202 if (NumElts >= 4) { 7203 if (SDValue shuffle = ReconstructShuffle(Op, DAG)) 7204 return shuffle; 7205 } 7206 7207 // If all else fails, just use a sequence of INSERT_VECTOR_ELT when we 7208 // know the default expansion would otherwise fall back on something even 7209 // worse. For a vector with one or two non-undef values, that's 7210 // scalar_to_vector for the elements followed by a shuffle (provided the 7211 // shuffle is valid for the target) and materialization element by element 7212 // on the stack followed by a load for everything else. 7213 if (!isConstant && !usesOnlyOneValue) { 7214 LLVM_DEBUG( 7215 dbgs() << "LowerBUILD_VECTOR: alternatives failed, creating sequence " 7216 "of INSERT_VECTOR_ELT\n"); 7217 7218 SDValue Vec = DAG.getUNDEF(VT); 7219 SDValue Op0 = Op.getOperand(0); 7220 unsigned i = 0; 7221 7222 // Use SCALAR_TO_VECTOR for lane zero to 7223 // a) Avoid a RMW dependency on the full vector register, and 7224 // b) Allow the register coalescer to fold away the copy if the 7225 // value is already in an S or D register, and we're forced to emit an 7226 // INSERT_SUBREG that we can't fold anywhere. 7227 // 7228 // We also allow types like i8 and i16 which are illegal scalar but legal 7229 // vector element types. After type-legalization the inserted value is 7230 // extended (i32) and it is safe to cast them to the vector type by ignoring 7231 // the upper bits of the lowest lane (e.g. v8i8, v4i16). 7232 if (!Op0.isUndef()) { 7233 LLVM_DEBUG(dbgs() << "Creating node for op0, it is not undefined:\n"); 7234 Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, VT, Op0); 7235 ++i; 7236 } 7237 LLVM_DEBUG(if (i < NumElts) dbgs() 7238 << "Creating nodes for the other vector elements:\n";); 7239 for (; i < NumElts; ++i) { 7240 SDValue V = Op.getOperand(i); 7241 if (V.isUndef()) 7242 continue; 7243 SDValue LaneIdx = DAG.getConstant(i, dl, MVT::i64); 7244 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, dl, VT, Vec, V, LaneIdx); 7245 } 7246 return Vec; 7247 } 7248 7249 LLVM_DEBUG( 7250 dbgs() << "LowerBUILD_VECTOR: use default expansion, failed to find " 7251 "better alternative\n"); 7252 return SDValue(); 7253 } 7254 7255 SDValue AArch64TargetLowering::LowerINSERT_VECTOR_ELT(SDValue Op, 7256 SelectionDAG &DAG) const { 7257 assert(Op.getOpcode() == ISD::INSERT_VECTOR_ELT && "Unknown opcode!"); 7258 7259 // Check for non-constant or out of range lane. 
7260 EVT VT = Op.getOperand(0).getValueType(); 7261 ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Op.getOperand(2)); 7262 if (!CI || CI->getZExtValue() >= VT.getVectorNumElements()) 7263 return SDValue(); 7264 7265 7266 // Insertion/extraction are legal for V128 types. 7267 if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 || 7268 VT == MVT::v2i64 || VT == MVT::v4f32 || VT == MVT::v2f64 || 7269 VT == MVT::v8f16) 7270 return Op; 7271 7272 if (VT != MVT::v8i8 && VT != MVT::v4i16 && VT != MVT::v2i32 && 7273 VT != MVT::v1i64 && VT != MVT::v2f32 && VT != MVT::v4f16) 7274 return SDValue(); 7275 7276 // For V64 types, we perform insertion by expanding the value 7277 // to a V128 type and perform the insertion on that. 7278 SDLoc DL(Op); 7279 SDValue WideVec = WidenVector(Op.getOperand(0), DAG); 7280 EVT WideTy = WideVec.getValueType(); 7281 7282 SDValue Node = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideTy, WideVec, 7283 Op.getOperand(1), Op.getOperand(2)); 7284 // Re-narrow the resultant vector. 7285 return NarrowVector(Node, DAG); 7286 } 7287 7288 SDValue 7289 AArch64TargetLowering::LowerEXTRACT_VECTOR_ELT(SDValue Op, 7290 SelectionDAG &DAG) const { 7291 assert(Op.getOpcode() == ISD::EXTRACT_VECTOR_ELT && "Unknown opcode!"); 7292 7293 // Check for non-constant or out of range lane. 7294 EVT VT = Op.getOperand(0).getValueType(); 7295 ConstantSDNode *CI = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 7296 if (!CI || CI->getZExtValue() >= VT.getVectorNumElements()) 7297 return SDValue(); 7298 7299 7300 // Insertion/extraction are legal for V128 types. 7301 if (VT == MVT::v16i8 || VT == MVT::v8i16 || VT == MVT::v4i32 || 7302 VT == MVT::v2i64 || VT == MVT::v4f32 || VT == MVT::v2f64 || 7303 VT == MVT::v8f16) 7304 return Op; 7305 7306 if (VT != MVT::v8i8 && VT != MVT::v4i16 && VT != MVT::v2i32 && 7307 VT != MVT::v1i64 && VT != MVT::v2f32 && VT != MVT::v4f16) 7308 return SDValue(); 7309 7310 // For V64 types, we perform extraction by expanding the value 7311 // to a V128 type and perform the extraction on that. 7312 SDLoc DL(Op); 7313 SDValue WideVec = WidenVector(Op.getOperand(0), DAG); 7314 EVT WideTy = WideVec.getValueType(); 7315 7316 EVT ExtrTy = WideTy.getVectorElementType(); 7317 if (ExtrTy == MVT::i16 || ExtrTy == MVT::i8) 7318 ExtrTy = MVT::i32; 7319 7320 // For extractions, we just return the result directly. 7321 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ExtrTy, WideVec, 7322 Op.getOperand(1)); 7323 } 7324 7325 SDValue AArch64TargetLowering::LowerEXTRACT_SUBVECTOR(SDValue Op, 7326 SelectionDAG &DAG) const { 7327 EVT VT = Op.getOperand(0).getValueType(); 7328 SDLoc dl(Op); 7329 // Just in case... 7330 if (!VT.isVector()) 7331 return SDValue(); 7332 7333 ConstantSDNode *Cst = dyn_cast<ConstantSDNode>(Op.getOperand(1)); 7334 if (!Cst) 7335 return SDValue(); 7336 unsigned Val = Cst->getZExtValue(); 7337 7338 unsigned Size = Op.getValueSizeInBits(); 7339 7340 // This will get lowered to an appropriate EXTRACT_SUBREG in ISel. 7341 if (Val == 0) 7342 return Op; 7343 7344 // If this is extracting the upper 64-bits of a 128-bit vector, we match 7345 // that directly. 
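  // E.g. (extract_subvector (v4i32 V), 2) is the high v2i32 half of V, for
  // which ISel has a direct pattern.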
  if (Size == 64 && Val * VT.getScalarSizeInBits() == 64)
    return Op;

  return SDValue();
}

bool AArch64TargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
  if (VT.getVectorNumElements() == 4 &&
      (VT.is128BitVector() || VT.is64BitVector())) {
    unsigned PFIndexes[4];
    for (unsigned i = 0; i != 4; ++i) {
      if (M[i] < 0)
        PFIndexes[i] = 8;
      else
        PFIndexes[i] = M[i];
    }

    // Compute the index in the perfect shuffle table.
    unsigned PFTableIndex = PFIndexes[0] * 9 * 9 * 9 + PFIndexes[1] * 9 * 9 +
                            PFIndexes[2] * 9 + PFIndexes[3];
    unsigned PFEntry = PerfectShuffleTable[PFTableIndex];
    unsigned Cost = (PFEntry >> 30);

    if (Cost <= 4)
      return true;
  }

  bool DummyBool;
  int DummyInt;
  unsigned DummyUnsigned;

  return (ShuffleVectorSDNode::isSplatMask(&M[0], VT) || isREVMask(M, VT, 64) ||
          isREVMask(M, VT, 32) || isREVMask(M, VT, 16) ||
          isEXTMask(M, VT, DummyBool, DummyUnsigned) ||
          // isTBLMask(M, VT) || // FIXME: Port TBL support from ARM.
          isTRNMask(M, VT, DummyUnsigned) || isUZPMask(M, VT, DummyUnsigned) ||
          isZIPMask(M, VT, DummyUnsigned) ||
          isTRN_v_undef_Mask(M, VT, DummyUnsigned) ||
          isUZP_v_undef_Mask(M, VT, DummyUnsigned) ||
          isZIP_v_undef_Mask(M, VT, DummyUnsigned) ||
          isINSMask(M, VT.getVectorNumElements(), DummyBool, DummyInt) ||
          isConcatMask(M, VT, VT.getSizeInBits() == 128));
}

/// getVShiftImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift operation, where all the elements of the
/// build_vector must have the same constant integer value.
static bool getVShiftImm(SDValue Op, unsigned ElementBits, int64_t &Cnt) {
  // Ignore bit_converts.
  while (Op.getOpcode() == ISD::BITCAST)
    Op = Op.getOperand(0);
  BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(Op.getNode());
  APInt SplatBits, SplatUndef;
  unsigned SplatBitSize;
  bool HasAnyUndefs;
  if (!BVN || !BVN->isConstantSplat(SplatBits, SplatUndef, SplatBitSize,
                                    HasAnyUndefs, ElementBits) ||
      SplatBitSize > ElementBits)
    return false;
  Cnt = SplatBits.getSExtValue();
  return true;
}

/// isVShiftLImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift left operation. That value must be in the range:
///   0 <= Value < ElementBits for a left shift; or
///   0 <= Value <= ElementBits for a long left shift.
static bool isVShiftLImm(SDValue Op, EVT VT, bool isLong, int64_t &Cnt) {
  assert(VT.isVector() && "vector shift count is not a vector type");
  int64_t ElementBits = VT.getScalarSizeInBits();
  if (!getVShiftImm(Op, ElementBits, Cnt))
    return false;
  return (Cnt >= 0 && (isLong ? Cnt - 1 : Cnt) < ElementBits);
}

/// isVShiftRImm - Check if this is a valid build_vector for the immediate
/// operand of a vector shift right operation. The value must be in the range:
///   1 <= Value <= ElementBits for a right shift; or
///   1 <= Value <= ElementBits/2 for a narrow right shift.
static bool isVShiftRImm(SDValue Op, EVT VT, bool isNarrow, int64_t &Cnt) {
  assert(VT.isVector() && "vector shift count is not a vector type");
  int64_t ElementBits = VT.getScalarSizeInBits();
  if (!getVShiftImm(Op, ElementBits, Cnt))
    return false;
  return (Cnt >= 1 && Cnt <= (isNarrow ?
ElementBits / 2 : ElementBits)); 7430 } 7431 7432 SDValue AArch64TargetLowering::LowerVectorSRA_SRL_SHL(SDValue Op, 7433 SelectionDAG &DAG) const { 7434 EVT VT = Op.getValueType(); 7435 SDLoc DL(Op); 7436 int64_t Cnt; 7437 7438 if (!Op.getOperand(1).getValueType().isVector()) 7439 return Op; 7440 unsigned EltSize = VT.getScalarSizeInBits(); 7441 7442 switch (Op.getOpcode()) { 7443 default: 7444 llvm_unreachable("unexpected shift opcode"); 7445 7446 case ISD::SHL: 7447 if (isVShiftLImm(Op.getOperand(1), VT, false, Cnt) && Cnt < EltSize) 7448 return DAG.getNode(AArch64ISD::VSHL, DL, VT, Op.getOperand(0), 7449 DAG.getConstant(Cnt, DL, MVT::i32)); 7450 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 7451 DAG.getConstant(Intrinsic::aarch64_neon_ushl, DL, 7452 MVT::i32), 7453 Op.getOperand(0), Op.getOperand(1)); 7454 case ISD::SRA: 7455 case ISD::SRL: 7456 // Right shift immediate 7457 if (isVShiftRImm(Op.getOperand(1), VT, false, Cnt) && Cnt < EltSize) { 7458 unsigned Opc = 7459 (Op.getOpcode() == ISD::SRA) ? AArch64ISD::VASHR : AArch64ISD::VLSHR; 7460 return DAG.getNode(Opc, DL, VT, Op.getOperand(0), 7461 DAG.getConstant(Cnt, DL, MVT::i32)); 7462 } 7463 7464 // Right shift register. Note, there is not a shift right register 7465 // instruction, but the shift left register instruction takes a signed 7466 // value, where negative numbers specify a right shift. 7467 unsigned Opc = (Op.getOpcode() == ISD::SRA) ? Intrinsic::aarch64_neon_sshl 7468 : Intrinsic::aarch64_neon_ushl; 7469 // negate the shift amount 7470 SDValue NegShift = DAG.getNode(AArch64ISD::NEG, DL, VT, Op.getOperand(1)); 7471 SDValue NegShiftLeft = 7472 DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VT, 7473 DAG.getConstant(Opc, DL, MVT::i32), Op.getOperand(0), 7474 NegShift); 7475 return NegShiftLeft; 7476 } 7477 7478 return SDValue(); 7479 } 7480 7481 static SDValue EmitVectorComparison(SDValue LHS, SDValue RHS, 7482 AArch64CC::CondCode CC, bool NoNans, EVT VT, 7483 const SDLoc &dl, SelectionDAG &DAG) { 7484 EVT SrcVT = LHS.getValueType(); 7485 assert(VT.getSizeInBits() == SrcVT.getSizeInBits() && 7486 "function only supposed to emit natural comparisons"); 7487 7488 BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(RHS.getNode()); 7489 APInt CnstBits(VT.getSizeInBits(), 0); 7490 APInt UndefBits(VT.getSizeInBits(), 0); 7491 bool IsCnst = BVN && resolveBuildVector(BVN, CnstBits, UndefBits); 7492 bool IsZero = IsCnst && (CnstBits == 0); 7493 7494 if (SrcVT.getVectorElementType().isFloatingPoint()) { 7495 switch (CC) { 7496 default: 7497 return SDValue(); 7498 case AArch64CC::NE: { 7499 SDValue Fcmeq; 7500 if (IsZero) 7501 Fcmeq = DAG.getNode(AArch64ISD::FCMEQz, dl, VT, LHS); 7502 else 7503 Fcmeq = DAG.getNode(AArch64ISD::FCMEQ, dl, VT, LHS, RHS); 7504 return DAG.getNode(AArch64ISD::NOT, dl, VT, Fcmeq); 7505 } 7506 case AArch64CC::EQ: 7507 if (IsZero) 7508 return DAG.getNode(AArch64ISD::FCMEQz, dl, VT, LHS); 7509 return DAG.getNode(AArch64ISD::FCMEQ, dl, VT, LHS, RHS); 7510 case AArch64CC::GE: 7511 if (IsZero) 7512 return DAG.getNode(AArch64ISD::FCMGEz, dl, VT, LHS); 7513 return DAG.getNode(AArch64ISD::FCMGE, dl, VT, LHS, RHS); 7514 case AArch64CC::GT: 7515 if (IsZero) 7516 return DAG.getNode(AArch64ISD::FCMGTz, dl, VT, LHS); 7517 return DAG.getNode(AArch64ISD::FCMGT, dl, VT, LHS, RHS); 7518 case AArch64CC::LS: 7519 if (IsZero) 7520 return DAG.getNode(AArch64ISD::FCMLEz, dl, VT, LHS); 7521 return DAG.getNode(AArch64ISD::FCMGE, dl, VT, RHS, LHS); 7522 case AArch64CC::LT: 7523 if (!NoNans) 7524 return SDValue(); 7525 // If we ignore 
NaNs then we can use the MI implementation.
7526       LLVM_FALLTHROUGH;
7527     case AArch64CC::MI:
7528       if (IsZero)
7529         return DAG.getNode(AArch64ISD::FCMLTz, dl, VT, LHS);
7530       return DAG.getNode(AArch64ISD::FCMGT, dl, VT, RHS, LHS);
7531     }
7532   }
7533
7534   switch (CC) {
7535   default:
7536     return SDValue();
7537   case AArch64CC::NE: {
7538     SDValue Cmeq;
7539     if (IsZero)
7540       Cmeq = DAG.getNode(AArch64ISD::CMEQz, dl, VT, LHS);
7541     else
7542       Cmeq = DAG.getNode(AArch64ISD::CMEQ, dl, VT, LHS, RHS);
7543     return DAG.getNode(AArch64ISD::NOT, dl, VT, Cmeq);
7544   }
7545   case AArch64CC::EQ:
7546     if (IsZero)
7547       return DAG.getNode(AArch64ISD::CMEQz, dl, VT, LHS);
7548     return DAG.getNode(AArch64ISD::CMEQ, dl, VT, LHS, RHS);
7549   case AArch64CC::GE:
7550     if (IsZero)
7551       return DAG.getNode(AArch64ISD::CMGEz, dl, VT, LHS);
7552     return DAG.getNode(AArch64ISD::CMGE, dl, VT, LHS, RHS);
7553   case AArch64CC::GT:
7554     if (IsZero)
7555       return DAG.getNode(AArch64ISD::CMGTz, dl, VT, LHS);
7556     return DAG.getNode(AArch64ISD::CMGT, dl, VT, LHS, RHS);
7557   case AArch64CC::LE:
7558     if (IsZero)
7559       return DAG.getNode(AArch64ISD::CMLEz, dl, VT, LHS);
7560     return DAG.getNode(AArch64ISD::CMGE, dl, VT, RHS, LHS);
7561   case AArch64CC::LS:
7562     return DAG.getNode(AArch64ISD::CMHS, dl, VT, RHS, LHS);
7563   case AArch64CC::LO:
7564     return DAG.getNode(AArch64ISD::CMHI, dl, VT, RHS, LHS);
7565   case AArch64CC::LT:
7566     if (IsZero)
7567       return DAG.getNode(AArch64ISD::CMLTz, dl, VT, LHS);
7568     return DAG.getNode(AArch64ISD::CMGT, dl, VT, RHS, LHS);
7569   case AArch64CC::HI:
7570     return DAG.getNode(AArch64ISD::CMHI, dl, VT, LHS, RHS);
7571   case AArch64CC::HS:
7572     return DAG.getNode(AArch64ISD::CMHS, dl, VT, LHS, RHS);
7573   }
7574 }
7575
7576 SDValue AArch64TargetLowering::LowerVSETCC(SDValue Op,
7577                                            SelectionDAG &DAG) const {
7578   ISD::CondCode CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
7579   SDValue LHS = Op.getOperand(0);
7580   SDValue RHS = Op.getOperand(1);
7581   EVT CmpVT = LHS.getValueType().changeVectorElementTypeToInteger();
7582   SDLoc dl(Op);
7583
7584   if (LHS.getValueType().getVectorElementType().isInteger()) {
7585     assert(LHS.getValueType() == RHS.getValueType());
7586     AArch64CC::CondCode AArch64CC = changeIntCCToAArch64CC(CC);
7587     SDValue Cmp =
7588         EmitVectorComparison(LHS, RHS, AArch64CC, false, CmpVT, dl, DAG);
7589     return DAG.getSExtOrTrunc(Cmp, dl, Op.getValueType());
7590   }
7591
7592   const bool FullFP16 =
7593       static_cast<const AArch64Subtarget &>(DAG.getSubtarget()).hasFullFP16();
7594
7595   // Make v4f16 (only) fcmp operations utilise vector instructions;
7596   // v8f16 support will be a little more complicated.
7597   if (LHS.getValueType().getVectorElementType() == MVT::f16) {
7598     if (!FullFP16 && LHS.getValueType().getVectorNumElements() == 4) {
7599       LHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v4f32, LHS);
7600       RHS = DAG.getNode(ISD::FP_EXTEND, dl, MVT::v4f32, RHS);
7601       SDValue NewSetcc = DAG.getSetCC(dl, MVT::v4i16, LHS, RHS, CC);
7602       DAG.ReplaceAllUsesWith(Op, NewSetcc);
7603       CmpVT = MVT::v4i32;
7604     } else
7605       return SDValue();
7606   }
7607
7608   assert(LHS.getValueType().getVectorElementType() == MVT::f32 ||
7609          LHS.getValueType().getVectorElementType() == MVT::f64);
7610
7611   // Unfortunately, the mapping of LLVM FP CC's onto AArch64 CC's isn't totally
7612   // clean. Some of them require two branches to implement.
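  // For example, SETONE has no single NEON compare; it is emitted as two
  // comparisons ORed together, fcmgt(RHS, LHS) | fcmgt(LHS, RHS), via the
  // (CC1, CC2) pair produced below.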
7613 AArch64CC::CondCode CC1, CC2; 7614 bool ShouldInvert; 7615 changeVectorFPCCToAArch64CC(CC, CC1, CC2, ShouldInvert); 7616 7617 bool NoNaNs = getTargetMachine().Options.NoNaNsFPMath; 7618 SDValue Cmp = 7619 EmitVectorComparison(LHS, RHS, CC1, NoNaNs, CmpVT, dl, DAG); 7620 if (!Cmp.getNode()) 7621 return SDValue(); 7622 7623 if (CC2 != AArch64CC::AL) { 7624 SDValue Cmp2 = 7625 EmitVectorComparison(LHS, RHS, CC2, NoNaNs, CmpVT, dl, DAG); 7626 if (!Cmp2.getNode()) 7627 return SDValue(); 7628 7629 Cmp = DAG.getNode(ISD::OR, dl, CmpVT, Cmp, Cmp2); 7630 } 7631 7632 Cmp = DAG.getSExtOrTrunc(Cmp, dl, Op.getValueType()); 7633 7634 if (ShouldInvert) 7635 return Cmp = DAG.getNOT(dl, Cmp, Cmp.getValueType()); 7636 7637 return Cmp; 7638 } 7639 7640 static SDValue getReductionSDNode(unsigned Op, SDLoc DL, SDValue ScalarOp, 7641 SelectionDAG &DAG) { 7642 SDValue VecOp = ScalarOp.getOperand(0); 7643 auto Rdx = DAG.getNode(Op, DL, VecOp.getSimpleValueType(), VecOp); 7644 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ScalarOp.getValueType(), Rdx, 7645 DAG.getConstant(0, DL, MVT::i64)); 7646 } 7647 7648 SDValue AArch64TargetLowering::LowerVECREDUCE(SDValue Op, 7649 SelectionDAG &DAG) const { 7650 SDLoc dl(Op); 7651 switch (Op.getOpcode()) { 7652 case ISD::VECREDUCE_ADD: 7653 return getReductionSDNode(AArch64ISD::UADDV, dl, Op, DAG); 7654 case ISD::VECREDUCE_SMAX: 7655 return getReductionSDNode(AArch64ISD::SMAXV, dl, Op, DAG); 7656 case ISD::VECREDUCE_SMIN: 7657 return getReductionSDNode(AArch64ISD::SMINV, dl, Op, DAG); 7658 case ISD::VECREDUCE_UMAX: 7659 return getReductionSDNode(AArch64ISD::UMAXV, dl, Op, DAG); 7660 case ISD::VECREDUCE_UMIN: 7661 return getReductionSDNode(AArch64ISD::UMINV, dl, Op, DAG); 7662 case ISD::VECREDUCE_FMAX: { 7663 assert(Op->getFlags().hasNoNaNs() && "fmax vector reduction needs NoNaN flag"); 7664 return DAG.getNode( 7665 ISD::INTRINSIC_WO_CHAIN, dl, Op.getValueType(), 7666 DAG.getConstant(Intrinsic::aarch64_neon_fmaxnmv, dl, MVT::i32), 7667 Op.getOperand(0)); 7668 } 7669 case ISD::VECREDUCE_FMIN: { 7670 assert(Op->getFlags().hasNoNaNs() && "fmin vector reduction needs NoNaN flag"); 7671 return DAG.getNode( 7672 ISD::INTRINSIC_WO_CHAIN, dl, Op.getValueType(), 7673 DAG.getConstant(Intrinsic::aarch64_neon_fminnmv, dl, MVT::i32), 7674 Op.getOperand(0)); 7675 } 7676 default: 7677 llvm_unreachable("Unhandled reduction"); 7678 } 7679 } 7680 7681 SDValue AArch64TargetLowering::LowerATOMIC_LOAD_SUB(SDValue Op, 7682 SelectionDAG &DAG) const { 7683 auto &Subtarget = static_cast<const AArch64Subtarget &>(DAG.getSubtarget()); 7684 if (!Subtarget.hasLSE()) 7685 return SDValue(); 7686 7687 // LSE has an atomic load-add instruction, but not a load-sub. 7688 SDLoc dl(Op); 7689 MVT VT = Op.getSimpleValueType(); 7690 SDValue RHS = Op.getOperand(2); 7691 AtomicSDNode *AN = cast<AtomicSDNode>(Op.getNode()); 7692 RHS = DAG.getNode(ISD::SUB, dl, VT, DAG.getConstant(0, dl, VT), RHS); 7693 return DAG.getAtomic(ISD::ATOMIC_LOAD_ADD, dl, AN->getMemoryVT(), 7694 Op.getOperand(0), Op.getOperand(1), RHS, 7695 AN->getMemOperand()); 7696 } 7697 7698 SDValue AArch64TargetLowering::LowerATOMIC_LOAD_AND(SDValue Op, 7699 SelectionDAG &DAG) const { 7700 auto &Subtarget = static_cast<const AArch64Subtarget &>(DAG.getSubtarget()); 7701 if (!Subtarget.hasLSE()) 7702 return SDValue(); 7703 7704 // LSE has an atomic load-clear instruction, but not a load-and. 
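  // For example, "atomicrmw and" with operand V can be emitted as LDCLR with
  // ~V, since Mem & V == Mem & ~(~V); the XOR with -1 below builds ~V.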
7705 SDLoc dl(Op); 7706 MVT VT = Op.getSimpleValueType(); 7707 SDValue RHS = Op.getOperand(2); 7708 AtomicSDNode *AN = cast<AtomicSDNode>(Op.getNode()); 7709 RHS = DAG.getNode(ISD::XOR, dl, VT, DAG.getConstant(-1ULL, dl, VT), RHS); 7710 return DAG.getAtomic(ISD::ATOMIC_LOAD_CLR, dl, AN->getMemoryVT(), 7711 Op.getOperand(0), Op.getOperand(1), RHS, 7712 AN->getMemOperand()); 7713 } 7714 7715 SDValue AArch64TargetLowering::LowerWindowsDYNAMIC_STACKALLOC( 7716 SDValue Op, SDValue Chain, SDValue &Size, SelectionDAG &DAG) const { 7717 SDLoc dl(Op); 7718 EVT PtrVT = getPointerTy(DAG.getDataLayout()); 7719 SDValue Callee = DAG.getTargetExternalSymbol("__chkstk", PtrVT, 0); 7720 7721 const uint32_t *Mask = 7722 Subtarget->getRegisterInfo()->getWindowsStackProbePreservedMask(); 7723 7724 Size = DAG.getNode(ISD::SRL, dl, MVT::i64, Size, 7725 DAG.getConstant(4, dl, MVT::i64)); 7726 Chain = DAG.getCopyToReg(Chain, dl, AArch64::X15, Size, SDValue()); 7727 Chain = 7728 DAG.getNode(AArch64ISD::CALL, dl, DAG.getVTList(MVT::Other, MVT::Glue), 7729 Chain, Callee, DAG.getRegister(AArch64::X15, MVT::i64), 7730 DAG.getRegisterMask(Mask), Chain.getValue(1)); 7731 // To match the actual intent better, we should read the output from X15 here 7732 // again (instead of potentially spilling it to the stack), but rereading Size 7733 // from X15 here doesn't work at -O0, since it thinks that X15 is undefined 7734 // here. 7735 7736 Size = DAG.getNode(ISD::SHL, dl, MVT::i64, Size, 7737 DAG.getConstant(4, dl, MVT::i64)); 7738 return Chain; 7739 } 7740 7741 SDValue 7742 AArch64TargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op, 7743 SelectionDAG &DAG) const { 7744 assert(Subtarget->isTargetWindows() && 7745 "Only Windows alloca probing supported"); 7746 SDLoc dl(Op); 7747 // Get the inputs. 7748 SDNode *Node = Op.getNode(); 7749 SDValue Chain = Op.getOperand(0); 7750 SDValue Size = Op.getOperand(1); 7751 unsigned Align = cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue(); 7752 EVT VT = Node->getValueType(0); 7753 7754 if (DAG.getMachineFunction().getFunction().hasFnAttribute( 7755 "no-stack-arg-probe")) { 7756 SDValue SP = DAG.getCopyFromReg(Chain, dl, AArch64::SP, MVT::i64); 7757 Chain = SP.getValue(1); 7758 SP = DAG.getNode(ISD::SUB, dl, MVT::i64, SP, Size); 7759 if (Align) 7760 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0), 7761 DAG.getConstant(-(uint64_t)Align, dl, VT)); 7762 Chain = DAG.getCopyToReg(Chain, dl, AArch64::SP, SP); 7763 SDValue Ops[2] = {SP, Chain}; 7764 return DAG.getMergeValues(Ops, dl); 7765 } 7766 7767 Chain = DAG.getCALLSEQ_START(Chain, 0, 0, dl); 7768 7769 Chain = LowerWindowsDYNAMIC_STACKALLOC(Op, Chain, Size, DAG); 7770 7771 SDValue SP = DAG.getCopyFromReg(Chain, dl, AArch64::SP, MVT::i64); 7772 Chain = SP.getValue(1); 7773 SP = DAG.getNode(ISD::SUB, dl, MVT::i64, SP, Size); 7774 if (Align) 7775 SP = DAG.getNode(ISD::AND, dl, VT, SP.getValue(0), 7776 DAG.getConstant(-(uint64_t)Align, dl, VT)); 7777 Chain = DAG.getCopyToReg(Chain, dl, AArch64::SP, SP); 7778 7779 Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(0, dl, true), 7780 DAG.getIntPtrConstant(0, dl, true), SDValue(), dl); 7781 7782 SDValue Ops[2] = {SP, Chain}; 7783 return DAG.getMergeValues(Ops, dl); 7784 } 7785 7786 /// getTgtMemIntrinsic - Represent NEON load and store intrinsics as 7787 /// MemIntrinsicNodes. The associated MachineMemOperands record the alignment 7788 /// specified in the intrinsic calls. 
7789 bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info, 7790 const CallInst &I, 7791 MachineFunction &MF, 7792 unsigned Intrinsic) const { 7793 auto &DL = I.getModule()->getDataLayout(); 7794 switch (Intrinsic) { 7795 case Intrinsic::aarch64_neon_ld2: 7796 case Intrinsic::aarch64_neon_ld3: 7797 case Intrinsic::aarch64_neon_ld4: 7798 case Intrinsic::aarch64_neon_ld1x2: 7799 case Intrinsic::aarch64_neon_ld1x3: 7800 case Intrinsic::aarch64_neon_ld1x4: 7801 case Intrinsic::aarch64_neon_ld2lane: 7802 case Intrinsic::aarch64_neon_ld3lane: 7803 case Intrinsic::aarch64_neon_ld4lane: 7804 case Intrinsic::aarch64_neon_ld2r: 7805 case Intrinsic::aarch64_neon_ld3r: 7806 case Intrinsic::aarch64_neon_ld4r: { 7807 Info.opc = ISD::INTRINSIC_W_CHAIN; 7808 // Conservatively set memVT to the entire set of vectors loaded. 7809 uint64_t NumElts = DL.getTypeSizeInBits(I.getType()) / 64; 7810 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 7811 Info.ptrVal = I.getArgOperand(I.getNumArgOperands() - 1); 7812 Info.offset = 0; 7813 Info.align = 0; 7814 // volatile loads with NEON intrinsics not supported 7815 Info.flags = MachineMemOperand::MOLoad; 7816 return true; 7817 } 7818 case Intrinsic::aarch64_neon_st2: 7819 case Intrinsic::aarch64_neon_st3: 7820 case Intrinsic::aarch64_neon_st4: 7821 case Intrinsic::aarch64_neon_st1x2: 7822 case Intrinsic::aarch64_neon_st1x3: 7823 case Intrinsic::aarch64_neon_st1x4: 7824 case Intrinsic::aarch64_neon_st2lane: 7825 case Intrinsic::aarch64_neon_st3lane: 7826 case Intrinsic::aarch64_neon_st4lane: { 7827 Info.opc = ISD::INTRINSIC_VOID; 7828 // Conservatively set memVT to the entire set of vectors stored. 7829 unsigned NumElts = 0; 7830 for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) { 7831 Type *ArgTy = I.getArgOperand(ArgI)->getType(); 7832 if (!ArgTy->isVectorTy()) 7833 break; 7834 NumElts += DL.getTypeSizeInBits(ArgTy) / 64; 7835 } 7836 Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts); 7837 Info.ptrVal = I.getArgOperand(I.getNumArgOperands() - 1); 7838 Info.offset = 0; 7839 Info.align = 0; 7840 // volatile stores with NEON intrinsics not supported 7841 Info.flags = MachineMemOperand::MOStore; 7842 return true; 7843 } 7844 case Intrinsic::aarch64_ldaxr: 7845 case Intrinsic::aarch64_ldxr: { 7846 PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType()); 7847 Info.opc = ISD::INTRINSIC_W_CHAIN; 7848 Info.memVT = MVT::getVT(PtrTy->getElementType()); 7849 Info.ptrVal = I.getArgOperand(0); 7850 Info.offset = 0; 7851 Info.align = DL.getABITypeAlignment(PtrTy->getElementType()); 7852 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile; 7853 return true; 7854 } 7855 case Intrinsic::aarch64_stlxr: 7856 case Intrinsic::aarch64_stxr: { 7857 PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType()); 7858 Info.opc = ISD::INTRINSIC_W_CHAIN; 7859 Info.memVT = MVT::getVT(PtrTy->getElementType()); 7860 Info.ptrVal = I.getArgOperand(1); 7861 Info.offset = 0; 7862 Info.align = DL.getABITypeAlignment(PtrTy->getElementType()); 7863 Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile; 7864 return true; 7865 } 7866 case Intrinsic::aarch64_ldaxp: 7867 case Intrinsic::aarch64_ldxp: 7868 Info.opc = ISD::INTRINSIC_W_CHAIN; 7869 Info.memVT = MVT::i128; 7870 Info.ptrVal = I.getArgOperand(0); 7871 Info.offset = 0; 7872 Info.align = 16; 7873 Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile; 7874 return true; 7875 case 
Intrinsic::aarch64_stlxp:
7876   case Intrinsic::aarch64_stxp:
7877     Info.opc = ISD::INTRINSIC_W_CHAIN;
7878     Info.memVT = MVT::i128;
7879     Info.ptrVal = I.getArgOperand(2);
7880     Info.offset = 0;
7881     Info.align = 16;
7882     Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
7883     return true;
7884   default:
7885     break;
7886   }
7887
7888   return false;
7889 }
7890
7891 bool AArch64TargetLowering::shouldReduceLoadWidth(SDNode *Load,
7892                                                   ISD::LoadExtType ExtTy,
7893                                                   EVT NewVT) const {
7894   // If we're reducing the load width in order to avoid having to use an extra
7895   // instruction to do the extension, then it's probably a good idea.
7896   if (ExtTy != ISD::NON_EXTLOAD)
7897     return true;
7898   // Don't reduce load width if it would prevent us from combining a shift into
7899   // the offset.
7900   MemSDNode *Mem = dyn_cast<MemSDNode>(Load);
7901   assert(Mem);
7902   const SDValue &Base = Mem->getBasePtr();
7903   if (Base.getOpcode() == ISD::ADD &&
7904       Base.getOperand(1).getOpcode() == ISD::SHL &&
7905       Base.getOperand(1).hasOneUse() &&
7906       Base.getOperand(1).getOperand(1).getOpcode() == ISD::Constant) {
7907     // The shift can be combined if it matches the size of the value being
7908     // loaded (and so reducing the width would make it not match).
7909     uint64_t ShiftAmount = Base.getOperand(1).getConstantOperandVal(1);
7910     uint64_t LoadBytes = Mem->getMemoryVT().getSizeInBits()/8;
7911     if (ShiftAmount == Log2_32(LoadBytes))
7912       return false;
7913   }
7914   // We have no reason to disallow reducing the load width, so allow it.
7915   return true;
7916 }
7917
7918 // Truncations from 64-bit GPR to 32-bit GPR are free.
7919 bool AArch64TargetLowering::isTruncateFree(Type *Ty1, Type *Ty2) const {
7920   if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy())
7921     return false;
7922   unsigned NumBits1 = Ty1->getPrimitiveSizeInBits();
7923   unsigned NumBits2 = Ty2->getPrimitiveSizeInBits();
7924   return NumBits1 > NumBits2;
7925 }
7926 bool AArch64TargetLowering::isTruncateFree(EVT VT1, EVT VT2) const {
7927   if (VT1.isVector() || VT2.isVector() || !VT1.isInteger() || !VT2.isInteger())
7928     return false;
7929   unsigned NumBits1 = VT1.getSizeInBits();
7930   unsigned NumBits2 = VT2.getSizeInBits();
7931   return NumBits1 > NumBits2;
7932 }
7933
7934 /// Check if it is profitable to hoist an instruction in a then/else block
7935 /// up into the if block. Not profitable if I and its user can form an FMA
7936 /// instruction, because we prefer FMSUB/FMADD.
7937 bool AArch64TargetLowering::isProfitableToHoist(Instruction *I) const {
7938   if (I->getOpcode() != Instruction::FMul)
7939     return true;
7940
7941   if (!I->hasOneUse())
7942     return true;
7943
7944   Instruction *User = I->user_back();
7945
7946   if (User &&
7947       !(User->getOpcode() == Instruction::FSub ||
7948         User->getOpcode() == Instruction::FAdd))
7949     return true;
7950
7951   const TargetOptions &Options = getTargetMachine().Options;
7952   const DataLayout &DL = I->getModule()->getDataLayout();
7953   EVT VT = getValueType(DL, User->getOperand(0)->getType());
7954
7955   return !(isFMAFasterThanFMulAndFAdd(VT) &&
7956            isOperationLegalOrCustom(ISD::FMA, VT) &&
7957            (Options.AllowFPOpFusion == FPOpFusion::Fast ||
7958             Options.UnsafeFPMath));
7959 }
7960
7961 // All 32-bit GPR operations implicitly zero the high-half of the corresponding
7962 // 64-bit GPR.
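// For example, "add w0, w1, w2" also zeroes bits [63:32] of x0, so the
// i32-to-i64 zero-extend of its result needs no extra instruction.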
7963 bool AArch64TargetLowering::isZExtFree(Type *Ty1, Type *Ty2) const { 7964 if (!Ty1->isIntegerTy() || !Ty2->isIntegerTy()) 7965 return false; 7966 unsigned NumBits1 = Ty1->getPrimitiveSizeInBits(); 7967 unsigned NumBits2 = Ty2->getPrimitiveSizeInBits(); 7968 return NumBits1 == 32 && NumBits2 == 64; 7969 } 7970 bool AArch64TargetLowering::isZExtFree(EVT VT1, EVT VT2) const { 7971 if (VT1.isVector() || VT2.isVector() || !VT1.isInteger() || !VT2.isInteger()) 7972 return false; 7973 unsigned NumBits1 = VT1.getSizeInBits(); 7974 unsigned NumBits2 = VT2.getSizeInBits(); 7975 return NumBits1 == 32 && NumBits2 == 64; 7976 } 7977 7978 bool AArch64TargetLowering::isZExtFree(SDValue Val, EVT VT2) const { 7979 EVT VT1 = Val.getValueType(); 7980 if (isZExtFree(VT1, VT2)) { 7981 return true; 7982 } 7983 7984 if (Val.getOpcode() != ISD::LOAD) 7985 return false; 7986 7987 // 8-, 16-, and 32-bit integer loads all implicitly zero-extend. 7988 return (VT1.isSimple() && !VT1.isVector() && VT1.isInteger() && 7989 VT2.isSimple() && !VT2.isVector() && VT2.isInteger() && 7990 VT1.getSizeInBits() <= 32); 7991 } 7992 7993 bool AArch64TargetLowering::isExtFreeImpl(const Instruction *Ext) const { 7994 if (isa<FPExtInst>(Ext)) 7995 return false; 7996 7997 // Vector types are not free. 7998 if (Ext->getType()->isVectorTy()) 7999 return false; 8000 8001 for (const Use &U : Ext->uses()) { 8002 // The extension is free if we can fold it with a left shift in an 8003 // addressing mode or an arithmetic operation: add, sub, and cmp. 8004 8005 // Is there a shift? 8006 const Instruction *Instr = cast<Instruction>(U.getUser()); 8007 8008 // Is this a constant shift? 8009 switch (Instr->getOpcode()) { 8010 case Instruction::Shl: 8011 if (!isa<ConstantInt>(Instr->getOperand(1))) 8012 return false; 8013 break; 8014 case Instruction::GetElementPtr: { 8015 gep_type_iterator GTI = gep_type_begin(Instr); 8016 auto &DL = Ext->getModule()->getDataLayout(); 8017 std::advance(GTI, U.getOperandNo()-1); 8018 Type *IdxTy = GTI.getIndexedType(); 8019 // This extension will end up with a shift because of the scaling factor. 8020 // 8-bit sized types have a scaling factor of 1, thus a shift amount of 0. 8021 // Get the shift amount based on the scaling factor: 8022 // log2(sizeof(IdxTy)) - log2(8). 8023 uint64_t ShiftAmt = 8024 countTrailingZeros(DL.getTypeStoreSizeInBits(IdxTy)) - 3; 8025 // Is the constant foldable in the shift of the addressing mode? 8026 // I.e., shift amount is between 1 and 4 inclusive. 8027 if (ShiftAmt == 0 || ShiftAmt > 4) 8028 return false; 8029 break; 8030 } 8031 case Instruction::Trunc: 8032 // Check if this is a noop. 8033 // trunc(sext ty1 to ty2) to ty1. 8034 if (Instr->getType() == Ext->getOperand(0)->getType()) 8035 continue; 8036 LLVM_FALLTHROUGH; 8037 default: 8038 return false; 8039 } 8040 8041 // At this point we can use the bfm family, so this extension is free 8042 // for that use. 8043 } 8044 return true; 8045 } 8046 8047 bool AArch64TargetLowering::hasPairedLoad(EVT LoadedType, 8048 unsigned &RequiredAligment) const { 8049 if (!LoadedType.isSimple() || 8050 (!LoadedType.isInteger() && !LoadedType.isFloatingPoint())) 8051 return false; 8052 // Cyclone supports unaligned accesses. 8053 RequiredAligment = 0; 8054 unsigned NumBits = LoadedType.getSizeInBits(); 8055 return NumBits == 32 || NumBits == 64; 8056 } 8057 8058 /// A helper function for determining the number of interleaved accesses we 8059 /// will generate when lowering accesses of the given type. 
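/// For example, a <16 x i32> vector (512 bits) requires (512 + 127) / 128 = 4
/// accesses, while a 64-bit vector needs only one.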
8060 unsigned 8061 AArch64TargetLowering::getNumInterleavedAccesses(VectorType *VecTy, 8062 const DataLayout &DL) const { 8063 return (DL.getTypeSizeInBits(VecTy) + 127) / 128; 8064 } 8065 8066 MachineMemOperand::Flags 8067 AArch64TargetLowering::getMMOFlags(const Instruction &I) const { 8068 if (Subtarget->getProcFamily() == AArch64Subtarget::Falkor && 8069 I.getMetadata(FALKOR_STRIDED_ACCESS_MD) != nullptr) 8070 return MOStridedAccess; 8071 return MachineMemOperand::MONone; 8072 } 8073 8074 bool AArch64TargetLowering::isLegalInterleavedAccessType( 8075 VectorType *VecTy, const DataLayout &DL) const { 8076 8077 unsigned VecSize = DL.getTypeSizeInBits(VecTy); 8078 unsigned ElSize = DL.getTypeSizeInBits(VecTy->getElementType()); 8079 8080 // Ensure the number of vector elements is greater than 1. 8081 if (VecTy->getNumElements() < 2) 8082 return false; 8083 8084 // Ensure the element type is legal. 8085 if (ElSize != 8 && ElSize != 16 && ElSize != 32 && ElSize != 64) 8086 return false; 8087 8088 // Ensure the total vector size is 64 or a multiple of 128. Types larger than 8089 // 128 will be split into multiple interleaved accesses. 8090 return VecSize == 64 || VecSize % 128 == 0; 8091 } 8092 8093 /// Lower an interleaved load into a ldN intrinsic. 8094 /// 8095 /// E.g. Lower an interleaved load (Factor = 2): 8096 /// %wide.vec = load <8 x i32>, <8 x i32>* %ptr 8097 /// %v0 = shuffle %wide.vec, undef, <0, 2, 4, 6> ; Extract even elements 8098 /// %v1 = shuffle %wide.vec, undef, <1, 3, 5, 7> ; Extract odd elements 8099 /// 8100 /// Into: 8101 /// %ld2 = { <4 x i32>, <4 x i32> } call llvm.aarch64.neon.ld2(%ptr) 8102 /// %vec0 = extractelement { <4 x i32>, <4 x i32> } %ld2, i32 0 8103 /// %vec1 = extractelement { <4 x i32>, <4 x i32> } %ld2, i32 1 8104 bool AArch64TargetLowering::lowerInterleavedLoad( 8105 LoadInst *LI, ArrayRef<ShuffleVectorInst *> Shuffles, 8106 ArrayRef<unsigned> Indices, unsigned Factor) const { 8107 assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && 8108 "Invalid interleave factor"); 8109 assert(!Shuffles.empty() && "Empty shufflevector input"); 8110 assert(Shuffles.size() == Indices.size() && 8111 "Unmatched number of shufflevectors and indices"); 8112 8113 const DataLayout &DL = LI->getModule()->getDataLayout(); 8114 8115 VectorType *VecTy = Shuffles[0]->getType(); 8116 8117 // Skip if we do not have NEON and skip illegal vector types. We can 8118 // "legalize" wide vector types into multiple interleaved accesses as long as 8119 // the vector types are divisible by 128. 8120 if (!Subtarget->hasNEON() || !isLegalInterleavedAccessType(VecTy, DL)) 8121 return false; 8122 8123 unsigned NumLoads = getNumInterleavedAccesses(VecTy, DL); 8124 8125 // A pointer vector can not be the return type of the ldN intrinsics. Need to 8126 // load integer vectors first and then convert to pointer vectors. 8127 Type *EltTy = VecTy->getVectorElementType(); 8128 if (EltTy->isPointerTy()) 8129 VecTy = 8130 VectorType::get(DL.getIntPtrType(EltTy), VecTy->getVectorNumElements()); 8131 8132 IRBuilder<> Builder(LI); 8133 8134 // The base address of the load. 8135 Value *BaseAddr = LI->getPointerOperand(); 8136 8137 if (NumLoads > 1) { 8138 // If we're going to generate more than one load, reset the sub-vector type 8139 // to something legal. 8140 VecTy = VectorType::get(VecTy->getVectorElementType(), 8141 VecTy->getVectorNumElements() / NumLoads); 8142 8143 // We will compute the pointer operand of each load from the original base 8144 // address using GEPs. 
Cast the base address to a pointer to the scalar 8145 // element type. 8146 BaseAddr = Builder.CreateBitCast( 8147 BaseAddr, VecTy->getVectorElementType()->getPointerTo( 8148 LI->getPointerAddressSpace())); 8149 } 8150 8151 Type *PtrTy = VecTy->getPointerTo(LI->getPointerAddressSpace()); 8152 Type *Tys[2] = {VecTy, PtrTy}; 8153 static const Intrinsic::ID LoadInts[3] = {Intrinsic::aarch64_neon_ld2, 8154 Intrinsic::aarch64_neon_ld3, 8155 Intrinsic::aarch64_neon_ld4}; 8156 Function *LdNFunc = 8157 Intrinsic::getDeclaration(LI->getModule(), LoadInts[Factor - 2], Tys); 8158 8159 // Holds sub-vectors extracted from the load intrinsic return values. The 8160 // sub-vectors are associated with the shufflevector instructions they will 8161 // replace. 8162 DenseMap<ShuffleVectorInst *, SmallVector<Value *, 4>> SubVecs; 8163 8164 for (unsigned LoadCount = 0; LoadCount < NumLoads; ++LoadCount) { 8165 8166 // If we're generating more than one load, compute the base address of 8167 // subsequent loads as an offset from the previous. 8168 if (LoadCount > 0) 8169 BaseAddr = Builder.CreateConstGEP1_32( 8170 BaseAddr, VecTy->getVectorNumElements() * Factor); 8171 8172 CallInst *LdN = Builder.CreateCall( 8173 LdNFunc, Builder.CreateBitCast(BaseAddr, PtrTy), "ldN"); 8174 8175 // Extract and store the sub-vectors returned by the load intrinsic. 8176 for (unsigned i = 0; i < Shuffles.size(); i++) { 8177 ShuffleVectorInst *SVI = Shuffles[i]; 8178 unsigned Index = Indices[i]; 8179 8180 Value *SubVec = Builder.CreateExtractValue(LdN, Index); 8181 8182 // Convert the integer vector to pointer vector if the element is pointer. 8183 if (EltTy->isPointerTy()) 8184 SubVec = Builder.CreateIntToPtr( 8185 SubVec, VectorType::get(SVI->getType()->getVectorElementType(), 8186 VecTy->getVectorNumElements())); 8187 SubVecs[SVI].push_back(SubVec); 8188 } 8189 } 8190 8191 // Replace uses of the shufflevector instructions with the sub-vectors 8192 // returned by the load intrinsic. If a shufflevector instruction is 8193 // associated with more than one sub-vector, those sub-vectors will be 8194 // concatenated into a single wide vector. 8195 for (ShuffleVectorInst *SVI : Shuffles) { 8196 auto &SubVec = SubVecs[SVI]; 8197 auto *WideVec = 8198 SubVec.size() > 1 ? concatenateVectors(Builder, SubVec) : SubVec[0]; 8199 SVI->replaceAllUsesWith(WideVec); 8200 } 8201 8202 return true; 8203 } 8204 8205 /// Lower an interleaved store into a stN intrinsic. 8206 /// 8207 /// E.g. Lower an interleaved store (Factor = 3): 8208 /// %i.vec = shuffle <8 x i32> %v0, <8 x i32> %v1, 8209 /// <0, 4, 8, 1, 5, 9, 2, 6, 10, 3, 7, 11> 8210 /// store <12 x i32> %i.vec, <12 x i32>* %ptr 8211 /// 8212 /// Into: 8213 /// %sub.v0 = shuffle <8 x i32> %v0, <8 x i32> v1, <0, 1, 2, 3> 8214 /// %sub.v1 = shuffle <8 x i32> %v0, <8 x i32> v1, <4, 5, 6, 7> 8215 /// %sub.v2 = shuffle <8 x i32> %v0, <8 x i32> v1, <8, 9, 10, 11> 8216 /// call void llvm.aarch64.neon.st3(%sub.v0, %sub.v1, %sub.v2, %ptr) 8217 /// 8218 /// Note that the new shufflevectors will be removed and we'll only generate one 8219 /// st3 instruction in CodeGen. 8220 /// 8221 /// Example for a more general valid mask (Factor 3). 
Lower: 8222 /// %i.vec = shuffle <32 x i32> %v0, <32 x i32> %v1, 8223 /// <4, 32, 16, 5, 33, 17, 6, 34, 18, 7, 35, 19> 8224 /// store <12 x i32> %i.vec, <12 x i32>* %ptr 8225 /// 8226 /// Into: 8227 /// %sub.v0 = shuffle <32 x i32> %v0, <32 x i32> v1, <4, 5, 6, 7> 8228 /// %sub.v1 = shuffle <32 x i32> %v0, <32 x i32> v1, <32, 33, 34, 35> 8229 /// %sub.v2 = shuffle <32 x i32> %v0, <32 x i32> v1, <16, 17, 18, 19> 8230 /// call void llvm.aarch64.neon.st3(%sub.v0, %sub.v1, %sub.v2, %ptr) 8231 bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI, 8232 ShuffleVectorInst *SVI, 8233 unsigned Factor) const { 8234 assert(Factor >= 2 && Factor <= getMaxSupportedInterleaveFactor() && 8235 "Invalid interleave factor"); 8236 8237 VectorType *VecTy = SVI->getType(); 8238 assert(VecTy->getVectorNumElements() % Factor == 0 && 8239 "Invalid interleaved store"); 8240 8241 unsigned LaneLen = VecTy->getVectorNumElements() / Factor; 8242 Type *EltTy = VecTy->getVectorElementType(); 8243 VectorType *SubVecTy = VectorType::get(EltTy, LaneLen); 8244 8245 const DataLayout &DL = SI->getModule()->getDataLayout(); 8246 8247 // Skip if we do not have NEON and skip illegal vector types. We can 8248 // "legalize" wide vector types into multiple interleaved accesses as long as 8249 // the vector types are divisible by 128. 8250 if (!Subtarget->hasNEON() || !isLegalInterleavedAccessType(SubVecTy, DL)) 8251 return false; 8252 8253 unsigned NumStores = getNumInterleavedAccesses(SubVecTy, DL); 8254 8255 Value *Op0 = SVI->getOperand(0); 8256 Value *Op1 = SVI->getOperand(1); 8257 IRBuilder<> Builder(SI); 8258 8259 // StN intrinsics don't support pointer vectors as arguments. Convert pointer 8260 // vectors to integer vectors. 8261 if (EltTy->isPointerTy()) { 8262 Type *IntTy = DL.getIntPtrType(EltTy); 8263 unsigned NumOpElts = Op0->getType()->getVectorNumElements(); 8264 8265 // Convert to the corresponding integer vector. 8266 Type *IntVecTy = VectorType::get(IntTy, NumOpElts); 8267 Op0 = Builder.CreatePtrToInt(Op0, IntVecTy); 8268 Op1 = Builder.CreatePtrToInt(Op1, IntVecTy); 8269 8270 SubVecTy = VectorType::get(IntTy, LaneLen); 8271 } 8272 8273 // The base address of the store. 8274 Value *BaseAddr = SI->getPointerOperand(); 8275 8276 if (NumStores > 1) { 8277 // If we're going to generate more than one store, reset the lane length 8278 // and sub-vector type to something legal. 8279 LaneLen /= NumStores; 8280 SubVecTy = VectorType::get(SubVecTy->getVectorElementType(), LaneLen); 8281 8282 // We will compute the pointer operand of each store from the original base 8283 // address using GEPs. Cast the base address to a pointer to the scalar 8284 // element type. 8285 BaseAddr = Builder.CreateBitCast( 8286 BaseAddr, SubVecTy->getVectorElementType()->getPointerTo( 8287 SI->getPointerAddressSpace())); 8288 } 8289 8290 auto Mask = SVI->getShuffleMask(); 8291 8292 Type *PtrTy = SubVecTy->getPointerTo(SI->getPointerAddressSpace()); 8293 Type *Tys[2] = {SubVecTy, PtrTy}; 8294 static const Intrinsic::ID StoreInts[3] = {Intrinsic::aarch64_neon_st2, 8295 Intrinsic::aarch64_neon_st3, 8296 Intrinsic::aarch64_neon_st4}; 8297 Function *StNFunc = 8298 Intrinsic::getDeclaration(SI->getModule(), StoreInts[Factor - 2], Tys); 8299 8300 for (unsigned StoreCount = 0; StoreCount < NumStores; ++StoreCount) { 8301 8302 SmallVector<Value *, 5> Ops; 8303 8304 // Split the shufflevector operands into sub vectors for the new stN call. 
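    // Each stN operand is a run of LaneLen consecutive elements from the
    // concatenation of Op0 and Op1. If the first mask entry for this factor
    // position is undef, scan forward for a defined entry and derive the
    // equivalent start lane from it.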
8305     for (unsigned i = 0; i < Factor; i++) {
8306       unsigned IdxI = StoreCount * LaneLen * Factor + i;
8307       if (Mask[IdxI] >= 0) {
8308         Ops.push_back(Builder.CreateShuffleVector(
8309             Op0, Op1, createSequentialMask(Builder, Mask[IdxI], LaneLen, 0)));
8310       } else {
8311         unsigned StartMask = 0;
8312         for (unsigned j = 1; j < LaneLen; j++) {
8313           unsigned IdxJ = StoreCount * LaneLen * Factor + j;
8314           if (Mask[IdxJ * Factor + IdxI] >= 0) {
8315             StartMask = Mask[IdxJ * Factor + IdxI] - IdxJ;
8316             break;
8317           }
8318         }
8319         // Note: Filling undef gaps with random elements is ok, since
8320         // those elements were being written anyway (with undefs).
8321         // In the case of all undefs we default to using elements from 0.
8322         // Note: StartMask cannot be negative; it's checked in
8323         // isReInterleaveMask.
8324         Ops.push_back(Builder.CreateShuffleVector(
8325             Op0, Op1, createSequentialMask(Builder, StartMask, LaneLen, 0)));
8326       }
8327     }
8328
8329     // If we're generating more than one store, compute the base address of
8330     // subsequent stores as an offset from the previous one.
8331     if (StoreCount > 0)
8332       BaseAddr = Builder.CreateConstGEP1_32(BaseAddr, LaneLen * Factor);
8333
8334     Ops.push_back(Builder.CreateBitCast(BaseAddr, PtrTy));
8335     Builder.CreateCall(StNFunc, Ops);
8336   }
8337   return true;
8338 }
8339
8340 static bool memOpAlign(unsigned DstAlign, unsigned SrcAlign,
8341                        unsigned AlignCheck) {
8342   return ((SrcAlign == 0 || SrcAlign % AlignCheck == 0) &&
8343           (DstAlign == 0 || DstAlign % AlignCheck == 0));
8344 }
8345
8346 EVT AArch64TargetLowering::getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
8347                                                unsigned SrcAlign, bool IsMemset,
8348                                                bool ZeroMemset,
8349                                                bool MemcpyStrSrc,
8350                                                MachineFunction &MF) const {
8351   // Don't use AdvSIMD to implement 16-byte memset. It would have taken one
8352   // instruction to materialize the v2i64 zero and one store (with restrictive
8353   // addressing mode). Just do two i64 stores of zero-registers.
8354   bool Fast;
8355   const Function &F = MF.getFunction();
8356   if (Subtarget->hasFPARMv8() && !IsMemset && Size >= 16 &&
8357       !F.hasFnAttribute(Attribute::NoImplicitFloat) &&
8358       (memOpAlign(SrcAlign, DstAlign, 16) ||
8359        (allowsMisalignedMemoryAccesses(MVT::f128, 0, 1, &Fast) && Fast)))
8360     return MVT::f128;
8361
8362   if (Size >= 8 &&
8363       (memOpAlign(SrcAlign, DstAlign, 8) ||
8364        (allowsMisalignedMemoryAccesses(MVT::i64, 0, 1, &Fast) && Fast)))
8365     return MVT::i64;
8366
8367   if (Size >= 4 &&
8368       (memOpAlign(SrcAlign, DstAlign, 4) ||
8369        (allowsMisalignedMemoryAccesses(MVT::i32, 0, 1, &Fast) && Fast)))
8370     return MVT::i32;
8371
8372   return MVT::Other;
8373 }
8374
8375 // 12-bit optionally shifted immediates are legal for adds.
8376 bool AArch64TargetLowering::isLegalAddImmediate(int64_t Immed) const {
8377   if (Immed == std::numeric_limits<int64_t>::min()) {
8378     LLVM_DEBUG(dbgs() << "Illegal add imm " << Immed
8379                       << ": avoid UB for INT64_MIN\n");
8380     return false;
8381   }
8382   // Same encoding for add/sub, just flip the sign.
8383   Immed = std::abs(Immed);
8384   bool IsLegal = ((Immed >> 12) == 0 ||
8385                   ((Immed & 0xfff) == 0 && Immed >> 24 == 0));
8386   LLVM_DEBUG(dbgs() << "Is " << Immed
8387                     << " legal add imm: " << (IsLegal ? "yes" : "no") << "\n");
8388   return IsLegal;
8389 }
8390
8391 // Integer comparisons are implemented with ADDS/SUBS, so the range of valid
8392 // immediates is the same as for an add or a sub.
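// For example, "cmp x0, #4095" and "cmp x0, #4095, lsl #12" are legal,
// whereas comparing against 0x1001 requires materializing the immediate
// into a register first.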
8393 bool AArch64TargetLowering::isLegalICmpImmediate(int64_t Immed) const { 8394 return isLegalAddImmediate(Immed); 8395 } 8396 8397 /// isLegalAddressingMode - Return true if the addressing mode represented 8398 /// by AM is legal for this target, for a load/store of the specified type. 8399 bool AArch64TargetLowering::isLegalAddressingMode(const DataLayout &DL, 8400 const AddrMode &AM, Type *Ty, 8401 unsigned AS, Instruction *I) const { 8402 // AArch64 has five basic addressing modes: 8403 // reg 8404 // reg + 9-bit signed offset 8405 // reg + SIZE_IN_BYTES * 12-bit unsigned offset 8406 // reg1 + reg2 8407 // reg + SIZE_IN_BYTES * reg 8408 8409 // No global is ever allowed as a base. 8410 if (AM.BaseGV) 8411 return false; 8412 8413 // No reg+reg+imm addressing. 8414 if (AM.HasBaseReg && AM.BaseOffs && AM.Scale) 8415 return false; 8416 8417 // check reg + imm case: 8418 // i.e., reg + 0, reg + imm9, reg + SIZE_IN_BYTES * uimm12 8419 uint64_t NumBytes = 0; 8420 if (Ty->isSized()) { 8421 uint64_t NumBits = DL.getTypeSizeInBits(Ty); 8422 NumBytes = NumBits / 8; 8423 if (!isPowerOf2_64(NumBits)) 8424 NumBytes = 0; 8425 } 8426 8427 if (!AM.Scale) { 8428 int64_t Offset = AM.BaseOffs; 8429 8430 // 9-bit signed offset 8431 if (isInt<9>(Offset)) 8432 return true; 8433 8434 // 12-bit unsigned offset 8435 unsigned shift = Log2_64(NumBytes); 8436 if (NumBytes && Offset > 0 && (Offset / NumBytes) <= (1LL << 12) - 1 && 8437 // Must be a multiple of NumBytes (NumBytes is a power of 2) 8438 (Offset >> shift) << shift == Offset) 8439 return true; 8440 return false; 8441 } 8442 8443 // Check reg1 + SIZE_IN_BYTES * reg2 and reg1 + reg2 8444 8445 return AM.Scale == 1 || (AM.Scale > 0 && (uint64_t)AM.Scale == NumBytes); 8446 } 8447 8448 bool AArch64TargetLowering::shouldConsiderGEPOffsetSplit() const { 8449 // Consider splitting large offset of struct or array. 8450 return true; 8451 } 8452 8453 int AArch64TargetLowering::getScalingFactorCost(const DataLayout &DL, 8454 const AddrMode &AM, Type *Ty, 8455 unsigned AS) const { 8456 // Scaling factors are not free at all. 8457 // Operands | Rt Latency 8458 // ------------------------------------------- 8459 // Rt, [Xn, Xm] | 4 8460 // ------------------------------------------- 8461 // Rt, [Xn, Xm, lsl #imm] | Rn: 4 Rm: 5 8462 // Rt, [Xn, Wm, <extend> #imm] | 8463 if (isLegalAddressingMode(DL, AM, Ty, AS)) 8464 // Scale represents reg2 * scale, thus account for 1 if 8465 // it is not equal to 0 or 1. 8466 return AM.Scale != 0 && AM.Scale != 1; 8467 return -1; 8468 } 8469 8470 bool AArch64TargetLowering::isFMAFasterThanFMulAndFAdd(EVT VT) const { 8471 VT = VT.getScalarType(); 8472 8473 if (!VT.isSimple()) 8474 return false; 8475 8476 switch (VT.getSimpleVT().SimpleTy) { 8477 case MVT::f32: 8478 case MVT::f64: 8479 return true; 8480 default: 8481 break; 8482 } 8483 8484 return false; 8485 } 8486 8487 const MCPhysReg * 8488 AArch64TargetLowering::getScratchRegisters(CallingConv::ID) const { 8489 // LR is a callee-save register, but we must treat it as clobbered by any call 8490 // site. Hence we include LR in the scratch registers, which are in turn added 8491 // as implicit-defs for stackmaps and patchpoints. 
8492 static const MCPhysReg ScratchRegs[] = { 8493 AArch64::X16, AArch64::X17, AArch64::LR, 0 8494 }; 8495 return ScratchRegs; 8496 } 8497 8498 bool 8499 AArch64TargetLowering::isDesirableToCommuteWithShift(const SDNode *N) const { 8500 EVT VT = N->getValueType(0); 8501 // If N is unsigned bit extraction: ((x >> C) & mask), then do not combine 8502 // it with shift to let it be lowered to UBFX. 8503 if (N->getOpcode() == ISD::AND && (VT == MVT::i32 || VT == MVT::i64) && 8504 isa<ConstantSDNode>(N->getOperand(1))) { 8505 uint64_t TruncMask = N->getConstantOperandVal(1); 8506 if (isMask_64(TruncMask) && 8507 N->getOperand(0).getOpcode() == ISD::SRL && 8508 isa<ConstantSDNode>(N->getOperand(0)->getOperand(1))) 8509 return false; 8510 } 8511 return true; 8512 } 8513 8514 bool AArch64TargetLowering::shouldConvertConstantLoadToIntImm(const APInt &Imm, 8515 Type *Ty) const { 8516 assert(Ty->isIntegerTy()); 8517 8518 unsigned BitSize = Ty->getPrimitiveSizeInBits(); 8519 if (BitSize == 0) 8520 return false; 8521 8522 int64_t Val = Imm.getSExtValue(); 8523 if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, BitSize)) 8524 return true; 8525 8526 if ((int64_t)Val < 0) 8527 Val = ~Val; 8528 if (BitSize == 32) 8529 Val &= (1LL << 32) - 1; 8530 8531 unsigned LZ = countLeadingZeros((uint64_t)Val); 8532 unsigned Shift = (63 - LZ) / 16; 8533 // MOVZ is free so return true for one or fewer MOVK. 8534 return Shift < 3; 8535 } 8536 8537 bool AArch64TargetLowering::isExtractSubvectorCheap(EVT ResVT, EVT SrcVT, 8538 unsigned Index) const { 8539 if (!isOperationLegalOrCustom(ISD::EXTRACT_SUBVECTOR, ResVT)) 8540 return false; 8541 8542 return (Index == 0 || Index == ResVT.getVectorNumElements()); 8543 } 8544 8545 /// Turn vector tests of the signbit in the form of: 8546 /// xor (sra X, elt_size(X)-1), -1 8547 /// into: 8548 /// cmge X, X, #0 8549 static SDValue foldVectorXorShiftIntoCmp(SDNode *N, SelectionDAG &DAG, 8550 const AArch64Subtarget *Subtarget) { 8551 EVT VT = N->getValueType(0); 8552 if (!Subtarget->hasNEON() || !VT.isVector()) 8553 return SDValue(); 8554 8555 // There must be a shift right algebraic before the xor, and the xor must be a 8556 // 'not' operation. 8557 SDValue Shift = N->getOperand(0); 8558 SDValue Ones = N->getOperand(1); 8559 if (Shift.getOpcode() != AArch64ISD::VASHR || !Shift.hasOneUse() || 8560 !ISD::isBuildVectorAllOnes(Ones.getNode())) 8561 return SDValue(); 8562 8563 // The shift should be smearing the sign bit across each vector element. 8564 auto *ShiftAmt = dyn_cast<ConstantSDNode>(Shift.getOperand(1)); 8565 EVT ShiftEltTy = Shift.getValueType().getVectorElementType(); 8566 if (!ShiftAmt || ShiftAmt->getZExtValue() != ShiftEltTy.getSizeInBits() - 1) 8567 return SDValue(); 8568 8569 return DAG.getNode(AArch64ISD::CMGEz, SDLoc(N), VT, Shift.getOperand(0)); 8570 } 8571 8572 // Generate SUBS and CSEL for integer abs. 8573 static SDValue performIntegerAbsCombine(SDNode *N, SelectionDAG &DAG) { 8574 EVT VT = N->getValueType(0); 8575 8576 SDValue N0 = N->getOperand(0); 8577 SDValue N1 = N->getOperand(1); 8578 SDLoc DL(N); 8579 8580 // Check pattern of XOR(ADD(X,Y), Y) where Y is SRA(X, size(X)-1) 8581 // and change it to SUB and CSEL. 
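  // This is the canonical integer abs(x) expansion, e.g. for i32:
  //   abs(x) == (x + (x >> 31)) ^ (x >> 31)
  // which, after the rewrite below, corresponds to
  //   cmp x, #0; csel res, x, -x, pl.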
8582   if (VT.isInteger() && N->getOpcode() == ISD::XOR &&
8583       N0.getOpcode() == ISD::ADD && N0.getOperand(1) == N1 &&
8584       N1.getOpcode() == ISD::SRA && N1.getOperand(0) == N0.getOperand(0))
8585     if (ConstantSDNode *Y1C = dyn_cast<ConstantSDNode>(N1.getOperand(1)))
8586       if (Y1C->getAPIntValue() == VT.getSizeInBits() - 1) {
8587         SDValue Neg = DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT),
8588                                   N0.getOperand(0));
8589         // Generate SUBS & CSEL.
8590         SDValue Cmp =
8591             DAG.getNode(AArch64ISD::SUBS, DL, DAG.getVTList(VT, MVT::i32),
8592                         N0.getOperand(0), DAG.getConstant(0, DL, VT));
8593         return DAG.getNode(AArch64ISD::CSEL, DL, VT, N0.getOperand(0), Neg,
8594                            DAG.getConstant(AArch64CC::PL, DL, MVT::i32),
8595                            SDValue(Cmp.getNode(), 1));
8596       }
8597   return SDValue();
8598 }
8599
8600 static SDValue performXorCombine(SDNode *N, SelectionDAG &DAG,
8601                                  TargetLowering::DAGCombinerInfo &DCI,
8602                                  const AArch64Subtarget *Subtarget) {
8603   if (DCI.isBeforeLegalizeOps())
8604     return SDValue();
8605
8606   if (SDValue Cmp = foldVectorXorShiftIntoCmp(N, DAG, Subtarget))
8607     return Cmp;
8608
8609   return performIntegerAbsCombine(N, DAG);
8610 }
8611
8612 SDValue
8613 AArch64TargetLowering::BuildSDIVPow2(SDNode *N, const APInt &Divisor,
8614                                      SelectionDAG &DAG,
8615                                      SmallVectorImpl<SDNode *> &Created) const {
8616   AttributeList Attr = DAG.getMachineFunction().getFunction().getAttributes();
8617   if (isIntDivCheap(N->getValueType(0), Attr))
8618     return SDValue(N,0); // Lower SDIV as SDIV
8619
8620   // fold (sdiv X, pow2)
8621   EVT VT = N->getValueType(0);
8622   if ((VT != MVT::i32 && VT != MVT::i64) ||
8623       !(Divisor.isPowerOf2() || (-Divisor).isPowerOf2()))
8624     return SDValue();
8625
8626   SDLoc DL(N);
8627   SDValue N0 = N->getOperand(0);
8628   unsigned Lg2 = Divisor.countTrailingZeros();
8629   SDValue Zero = DAG.getConstant(0, DL, VT);
8630   SDValue Pow2MinusOne = DAG.getConstant((1ULL << Lg2) - 1, DL, VT);
8631
8632   // Add (N0 < 0) ? Pow2 - 1 : 0;
8633   SDValue CCVal;
8634   SDValue Cmp = getAArch64Cmp(N0, Zero, ISD::SETLT, CCVal, DAG, DL);
8635   SDValue Add = DAG.getNode(ISD::ADD, DL, VT, N0, Pow2MinusOne);
8636   SDValue CSel = DAG.getNode(AArch64ISD::CSEL, DL, VT, Add, N0, CCVal, Cmp);
8637
8638   Created.push_back(Cmp.getNode());
8639   Created.push_back(Add.getNode());
8640   Created.push_back(CSel.getNode());
8641
8642   // Divide by pow2.
8643   SDValue SRA =
8644       DAG.getNode(ISD::SRA, DL, VT, CSel, DAG.getConstant(Lg2, DL, MVT::i64));
8645
8646   // If we're dividing by a positive value, we're done. Otherwise, we must
8647   // negate the result.
8648   if (Divisor.isNonNegative())
8649     return SRA;
8650
8651   Created.push_back(SRA.getNode());
8652   return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), SRA);
8653 }
8654
8655 static SDValue performMulCombine(SDNode *N, SelectionDAG &DAG,
8656                                  TargetLowering::DAGCombinerInfo &DCI,
8657                                  const AArch64Subtarget *Subtarget) {
8658   if (DCI.isBeforeLegalizeOps())
8659     return SDValue();
8660
8661   // The below optimizations require a constant RHS.
8662   if (!isa<ConstantSDNode>(N->getOperand(1)))
8663     return SDValue();
8664
8665   ConstantSDNode *C = cast<ConstantSDNode>(N->getOperand(1));
8666   const APInt &ConstValue = C->getAPIntValue();
8667
8668   // Multiplication of a power of two plus/minus one can be done more
8669   // cheaply as a shift+add/sub. For now, this is true unilaterally. If
8670   // future CPUs have a cheaper MADD instruction, this may need to be
8671   // gated on a subtarget feature. For Cyclone, 32-bit MADD is 4 cycles and
8672   // 64-bit is 5 cycles, so this is always a win.
8673   // More aggressively, some multiplications N0 * C can be lowered to
8674   // shift+add+shift if the constant C = A * B where A = 2^N + 1 and B = 2^M,
8675   // e.g. 6=3*2=(2+1)*2.
8676   // TODO: consider lowering more cases, e.g. C = 14, -6, -14 or even 45,
8677   // which equals (1+2)*16-(1+2).
8678   SDValue N0 = N->getOperand(0);
8679   // TrailingZeroes is used to test if the mul can be lowered to
8680   // shift+add+shift.
8681   unsigned TrailingZeroes = ConstValue.countTrailingZeros();
8682   if (TrailingZeroes) {
8683     // Conservatively do not lower to shift+add+shift if the mul might be
8684     // folded into smul or umul.
8685     if (N0->hasOneUse() && (isSignExtended(N0.getNode(), DAG) ||
8686                             isZeroExtended(N0.getNode(), DAG)))
8687       return SDValue();
8688     // Conservatively do not lower to shift+add+shift if the mul might be
8689     // folded into madd or msub.
8690     if (N->hasOneUse() && (N->use_begin()->getOpcode() == ISD::ADD ||
8691                            N->use_begin()->getOpcode() == ISD::SUB))
8692       return SDValue();
8693   }
8694   // Use ShiftedConstValue instead of ConstValue to support both shift+add/sub
8695   // and shift+add+shift.
8696   APInt ShiftedConstValue = ConstValue.ashr(TrailingZeroes);
8697
8698   unsigned ShiftAmt, AddSubOpc;
8699   // Is the shifted value the LHS operand of the add/sub?
8700   bool ShiftValUseIsN0 = true;
8701   // Do we need to negate the result?
8702   bool NegateResult = false;
8703
8704   if (ConstValue.isNonNegative()) {
8705     // (mul x, 2^N + 1) => (add (shl x, N), x)
8706     // (mul x, 2^N - 1) => (sub (shl x, N), x)
8707     // (mul x, (2^N + 1) * 2^M) => (shl (add (shl x, N), x), M)
8708     APInt SCVMinus1 = ShiftedConstValue - 1;
8709     APInt CVPlus1 = ConstValue + 1;
8710     if (SCVMinus1.isPowerOf2()) {
8711       ShiftAmt = SCVMinus1.logBase2();
8712       AddSubOpc = ISD::ADD;
8713     } else if (CVPlus1.isPowerOf2()) {
8714       ShiftAmt = CVPlus1.logBase2();
8715       AddSubOpc = ISD::SUB;
8716     } else
8717       return SDValue();
8718   } else {
8719     // (mul x, -(2^N - 1)) => (sub x, (shl x, N))
8720     // (mul x, -(2^N + 1)) => - (add (shl x, N), x)
8721     APInt CVNegPlus1 = -ConstValue + 1;
8722     APInt CVNegMinus1 = -ConstValue - 1;
8723     if (CVNegPlus1.isPowerOf2()) {
8724       ShiftAmt = CVNegPlus1.logBase2();
8725       AddSubOpc = ISD::SUB;
8726       ShiftValUseIsN0 = false;
8727     } else if (CVNegMinus1.isPowerOf2()) {
8728       ShiftAmt = CVNegMinus1.logBase2();
8729       AddSubOpc = ISD::ADD;
8730       NegateResult = true;
8731     } else
8732       return SDValue();
8733   }
8734
8735   SDLoc DL(N);
8736   EVT VT = N->getValueType(0);
8737   SDValue ShiftedVal = DAG.getNode(ISD::SHL, DL, VT, N0,
8738                                    DAG.getConstant(ShiftAmt, DL, MVT::i64));
8739
8740   SDValue AddSubN0 = ShiftValUseIsN0 ? ShiftedVal : N0;
8741   SDValue AddSubN1 = ShiftValUseIsN0 ? N0 : ShiftedVal;
8742   SDValue Res = DAG.getNode(AddSubOpc, DL, VT, AddSubN0, AddSubN1);
8743   assert(!(NegateResult && TrailingZeroes) &&
8744          "NegateResult and TrailingZeroes cannot both be true for now.");
8745   // Negate the result.
8746   if (NegateResult)
8747     return DAG.getNode(ISD::SUB, DL, VT, DAG.getConstant(0, DL, VT), Res);
8748   // Shift the result.
8749   if (TrailingZeroes)
8750     return DAG.getNode(ISD::SHL, DL, VT, Res,
8751                        DAG.getConstant(TrailingZeroes, DL, MVT::i64));
8752   return Res;
8753 }
8754
8755 static SDValue performVectorCompareAndMaskUnaryOpCombine(SDNode *N,
8756                                                          SelectionDAG &DAG) {
8757   // Take advantage of vector comparisons producing 0 or -1 in each lane to
8758   // optimize away the operation when it's from a constant.
//
8760   // The general transformation is:
8761   // UNARYOP(AND(VECTOR_CMP(x,y), constant)) -->
8762   // AND(VECTOR_CMP(x,y), constant2)
8763   // constant2 = UNARYOP(constant)
8764
8765   // Early exit if this isn't a vector operation, the operand of the
8766   // unary operation isn't a bitwise AND, or if the sizes of the operations
8767   // aren't the same.
8768   EVT VT = N->getValueType(0);
8769   if (!VT.isVector() || N->getOperand(0)->getOpcode() != ISD::AND ||
8770       N->getOperand(0)->getOperand(0)->getOpcode() != ISD::SETCC ||
8771       VT.getSizeInBits() != N->getOperand(0)->getValueType(0).getSizeInBits())
8772     return SDValue();
8773
8774   // Now check that the other operand of the AND is a constant. We could
8775   // make the transformation for non-constant splats as well, but it's unclear
8776   // that would be a benefit as it would not eliminate any operations, just
8777   // perform one more step in scalar code before moving to the vector unit.
8778   if (BuildVectorSDNode *BV =
8779           dyn_cast<BuildVectorSDNode>(N->getOperand(0)->getOperand(1))) {
8780     // Bail out if the vector isn't a constant.
8781     if (!BV->isConstant())
8782       return SDValue();
8783
8784     // Everything checks out. Build up the new and improved node.
8785     SDLoc DL(N);
8786     EVT IntVT = BV->getValueType(0);
8787     // Create a new constant of the appropriate type for the transformed
8788     // DAG.
8789     SDValue SourceConst = DAG.getNode(N->getOpcode(), DL, VT, SDValue(BV, 0));
8790     // The AND node needs bitcasts to/from an integer vector type around it.
8791     SDValue MaskConst = DAG.getNode(ISD::BITCAST, DL, IntVT, SourceConst);
8792     SDValue NewAnd = DAG.getNode(ISD::AND, DL, IntVT,
8793                                  N->getOperand(0)->getOperand(0), MaskConst);
8794     SDValue Res = DAG.getNode(ISD::BITCAST, DL, VT, NewAnd);
8795     return Res;
8796   }
8797
8798   return SDValue();
8799 }
8800
8801 static SDValue performIntToFpCombine(SDNode *N, SelectionDAG &DAG,
8802                                      const AArch64Subtarget *Subtarget) {
8803   // First try to optimize away the conversion when it's conditionally from
8804   // a constant. Vectors only.
8805   if (SDValue Res = performVectorCompareAndMaskUnaryOpCombine(N, DAG))
8806     return Res;
8807
8808   EVT VT = N->getValueType(0);
8809   if (VT != MVT::f32 && VT != MVT::f64)
8810     return SDValue();
8811
8812   // Only optimize when the source and destination types have the same width.
8813   if (VT.getSizeInBits() != N->getOperand(0).getValueSizeInBits())
8814     return SDValue();
8815
8816   // If the result of an integer load is only used by an integer-to-float
8817   // conversion, use an fp load and an AdvSIMD scalar {S|U}CVTF instead.
8818   // This eliminates an "integer-to-vector-move" UOP and improves throughput.
8819   SDValue N0 = N->getOperand(0);
8820   if (Subtarget->hasNEON() && ISD::isNormalLoad(N0.getNode()) && N0.hasOneUse() &&
8821       // Do not change the width of a volatile load.
8822       !cast<LoadSDNode>(N0)->isVolatile()) {
8823     LoadSDNode *LN0 = cast<LoadSDNode>(N0);
8824     SDValue Load = DAG.getLoad(VT, SDLoc(N), LN0->getChain(), LN0->getBasePtr(),
8825                                LN0->getPointerInfo(), LN0->getAlignment(),
8826                                LN0->getMemOperand()->getFlags());
8827
8828     // Make sure successors of the original load stay after it by updating them
8829     // to use the new Chain.
8830     DAG.ReplaceAllUsesOfValueWith(SDValue(LN0, 1), Load.getValue(1));
8831
8832     unsigned Opcode =
8833         (N->getOpcode() == ISD::SINT_TO_FP) ?
AArch64ISD::SITOF : AArch64ISD::UITOF; 8834 return DAG.getNode(Opcode, SDLoc(N), VT, Load); 8835 } 8836 8837 return SDValue(); 8838 } 8839 8840 /// Fold a floating-point multiply by power of two into floating-point to 8841 /// fixed-point conversion. 8842 static SDValue performFpToIntCombine(SDNode *N, SelectionDAG &DAG, 8843 TargetLowering::DAGCombinerInfo &DCI, 8844 const AArch64Subtarget *Subtarget) { 8845 if (!Subtarget->hasNEON()) 8846 return SDValue(); 8847 8848 SDValue Op = N->getOperand(0); 8849 if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() || 8850 Op.getOpcode() != ISD::FMUL) 8851 return SDValue(); 8852 8853 SDValue ConstVec = Op->getOperand(1); 8854 if (!isa<BuildVectorSDNode>(ConstVec)) 8855 return SDValue(); 8856 8857 MVT FloatTy = Op.getSimpleValueType().getVectorElementType(); 8858 uint32_t FloatBits = FloatTy.getSizeInBits(); 8859 if (FloatBits != 32 && FloatBits != 64) 8860 return SDValue(); 8861 8862 MVT IntTy = N->getSimpleValueType(0).getVectorElementType(); 8863 uint32_t IntBits = IntTy.getSizeInBits(); 8864 if (IntBits != 16 && IntBits != 32 && IntBits != 64) 8865 return SDValue(); 8866 8867 // Avoid conversions where iN is larger than the float (e.g., float -> i64). 8868 if (IntBits > FloatBits) 8869 return SDValue(); 8870 8871 BitVector UndefElements; 8872 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec); 8873 int32_t Bits = IntBits == 64 ? 64 : 32; 8874 int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, Bits + 1); 8875 if (C == -1 || C == 0 || C > Bits) 8876 return SDValue(); 8877 8878 MVT ResTy; 8879 unsigned NumLanes = Op.getValueType().getVectorNumElements(); 8880 switch (NumLanes) { 8881 default: 8882 return SDValue(); 8883 case 2: 8884 ResTy = FloatBits == 32 ? MVT::v2i32 : MVT::v2i64; 8885 break; 8886 case 4: 8887 ResTy = FloatBits == 32 ? MVT::v4i32 : MVT::v4i64; 8888 break; 8889 } 8890 8891 if (ResTy == MVT::v4i64 && DCI.isBeforeLegalizeOps()) 8892 return SDValue(); 8893 8894 assert((ResTy != MVT::v4i64 || DCI.isBeforeLegalizeOps()) && 8895 "Illegal vector type after legalization"); 8896 8897 SDLoc DL(N); 8898 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT; 8899 unsigned IntrinsicOpcode = IsSigned ? Intrinsic::aarch64_neon_vcvtfp2fxs 8900 : Intrinsic::aarch64_neon_vcvtfp2fxu; 8901 SDValue FixConv = 8902 DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, ResTy, 8903 DAG.getConstant(IntrinsicOpcode, DL, MVT::i32), 8904 Op->getOperand(0), DAG.getConstant(C, DL, MVT::i32)); 8905 // We can handle smaller integers by generating an extra trunc. 8906 if (IntBits < FloatBits) 8907 FixConv = DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), FixConv); 8908 8909 return FixConv; 8910 } 8911 8912 /// Fold a floating-point divide by power of two into fixed-point to 8913 /// floating-point conversion. 
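/// For example, (fdiv (sint_to_fp v4i32:%x), splat(8.0)) becomes
/// "scvtf v0.4s, v1.4s, #3", a single convert with three fractional bits.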
8914 static SDValue performFDivCombine(SDNode *N, SelectionDAG &DAG, 8915 TargetLowering::DAGCombinerInfo &DCI, 8916 const AArch64Subtarget *Subtarget) { 8917 if (!Subtarget->hasNEON()) 8918 return SDValue(); 8919 8920 SDValue Op = N->getOperand(0); 8921 unsigned Opc = Op->getOpcode(); 8922 if (!Op.getValueType().isVector() || !Op.getValueType().isSimple() || 8923 !Op.getOperand(0).getValueType().isSimple() || 8924 (Opc != ISD::SINT_TO_FP && Opc != ISD::UINT_TO_FP)) 8925 return SDValue(); 8926 8927 SDValue ConstVec = N->getOperand(1); 8928 if (!isa<BuildVectorSDNode>(ConstVec)) 8929 return SDValue(); 8930 8931 MVT IntTy = Op.getOperand(0).getSimpleValueType().getVectorElementType(); 8932 int32_t IntBits = IntTy.getSizeInBits(); 8933 if (IntBits != 16 && IntBits != 32 && IntBits != 64) 8934 return SDValue(); 8935 8936 MVT FloatTy = N->getSimpleValueType(0).getVectorElementType(); 8937 int32_t FloatBits = FloatTy.getSizeInBits(); 8938 if (FloatBits != 32 && FloatBits != 64) 8939 return SDValue(); 8940 8941 // Avoid conversions where iN is larger than the float (e.g., i64 -> float). 8942 if (IntBits > FloatBits) 8943 return SDValue(); 8944 8945 BitVector UndefElements; 8946 BuildVectorSDNode *BV = cast<BuildVectorSDNode>(ConstVec); 8947 int32_t C = BV->getConstantFPSplatPow2ToLog2Int(&UndefElements, FloatBits + 1); 8948 if (C == -1 || C == 0 || C > FloatBits) 8949 return SDValue(); 8950 8951 MVT ResTy; 8952 unsigned NumLanes = Op.getValueType().getVectorNumElements(); 8953 switch (NumLanes) { 8954 default: 8955 return SDValue(); 8956 case 2: 8957 ResTy = FloatBits == 32 ? MVT::v2i32 : MVT::v2i64; 8958 break; 8959 case 4: 8960 ResTy = FloatBits == 32 ? MVT::v4i32 : MVT::v4i64; 8961 break; 8962 } 8963 8964 if (ResTy == MVT::v4i64 && DCI.isBeforeLegalizeOps()) 8965 return SDValue(); 8966 8967 SDLoc DL(N); 8968 SDValue ConvInput = Op.getOperand(0); 8969 bool IsSigned = Opc == ISD::SINT_TO_FP; 8970 if (IntBits < FloatBits) 8971 ConvInput = DAG.getNode(IsSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND, DL, 8972 ResTy, ConvInput); 8973 8974 unsigned IntrinsicOpcode = IsSigned ? Intrinsic::aarch64_neon_vcvtfxs2fp 8975 : Intrinsic::aarch64_neon_vcvtfxu2fp; 8976 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, Op.getValueType(), 8977 DAG.getConstant(IntrinsicOpcode, DL, MVT::i32), ConvInput, 8978 DAG.getConstant(C, DL, MVT::i32)); 8979 } 8980 8981 /// An EXTR instruction is made up of two shifts, ORed together. This helper 8982 /// searches for and classifies those shifts. 8983 static bool findEXTRHalf(SDValue N, SDValue &Src, uint32_t &ShiftAmount, 8984 bool &FromHi) { 8985 if (N.getOpcode() == ISD::SHL) 8986 FromHi = false; 8987 else if (N.getOpcode() == ISD::SRL) 8988 FromHi = true; 8989 else 8990 return false; 8991 8992 if (!isa<ConstantSDNode>(N.getOperand(1))) 8993 return false; 8994 8995 ShiftAmount = N->getConstantOperandVal(1); 8996 Src = N->getOperand(0); 8997 return true; 8998 } 8999 9000 /// EXTR instruction extracts a contiguous chunk of bits from two existing 9001 /// registers viewed as a high/low pair. This function looks for the pattern: 9002 /// <tt>(or (shl VAL1, \#N), (srl VAL2, \#RegWidth-N))</tt> and replaces it 9003 /// with an EXTR. Can't quite be done in TableGen because the two immediates 9004 /// aren't independent. 
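/// For example, for i32:
///   (or (shl x, #24), (srl y, #8))  ==>  EXTR Wd, Wx, Wy, #8
/// i.e. bits [39:8] of the 64-bit concatenation Wx:Wy.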
9005 static SDValue tryCombineToEXTR(SDNode *N, 9006 TargetLowering::DAGCombinerInfo &DCI) { 9007 SelectionDAG &DAG = DCI.DAG; 9008 SDLoc DL(N); 9009 EVT VT = N->getValueType(0); 9010 9011 assert(N->getOpcode() == ISD::OR && "Unexpected root"); 9012 9013 if (VT != MVT::i32 && VT != MVT::i64) 9014 return SDValue(); 9015 9016 SDValue LHS; 9017 uint32_t ShiftLHS = 0; 9018 bool LHSFromHi = false; 9019 if (!findEXTRHalf(N->getOperand(0), LHS, ShiftLHS, LHSFromHi)) 9020 return SDValue(); 9021 9022 SDValue RHS; 9023 uint32_t ShiftRHS = 0; 9024 bool RHSFromHi = false; 9025 if (!findEXTRHalf(N->getOperand(1), RHS, ShiftRHS, RHSFromHi)) 9026 return SDValue(); 9027 9028 // If they're both trying to come from the high part of the register, they're 9029 // not really an EXTR. 9030 if (LHSFromHi == RHSFromHi) 9031 return SDValue(); 9032 9033 if (ShiftLHS + ShiftRHS != VT.getSizeInBits()) 9034 return SDValue(); 9035 9036 if (LHSFromHi) { 9037 std::swap(LHS, RHS); 9038 std::swap(ShiftLHS, ShiftRHS); 9039 } 9040 9041 return DAG.getNode(AArch64ISD::EXTR, DL, VT, LHS, RHS, 9042 DAG.getConstant(ShiftRHS, DL, MVT::i64)); 9043 } 9044 9045 static SDValue tryCombineToBSL(SDNode *N, 9046 TargetLowering::DAGCombinerInfo &DCI) { 9047 EVT VT = N->getValueType(0); 9048 SelectionDAG &DAG = DCI.DAG; 9049 SDLoc DL(N); 9050 9051 if (!VT.isVector()) 9052 return SDValue(); 9053 9054 SDValue N0 = N->getOperand(0); 9055 if (N0.getOpcode() != ISD::AND) 9056 return SDValue(); 9057 9058 SDValue N1 = N->getOperand(1); 9059 if (N1.getOpcode() != ISD::AND) 9060 return SDValue(); 9061 9062 // We only have to look for constant vectors here since the general, variable 9063 // case can be handled in TableGen. 9064 unsigned Bits = VT.getScalarSizeInBits(); 9065 uint64_t BitMask = Bits == 64 ? -1ULL : ((1ULL << Bits) - 1); 9066 for (int i = 1; i >= 0; --i) 9067 for (int j = 1; j >= 0; --j) { 9068 BuildVectorSDNode *BVN0 = dyn_cast<BuildVectorSDNode>(N0->getOperand(i)); 9069 BuildVectorSDNode *BVN1 = dyn_cast<BuildVectorSDNode>(N1->getOperand(j)); 9070 if (!BVN0 || !BVN1) 9071 continue; 9072 9073 bool FoundMatch = true; 9074 for (unsigned k = 0; k < VT.getVectorNumElements(); ++k) { 9075 ConstantSDNode *CN0 = dyn_cast<ConstantSDNode>(BVN0->getOperand(k)); 9076 ConstantSDNode *CN1 = dyn_cast<ConstantSDNode>(BVN1->getOperand(k)); 9077 if (!CN0 || !CN1 || 9078 CN0->getZExtValue() != (BitMask & ~CN1->getZExtValue())) { 9079 FoundMatch = false; 9080 break; 9081 } 9082 } 9083 9084 if (FoundMatch) 9085 return DAG.getNode(AArch64ISD::BSL, DL, VT, SDValue(BVN0, 0), 9086 N0->getOperand(1 - i), N1->getOperand(1 - j)); 9087 } 9088 9089 return SDValue(); 9090 } 9091 9092 static SDValue performORCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, 9093 const AArch64Subtarget *Subtarget) { 9094 // Attempt to form an EXTR from (or (shl VAL1, #N), (srl VAL2, #RegWidth-N)) 9095 SelectionDAG &DAG = DCI.DAG; 9096 EVT VT = N->getValueType(0); 9097 9098 if (!DAG.getTargetLoweringInfo().isTypeLegal(VT)) 9099 return SDValue(); 9100 9101 if (SDValue Res = tryCombineToEXTR(N, DCI)) 9102 return Res; 9103 9104 if (SDValue Res = tryCombineToBSL(N, DCI)) 9105 return Res; 9106 9107 return SDValue(); 9108 } 9109 9110 static SDValue performSRLCombine(SDNode *N, 9111 TargetLowering::DAGCombinerInfo &DCI) { 9112 SelectionDAG &DAG = DCI.DAG; 9113 EVT VT = N->getValueType(0); 9114 if (VT != MVT::i32 && VT != MVT::i64) 9115 return SDValue(); 9116 9117 // Canonicalize (srl (bswap i32 x), 16) to (rotr (bswap i32 x), 16), if the 9118 // high 16-bits of x are zero. 
Similarly, canonicalize (srl (bswap i64 x), 32) 9119 // to (rotr (bswap i64 x), 32), if the high 32-bits of x are zero. 9120 SDValue N0 = N->getOperand(0); 9121 if (N0.getOpcode() == ISD::BSWAP) { 9122 SDLoc DL(N); 9123 SDValue N1 = N->getOperand(1); 9124 SDValue N00 = N0.getOperand(0); 9125 if (ConstantSDNode *C = dyn_cast<ConstantSDNode>(N1)) { 9126 uint64_t ShiftAmt = C->getZExtValue(); 9127 if (VT == MVT::i32 && ShiftAmt == 16 && 9128 DAG.MaskedValueIsZero(N00, APInt::getHighBitsSet(32, 16))) 9129 return DAG.getNode(ISD::ROTR, DL, VT, N0, N1); 9130 if (VT == MVT::i64 && ShiftAmt == 32 && 9131 DAG.MaskedValueIsZero(N00, APInt::getHighBitsSet(64, 32))) 9132 return DAG.getNode(ISD::ROTR, DL, VT, N0, N1); 9133 } 9134 } 9135 return SDValue(); 9136 } 9137 9138 static SDValue performBitcastCombine(SDNode *N, 9139 TargetLowering::DAGCombinerInfo &DCI, 9140 SelectionDAG &DAG) { 9141 // Wait 'til after everything is legalized to try this. That way we have 9142 // legal vector types and such. 9143 if (DCI.isBeforeLegalizeOps()) 9144 return SDValue(); 9145 9146 // Remove extraneous bitcasts around an extract_subvector. 9147 // For example, 9148 // (v4i16 (bitconvert 9149 // (extract_subvector (v2i64 (bitconvert (v8i16 ...)), (i64 1))))) 9150 // becomes 9151 // (extract_subvector ((v8i16 ...), (i64 4))) 9152 9153 // Only interested in 64-bit vectors as the ultimate result. 9154 EVT VT = N->getValueType(0); 9155 if (!VT.isVector()) 9156 return SDValue(); 9157 if (VT.getSimpleVT().getSizeInBits() != 64) 9158 return SDValue(); 9159 // Is the operand an extract_subvector starting at the beginning or halfway 9160 // point of the vector? A low half may also come through as an 9161 // EXTRACT_SUBREG, so look for that, too. 9162 SDValue Op0 = N->getOperand(0); 9163 if (Op0->getOpcode() != ISD::EXTRACT_SUBVECTOR && 9164 !(Op0->isMachineOpcode() && 9165 Op0->getMachineOpcode() == AArch64::EXTRACT_SUBREG)) 9166 return SDValue(); 9167 uint64_t idx = cast<ConstantSDNode>(Op0->getOperand(1))->getZExtValue(); 9168 if (Op0->getOpcode() == ISD::EXTRACT_SUBVECTOR) { 9169 if (Op0->getValueType(0).getVectorNumElements() != idx && idx != 0) 9170 return SDValue(); 9171 } else if (Op0->getMachineOpcode() == AArch64::EXTRACT_SUBREG) { 9172 if (idx != AArch64::dsub) 9173 return SDValue(); 9174 // The dsub reference is equivalent to a lane zero subvector reference. 9175 idx = 0; 9176 } 9177 // Look through the bitcast of the input to the extract. 9178 if (Op0->getOperand(0)->getOpcode() != ISD::BITCAST) 9179 return SDValue(); 9180 SDValue Source = Op0->getOperand(0)->getOperand(0); 9181 // If the source type has twice the number of elements as our destination 9182 // type, we know this is an extract of the high or low half of the vector. 9183 EVT SVT = Source->getValueType(0); 9184 if (!SVT.isVector() || 9185 SVT.getVectorNumElements() != VT.getVectorNumElements() * 2) 9186 return SDValue(); 9187 9188 LLVM_DEBUG( 9189 dbgs() << "aarch64-lower: bitcast extract_subvector simplification\n"); 9190 9191 // Create the simplified form to just extract the low or high half of the 9192 // vector directly rather than bothering with the bitcasts. 
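  // A high half (idx != 0) becomes an EXTRACT_SUBVECTOR at lane NumElements;
  // a low half is instead referenced through the dsub subregister, matching
  // the two forms recognised above.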
9193 SDLoc dl(N); 9194 unsigned NumElements = VT.getVectorNumElements(); 9195 if (idx) { 9196 SDValue HalfIdx = DAG.getConstant(NumElements, dl, MVT::i64); 9197 return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, VT, Source, HalfIdx); 9198 } else { 9199 SDValue SubReg = DAG.getTargetConstant(AArch64::dsub, dl, MVT::i32); 9200 return SDValue(DAG.getMachineNode(TargetOpcode::EXTRACT_SUBREG, dl, VT, 9201 Source, SubReg), 9202 0); 9203 } 9204 } 9205 9206 static SDValue performConcatVectorsCombine(SDNode *N, 9207 TargetLowering::DAGCombinerInfo &DCI, 9208 SelectionDAG &DAG) { 9209 SDLoc dl(N); 9210 EVT VT = N->getValueType(0); 9211 SDValue N0 = N->getOperand(0), N1 = N->getOperand(1); 9212 9213 // Optimize concat_vectors of truncated vectors, where the intermediate 9214 // type is illegal, to avoid said illegality, e.g., 9215 // (v4i16 (concat_vectors (v2i16 (truncate (v2i64))), 9216 // (v2i16 (truncate (v2i64))))) 9217 // -> 9218 // (v4i16 (truncate (vector_shuffle (v4i32 (bitcast (v2i64))), 9219 // (v4i32 (bitcast (v2i64))), 9220 // <0, 2, 4, 6>))) 9221 // This isn't really target-specific, but ISD::TRUNCATE legality isn't keyed 9222 // on both input and result type, so we might generate worse code. 9223 // On AArch64 we know it's fine for v2i64->v4i16 and v4i32->v8i8. 9224 if (N->getNumOperands() == 2 && 9225 N0->getOpcode() == ISD::TRUNCATE && 9226 N1->getOpcode() == ISD::TRUNCATE) { 9227 SDValue N00 = N0->getOperand(0); 9228 SDValue N10 = N1->getOperand(0); 9229 EVT N00VT = N00.getValueType(); 9230 9231 if (N00VT == N10.getValueType() && 9232 (N00VT == MVT::v2i64 || N00VT == MVT::v4i32) && 9233 N00VT.getScalarSizeInBits() == 4 * VT.getScalarSizeInBits()) { 9234 MVT MidVT = (N00VT == MVT::v2i64 ? MVT::v4i32 : MVT::v8i16); 9235 SmallVector<int, 8> Mask(MidVT.getVectorNumElements()); 9236 for (size_t i = 0; i < Mask.size(); ++i) 9237 Mask[i] = i * 2; 9238 return DAG.getNode(ISD::TRUNCATE, dl, VT, 9239 DAG.getVectorShuffle( 9240 MidVT, dl, 9241 DAG.getNode(ISD::BITCAST, dl, MidVT, N00), 9242 DAG.getNode(ISD::BITCAST, dl, MidVT, N10), Mask)); 9243 } 9244 } 9245 9246 // Wait 'til after everything is legalized to try this. That way we have 9247 // legal vector types and such. 9248 if (DCI.isBeforeLegalizeOps()) 9249 return SDValue(); 9250 9251 // If we see a (concat_vectors (v1x64 A), (v1x64 A)) it's really a vector 9252 // splat. The indexed instructions are going to be expecting a DUPLANE64, so 9253 // canonicalise to that. 9254 if (N0 == N1 && VT.getVectorNumElements() == 2) { 9255 assert(VT.getScalarSizeInBits() == 64); 9256 return DAG.getNode(AArch64ISD::DUPLANE64, dl, VT, WidenVector(N0, DAG), 9257 DAG.getConstant(0, dl, MVT::i64)); 9258 } 9259 9260 // Canonicalise concat_vectors so that the right-hand vector has as few 9261 // bit-casts as possible before its real operation. The primary matching 9262 // destination for these operations will be the narrowing "2" instructions, 9263 // which depend on the operation being performed on this right-hand vector. 9264 // For example, 9265 // (concat_vectors LHS, (v1i64 (bitconvert (v4i16 RHS)))) 9266 // becomes 9267 // (bitconvert (concat_vectors (v4i16 (bitconvert LHS)), RHS)) 9268 9269 if (N1->getOpcode() != ISD::BITCAST) 9270 return SDValue(); 9271 SDValue RHS = N1->getOperand(0); 9272 MVT RHSTy = RHS.getValueType().getSimpleVT(); 9273 // If the RHS is not a vector, this is not the pattern we're looking for. 
9274 if (!RHSTy.isVector()) 9275 return SDValue(); 9276 9277 LLVM_DEBUG( 9278 dbgs() << "aarch64-lower: concat_vectors bitcast simplification\n"); 9279 9280 MVT ConcatTy = MVT::getVectorVT(RHSTy.getVectorElementType(), 9281 RHSTy.getVectorNumElements() * 2); 9282 return DAG.getNode(ISD::BITCAST, dl, VT, 9283 DAG.getNode(ISD::CONCAT_VECTORS, dl, ConcatTy, 9284 DAG.getNode(ISD::BITCAST, dl, RHSTy, N0), 9285 RHS)); 9286 } 9287 9288 static SDValue tryCombineFixedPointConvert(SDNode *N, 9289 TargetLowering::DAGCombinerInfo &DCI, 9290 SelectionDAG &DAG) { 9291 // Wait until after everything is legalized to try this. That way we have 9292 // legal vector types and such. 9293 if (DCI.isBeforeLegalizeOps()) 9294 return SDValue(); 9295 // Transform a scalar conversion of a value from a lane extract into a 9296 // lane extract of a vector conversion. E.g., from foo1 to foo2: 9297 // double foo1(int64x2_t a) { return vcvtd_n_f64_s64(a[1], 9); } 9298 // double foo2(int64x2_t a) { return vcvtq_n_f64_s64(a, 9)[1]; } 9299 // 9300 // The second form interacts better with instruction selection and the 9301 // register allocator to avoid cross-class register copies that aren't 9302 // coalescable due to a lane reference. 9303 9304 // Check the operand and see if it originates from a lane extract. 9305 SDValue Op1 = N->getOperand(1); 9306 if (Op1.getOpcode() == ISD::EXTRACT_VECTOR_ELT) { 9307 // Yep, no additional predication needed. Perform the transform. 9308 SDValue IID = N->getOperand(0); 9309 SDValue Shift = N->getOperand(2); 9310 SDValue Vec = Op1.getOperand(0); 9311 SDValue Lane = Op1.getOperand(1); 9312 EVT ResTy = N->getValueType(0); 9313 EVT VecResTy; 9314 SDLoc DL(N); 9315 9316 // The vector width should be 128 bits by the time we get here, even 9317 // if it started as 64 bits (the extract_vector handling will have 9318 // done so). 9319 assert(Vec.getValueSizeInBits() == 128 && 9320 "unexpected vector size on extract_vector_elt!"); 9321 if (Vec.getValueType() == MVT::v4i32) 9322 VecResTy = MVT::v4f32; 9323 else if (Vec.getValueType() == MVT::v2i64) 9324 VecResTy = MVT::v2f64; 9325 else 9326 llvm_unreachable("unexpected vector type!"); 9327 9328 SDValue Convert = 9329 DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, VecResTy, IID, Vec, Shift); 9330 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResTy, Convert, Lane); 9331 } 9332 return SDValue(); 9333 } 9334 9335 // AArch64 high-vector "long" operations are formed by performing the non-high 9336 // version on an extract_subvector of each operand which gets the high half: 9337 // 9338 // (longop2 LHS, RHS) == (longop (extract_high LHS), (extract_high RHS)) 9339 // 9340 // However, there are cases which don't have an extract_high explicitly, but 9341 // have another operation that can be made compatible with one for free. For 9342 // example: 9343 // 9344 // (dupv64 scalar) --> (extract_high (dup128 scalar)) 9345 // 9346 // This routine does the actual conversion of such DUPs, once outer routines 9347 // have determined that everything else is in order. 9348 // It also supports immediate DUP-like nodes (MOVI/MVNi), which we can fold 9349 // similarly here. 
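// For example (a sketch): (v4i16 (AArch64ISD::DUP s)) is rebuilt as
//   (extract_subvector (v8i16 (AArch64ISD::DUP s)), (i64 4))
// i.e. the high half of the equivalent 128-bit DUP, which the extract_high
// patterns can then match directly.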
static SDValue tryExtendDUPToExtractHigh(SDValue N, SelectionDAG &DAG) {
  switch (N.getOpcode()) {
  case AArch64ISD::DUP:
  case AArch64ISD::DUPLANE8:
  case AArch64ISD::DUPLANE16:
  case AArch64ISD::DUPLANE32:
  case AArch64ISD::DUPLANE64:
  case AArch64ISD::MOVI:
  case AArch64ISD::MOVIshift:
  case AArch64ISD::MOVIedit:
  case AArch64ISD::MOVImsl:
  case AArch64ISD::MVNIshift:
  case AArch64ISD::MVNImsl:
    break;
  default:
    // FMOV could be supported, but isn't very useful, as it would only occur
    // if you passed a bitcast'd floating point immediate to an eligible long
    // integer op (addl, smull, ...).
    return SDValue();
  }

  MVT NarrowTy = N.getSimpleValueType();
  if (!NarrowTy.is64BitVector())
    return SDValue();

  MVT ElementTy = NarrowTy.getVectorElementType();
  unsigned NumElems = NarrowTy.getVectorNumElements();
  MVT NewVT = MVT::getVectorVT(ElementTy, NumElems * 2);

  SDLoc dl(N);
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, dl, NarrowTy,
                     DAG.getNode(N->getOpcode(), dl, NewVT, N->ops()),
                     DAG.getConstant(NumElems, dl, MVT::i64));
}

static bool isEssentiallyExtractSubvector(SDValue N) {
  if (N.getOpcode() == ISD::EXTRACT_SUBVECTOR)
    return true;

  return N.getOpcode() == ISD::BITCAST &&
         N.getOperand(0).getOpcode() == ISD::EXTRACT_SUBVECTOR;
}

/// Helper structure to keep track of ISD::SET_CC operands.
struct GenericSetCCInfo {
  const SDValue *Opnd0;
  const SDValue *Opnd1;
  ISD::CondCode CC;
};

/// Helper structure to keep track of a SET_CC lowered into AArch64 code.
struct AArch64SetCCInfo {
  const SDValue *Cmp;
  AArch64CC::CondCode CC;
};

/// Helper structure to keep track of SetCC information.
union SetCCInfo {
  GenericSetCCInfo Generic;
  AArch64SetCCInfo AArch64;
};

/// Helper structure to be able to read SetCC information. If the IsAArch64
/// field is set to true, Info is an AArch64SetCCInfo; otherwise Info is a
/// GenericSetCCInfo.
struct SetCCInfoAndKind {
  SetCCInfo Info;
  bool IsAArch64;
};

/// Check whether or not \p Op is a SET_CC operation, either a generic or an
/// AArch64 lowered one.
/// \p SetCCInfo is filled accordingly.
/// \post SetCCInfo is meaningful only when this function returns true.
/// \return True when Op is a kind of SET_CC operation.
static bool isSetCC(SDValue Op, SetCCInfoAndKind &SetCCInfo) {
  // If this is a setcc, this is straightforward.
  if (Op.getOpcode() == ISD::SETCC) {
    SetCCInfo.Info.Generic.Opnd0 = &Op.getOperand(0);
    SetCCInfo.Info.Generic.Opnd1 = &Op.getOperand(1);
    SetCCInfo.Info.Generic.CC = cast<CondCodeSDNode>(Op.getOperand(2))->get();
    SetCCInfo.IsAArch64 = false;
    return true;
  }
  // Otherwise, check if this is a matching csel instruction.
  // In other words:
  //   - csel 1, 0, cc
  //   - csel 0, 1, !cc
  if (Op.getOpcode() != AArch64ISD::CSEL)
    return false;
  // Set the information about the operands.
  // TODO: we want the operands of the Cmp not the csel
  SetCCInfo.Info.AArch64.Cmp = &Op.getOperand(3);
  SetCCInfo.IsAArch64 = true;
  SetCCInfo.Info.AArch64.CC = static_cast<AArch64CC::CondCode>(
      cast<ConstantSDNode>(Op.getOperand(2))->getZExtValue());

  // Check that the operands match the constraints:
  // (1) Both operands must be constants.
  // (2) One must be 1 and the other must be 0.
  ConstantSDNode *TValue = dyn_cast<ConstantSDNode>(Op.getOperand(0));
  ConstantSDNode *FValue = dyn_cast<ConstantSDNode>(Op.getOperand(1));

  // Check (1).
  if (!TValue || !FValue)
    return false;

  // Check (2).
  if (!TValue->isOne()) {
    // Update the comparison when we are interested in !cc.
    std::swap(TValue, FValue);
    SetCCInfo.Info.AArch64.CC =
        AArch64CC::getInvertedCondCode(SetCCInfo.Info.AArch64.CC);
  }
  return TValue->isOne() && FValue->isNullValue();
}

// Returns true if Op is setcc or zext of setcc.
static bool isSetCCOrZExtSetCC(const SDValue& Op, SetCCInfoAndKind &Info) {
  if (isSetCC(Op, Info))
    return true;
  return ((Op.getOpcode() == ISD::ZERO_EXTEND) &&
          isSetCC(Op->getOperand(0), Info));
}

// The folding we want to perform is:
// (add x, [zext] (setcc cc ...) )
//   -->
// (csel x, (add x, 1), !cc ...)
//
// The latter will get matched to a CSINC instruction.
static SDValue performSetccAddFolding(SDNode *Op, SelectionDAG &DAG) {
  assert(Op && Op->getOpcode() == ISD::ADD && "Unexpected operation!");
  SDValue LHS = Op->getOperand(0);
  SDValue RHS = Op->getOperand(1);
  SetCCInfoAndKind InfoAndKind;

  // If neither operand is a SET_CC, give up.
  if (!isSetCCOrZExtSetCC(LHS, InfoAndKind)) {
    std::swap(LHS, RHS);
    if (!isSetCCOrZExtSetCC(LHS, InfoAndKind))
      return SDValue();
  }

  // FIXME: This could be generalized to work for FP comparisons.
  EVT CmpVT = InfoAndKind.IsAArch64
                  ? InfoAndKind.Info.AArch64.Cmp->getOperand(0).getValueType()
                  : InfoAndKind.Info.Generic.Opnd0->getValueType();
  if (CmpVT != MVT::i32 && CmpVT != MVT::i64)
    return SDValue();

  SDValue CCVal;
  SDValue Cmp;
  SDLoc dl(Op);
  if (InfoAndKind.IsAArch64) {
    CCVal = DAG.getConstant(
        AArch64CC::getInvertedCondCode(InfoAndKind.Info.AArch64.CC), dl,
        MVT::i32);
    Cmp = *InfoAndKind.Info.AArch64.Cmp;
  } else
    Cmp = getAArch64Cmp(*InfoAndKind.Info.Generic.Opnd0,
                        *InfoAndKind.Info.Generic.Opnd1,
                        ISD::getSetCCInverse(InfoAndKind.Info.Generic.CC, true),
                        CCVal, DAG, dl);

  EVT VT = Op->getValueType(0);
  LHS = DAG.getNode(ISD::ADD, dl, VT, RHS, DAG.getConstant(1, dl, VT));
  return DAG.getNode(AArch64ISD::CSEL, dl, VT, RHS, LHS, CCVal, Cmp);
}

// The basic add/sub long vector instructions have variants with "2" on the end
// which act on the high-half of their inputs. They are normally matched by
// patterns like:
//
// (add (zeroext (extract_high LHS)),
//      (zeroext (extract_high RHS)))
// -> uaddl2 vD, vN, vM
//
// However, if one of the extracts is something like a duplicate, this
// instruction can still be used profitably. This function puts the DAG into a
// more appropriate form for those patterns to trigger.
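// For example (illustrative): in
//   (add (zext (extract_high L)), (zext (v8i8 (dup d))))
// the DUP can be rewritten as the high half of a 128-bit DUP, after which the
// whole expression matches uaddl2.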
9532 static SDValue performAddSubLongCombine(SDNode *N, 9533 TargetLowering::DAGCombinerInfo &DCI, 9534 SelectionDAG &DAG) { 9535 if (DCI.isBeforeLegalizeOps()) 9536 return SDValue(); 9537 9538 MVT VT = N->getSimpleValueType(0); 9539 if (!VT.is128BitVector()) { 9540 if (N->getOpcode() == ISD::ADD) 9541 return performSetccAddFolding(N, DAG); 9542 return SDValue(); 9543 } 9544 9545 // Make sure both branches are extended in the same way. 9546 SDValue LHS = N->getOperand(0); 9547 SDValue RHS = N->getOperand(1); 9548 if ((LHS.getOpcode() != ISD::ZERO_EXTEND && 9549 LHS.getOpcode() != ISD::SIGN_EXTEND) || 9550 LHS.getOpcode() != RHS.getOpcode()) 9551 return SDValue(); 9552 9553 unsigned ExtType = LHS.getOpcode(); 9554 9555 // It's not worth doing if at least one of the inputs isn't already an 9556 // extract, but we don't know which it'll be so we have to try both. 9557 if (isEssentiallyExtractSubvector(LHS.getOperand(0))) { 9558 RHS = tryExtendDUPToExtractHigh(RHS.getOperand(0), DAG); 9559 if (!RHS.getNode()) 9560 return SDValue(); 9561 9562 RHS = DAG.getNode(ExtType, SDLoc(N), VT, RHS); 9563 } else if (isEssentiallyExtractSubvector(RHS.getOperand(0))) { 9564 LHS = tryExtendDUPToExtractHigh(LHS.getOperand(0), DAG); 9565 if (!LHS.getNode()) 9566 return SDValue(); 9567 9568 LHS = DAG.getNode(ExtType, SDLoc(N), VT, LHS); 9569 } 9570 9571 return DAG.getNode(N->getOpcode(), SDLoc(N), VT, LHS, RHS); 9572 } 9573 9574 // Massage DAGs which we can use the high-half "long" operations on into 9575 // something isel will recognize better. E.g. 9576 // 9577 // (aarch64_neon_umull (extract_high vec) (dupv64 scalar)) --> 9578 // (aarch64_neon_umull (extract_high (v2i64 vec))) 9579 // (extract_high (v2i64 (dup128 scalar))))) 9580 // 9581 static SDValue tryCombineLongOpWithDup(unsigned IID, SDNode *N, 9582 TargetLowering::DAGCombinerInfo &DCI, 9583 SelectionDAG &DAG) { 9584 if (DCI.isBeforeLegalizeOps()) 9585 return SDValue(); 9586 9587 SDValue LHS = N->getOperand(1); 9588 SDValue RHS = N->getOperand(2); 9589 assert(LHS.getValueType().is64BitVector() && 9590 RHS.getValueType().is64BitVector() && 9591 "unexpected shape for long operation"); 9592 9593 // Either node could be a DUP, but it's not worth doing both of them (you'd 9594 // just as well use the non-high version) so look for a corresponding extract 9595 // operation on the other "wing". 
9596 if (isEssentiallyExtractSubvector(LHS)) { 9597 RHS = tryExtendDUPToExtractHigh(RHS, DAG); 9598 if (!RHS.getNode()) 9599 return SDValue(); 9600 } else if (isEssentiallyExtractSubvector(RHS)) { 9601 LHS = tryExtendDUPToExtractHigh(LHS, DAG); 9602 if (!LHS.getNode()) 9603 return SDValue(); 9604 } 9605 9606 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), N->getValueType(0), 9607 N->getOperand(0), LHS, RHS); 9608 } 9609 9610 static SDValue tryCombineShiftImm(unsigned IID, SDNode *N, SelectionDAG &DAG) { 9611 MVT ElemTy = N->getSimpleValueType(0).getScalarType(); 9612 unsigned ElemBits = ElemTy.getSizeInBits(); 9613 9614 int64_t ShiftAmount; 9615 if (BuildVectorSDNode *BVN = dyn_cast<BuildVectorSDNode>(N->getOperand(2))) { 9616 APInt SplatValue, SplatUndef; 9617 unsigned SplatBitSize; 9618 bool HasAnyUndefs; 9619 if (!BVN->isConstantSplat(SplatValue, SplatUndef, SplatBitSize, 9620 HasAnyUndefs, ElemBits) || 9621 SplatBitSize != ElemBits) 9622 return SDValue(); 9623 9624 ShiftAmount = SplatValue.getSExtValue(); 9625 } else if (ConstantSDNode *CVN = dyn_cast<ConstantSDNode>(N->getOperand(2))) { 9626 ShiftAmount = CVN->getSExtValue(); 9627 } else 9628 return SDValue(); 9629 9630 unsigned Opcode; 9631 bool IsRightShift; 9632 switch (IID) { 9633 default: 9634 llvm_unreachable("Unknown shift intrinsic"); 9635 case Intrinsic::aarch64_neon_sqshl: 9636 Opcode = AArch64ISD::SQSHL_I; 9637 IsRightShift = false; 9638 break; 9639 case Intrinsic::aarch64_neon_uqshl: 9640 Opcode = AArch64ISD::UQSHL_I; 9641 IsRightShift = false; 9642 break; 9643 case Intrinsic::aarch64_neon_srshl: 9644 Opcode = AArch64ISD::SRSHR_I; 9645 IsRightShift = true; 9646 break; 9647 case Intrinsic::aarch64_neon_urshl: 9648 Opcode = AArch64ISD::URSHR_I; 9649 IsRightShift = true; 9650 break; 9651 case Intrinsic::aarch64_neon_sqshlu: 9652 Opcode = AArch64ISD::SQSHLU_I; 9653 IsRightShift = false; 9654 break; 9655 } 9656 9657 if (IsRightShift && ShiftAmount <= -1 && ShiftAmount >= -(int)ElemBits) { 9658 SDLoc dl(N); 9659 return DAG.getNode(Opcode, dl, N->getValueType(0), N->getOperand(1), 9660 DAG.getConstant(-ShiftAmount, dl, MVT::i32)); 9661 } else if (!IsRightShift && ShiftAmount >= 0 && ShiftAmount < ElemBits) { 9662 SDLoc dl(N); 9663 return DAG.getNode(Opcode, dl, N->getValueType(0), N->getOperand(1), 9664 DAG.getConstant(ShiftAmount, dl, MVT::i32)); 9665 } 9666 9667 return SDValue(); 9668 } 9669 9670 // The CRC32[BH] instructions ignore the high bits of their data operand. Since 9671 // the intrinsics must be legal and take an i32, this means there's almost 9672 // certainly going to be a zext in the DAG which we can eliminate. 
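// For example (a sketch): (int_aarch64_crc32b W1, (and W2, #0xff)) can drop
// the AND and become (int_aarch64_crc32b W1, W2), because CRC32B only reads
// the low 8 bits of its data operand anyway.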
9673 static SDValue tryCombineCRC32(unsigned Mask, SDNode *N, SelectionDAG &DAG) { 9674 SDValue AndN = N->getOperand(2); 9675 if (AndN.getOpcode() != ISD::AND) 9676 return SDValue(); 9677 9678 ConstantSDNode *CMask = dyn_cast<ConstantSDNode>(AndN.getOperand(1)); 9679 if (!CMask || CMask->getZExtValue() != Mask) 9680 return SDValue(); 9681 9682 return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, SDLoc(N), MVT::i32, 9683 N->getOperand(0), N->getOperand(1), AndN.getOperand(0)); 9684 } 9685 9686 static SDValue combineAcrossLanesIntrinsic(unsigned Opc, SDNode *N, 9687 SelectionDAG &DAG) { 9688 SDLoc dl(N); 9689 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl, N->getValueType(0), 9690 DAG.getNode(Opc, dl, 9691 N->getOperand(1).getSimpleValueType(), 9692 N->getOperand(1)), 9693 DAG.getConstant(0, dl, MVT::i64)); 9694 } 9695 9696 static SDValue performIntrinsicCombine(SDNode *N, 9697 TargetLowering::DAGCombinerInfo &DCI, 9698 const AArch64Subtarget *Subtarget) { 9699 SelectionDAG &DAG = DCI.DAG; 9700 unsigned IID = getIntrinsicID(N); 9701 switch (IID) { 9702 default: 9703 break; 9704 case Intrinsic::aarch64_neon_vcvtfxs2fp: 9705 case Intrinsic::aarch64_neon_vcvtfxu2fp: 9706 return tryCombineFixedPointConvert(N, DCI, DAG); 9707 case Intrinsic::aarch64_neon_saddv: 9708 return combineAcrossLanesIntrinsic(AArch64ISD::SADDV, N, DAG); 9709 case Intrinsic::aarch64_neon_uaddv: 9710 return combineAcrossLanesIntrinsic(AArch64ISD::UADDV, N, DAG); 9711 case Intrinsic::aarch64_neon_sminv: 9712 return combineAcrossLanesIntrinsic(AArch64ISD::SMINV, N, DAG); 9713 case Intrinsic::aarch64_neon_uminv: 9714 return combineAcrossLanesIntrinsic(AArch64ISD::UMINV, N, DAG); 9715 case Intrinsic::aarch64_neon_smaxv: 9716 return combineAcrossLanesIntrinsic(AArch64ISD::SMAXV, N, DAG); 9717 case Intrinsic::aarch64_neon_umaxv: 9718 return combineAcrossLanesIntrinsic(AArch64ISD::UMAXV, N, DAG); 9719 case Intrinsic::aarch64_neon_fmax: 9720 return DAG.getNode(ISD::FMAXNAN, SDLoc(N), N->getValueType(0), 9721 N->getOperand(1), N->getOperand(2)); 9722 case Intrinsic::aarch64_neon_fmin: 9723 return DAG.getNode(ISD::FMINNAN, SDLoc(N), N->getValueType(0), 9724 N->getOperand(1), N->getOperand(2)); 9725 case Intrinsic::aarch64_neon_fmaxnm: 9726 return DAG.getNode(ISD::FMAXNUM, SDLoc(N), N->getValueType(0), 9727 N->getOperand(1), N->getOperand(2)); 9728 case Intrinsic::aarch64_neon_fminnm: 9729 return DAG.getNode(ISD::FMINNUM, SDLoc(N), N->getValueType(0), 9730 N->getOperand(1), N->getOperand(2)); 9731 case Intrinsic::aarch64_neon_smull: 9732 case Intrinsic::aarch64_neon_umull: 9733 case Intrinsic::aarch64_neon_pmull: 9734 case Intrinsic::aarch64_neon_sqdmull: 9735 return tryCombineLongOpWithDup(IID, N, DCI, DAG); 9736 case Intrinsic::aarch64_neon_sqshl: 9737 case Intrinsic::aarch64_neon_uqshl: 9738 case Intrinsic::aarch64_neon_sqshlu: 9739 case Intrinsic::aarch64_neon_srshl: 9740 case Intrinsic::aarch64_neon_urshl: 9741 return tryCombineShiftImm(IID, N, DAG); 9742 case Intrinsic::aarch64_crc32b: 9743 case Intrinsic::aarch64_crc32cb: 9744 return tryCombineCRC32(0xff, N, DAG); 9745 case Intrinsic::aarch64_crc32h: 9746 case Intrinsic::aarch64_crc32ch: 9747 return tryCombineCRC32(0xffff, N, DAG); 9748 } 9749 return SDValue(); 9750 } 9751 9752 static SDValue performExtendCombine(SDNode *N, 9753 TargetLowering::DAGCombinerInfo &DCI, 9754 SelectionDAG &DAG) { 9755 // If we see something like (zext (sabd (extract_high ...), (DUP ...))) then 9756 // we can convert that DUP into another extract_high (of a bigger DUP), which 9757 // helps the backend to 
// decide that an sabdl2 would be useful, saving a real extract_high operation.
  if (!DCI.isBeforeLegalizeOps() && N->getOpcode() == ISD::ZERO_EXTEND &&
      N->getOperand(0).getOpcode() == ISD::INTRINSIC_WO_CHAIN) {
    SDNode *ABDNode = N->getOperand(0).getNode();
    unsigned IID = getIntrinsicID(ABDNode);
    if (IID == Intrinsic::aarch64_neon_sabd ||
        IID == Intrinsic::aarch64_neon_uabd) {
      SDValue NewABD = tryCombineLongOpWithDup(IID, ABDNode, DCI, DAG);
      if (!NewABD.getNode())
        return SDValue();

      return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), N->getValueType(0),
                         NewABD);
    }
  }

  // This is effectively a custom type legalization for AArch64.
  //
  // Type legalization will split an extend of a small, legal, type to a larger
  // illegal type by first splitting the destination type, often creating
  // illegal source types, which then get legalized in isel-confusing ways,
  // leading to really terrible codegen. E.g.,
  //   %result = v8i32 sext v8i8 %value
  // becomes
  //   %losrc = extract_subreg %value, ...
  //   %hisrc = extract_subreg %value, ...
  //   %lo = v4i32 sext v4i8 %losrc
  //   %hi = v4i32 sext v4i8 %hisrc
  // Things go rapidly downhill from there.
  //
  // For AArch64, the [sz]ext vector instructions can only go up one element
  // size, so we can, e.g., extend from i8 to i16, but going from i8 to i32
  // takes two instructions.
  //
  // This implies that the most efficient way to do the extend from v8i8
  // to two v4i32 values is to first extend the v8i8 to v8i16, then let
  // the normal splitting happen for the v8i16->v8i32.

  // This is pre-legalization to catch some cases where the default
  // type legalization will create ill-tempered code.
  if (!DCI.isBeforeLegalizeOps())
    return SDValue();

  // We're only interested in cleaning things up for non-legal vector types
  // here. If both the source and destination are legal, things will just
  // work naturally without any fiddling.
  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
  EVT ResVT = N->getValueType(0);
  if (!ResVT.isVector() || TLI.isTypeLegal(ResVT))
    return SDValue();
  // If the vector type isn't a simple VT, it's beyond the scope of what
  // we're worried about here. Let legalization do its thing and hope for
  // the best.
  SDValue Src = N->getOperand(0);
  EVT SrcVT = Src->getValueType(0);
  if (!ResVT.isSimple() || !SrcVT.isSimple())
    return SDValue();

  // If the source VT is a 64-bit vector, we can play games and get the
  // better results we want.
  if (SrcVT.getSizeInBits() != 64)
    return SDValue();

  unsigned SrcEltSize = SrcVT.getScalarSizeInBits();
  unsigned ElementCount = SrcVT.getVectorNumElements();
  SrcVT = MVT::getVectorVT(MVT::getIntegerVT(SrcEltSize * 2), ElementCount);
  SDLoc DL(N);
  Src = DAG.getNode(N->getOpcode(), DL, SrcVT, Src);

  // Now split the rest of the operation into two halves, each with a 64
  // bit source.
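  // For example (illustrative): for v8i8 -> v8i32, Src is now the v8i16
  // intermediate; it is split into two v4i16 halves, each extended to v4i32,
  // and the halves are concatenated back together at the end.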
  EVT LoVT, HiVT;
  SDValue Lo, Hi;
  unsigned NumElements = ResVT.getVectorNumElements();
  assert(!(NumElements & 1) && "Splitting vector, but not in half!");
  LoVT = HiVT = EVT::getVectorVT(*DAG.getContext(),
                                 ResVT.getVectorElementType(), NumElements / 2);

  EVT InNVT = EVT::getVectorVT(*DAG.getContext(), SrcVT.getVectorElementType(),
                               LoVT.getVectorNumElements());
  Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InNVT, Src,
                   DAG.getConstant(0, DL, MVT::i64));
  Hi = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InNVT, Src,
                   DAG.getConstant(InNVT.getVectorNumElements(), DL, MVT::i64));
  Lo = DAG.getNode(N->getOpcode(), DL, LoVT, Lo);
  Hi = DAG.getNode(N->getOpcode(), DL, HiVT, Hi);

  // Now combine the parts back together so we still have a single result
  // like the combiner expects.
  return DAG.getNode(ISD::CONCAT_VECTORS, DL, ResVT, Lo, Hi);
}

static SDValue splitStoreSplat(SelectionDAG &DAG, StoreSDNode &St,
                               SDValue SplatVal, unsigned NumVecElts) {
  unsigned OrigAlignment = St.getAlignment();
  unsigned EltOffset = SplatVal.getValueType().getSizeInBits() / 8;

  // Create scalar stores. This is at least as good as the code sequence for a
  // split unaligned store which is a dup.s, ext.b, and two stores.
  // Most of the time the three stores should be replaced by store pair
  // instructions (stp).
  SDLoc DL(&St);
  SDValue BasePtr = St.getBasePtr();
  uint64_t BaseOffset = 0;

  const MachinePointerInfo &PtrInfo = St.getPointerInfo();
  SDValue NewST1 =
      DAG.getStore(St.getChain(), DL, SplatVal, BasePtr, PtrInfo,
                   OrigAlignment, St.getMemOperand()->getFlags());

  // As this is in ISel, we will not merge this add, which may degrade results.
  if (BasePtr->getOpcode() == ISD::ADD &&
      isa<ConstantSDNode>(BasePtr->getOperand(1))) {
    BaseOffset = cast<ConstantSDNode>(BasePtr->getOperand(1))->getSExtValue();
    BasePtr = BasePtr->getOperand(0);
  }

  unsigned Offset = EltOffset;
  while (--NumVecElts) {
    unsigned Alignment = MinAlign(OrigAlignment, Offset);
    SDValue OffsetPtr =
        DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr,
                    DAG.getConstant(BaseOffset + Offset, DL, MVT::i64));
    NewST1 = DAG.getStore(NewST1.getValue(0), DL, SplatVal, OffsetPtr,
                          PtrInfo.getWithOffset(Offset), Alignment,
                          St.getMemOperand()->getFlags());
    Offset += EltOffset;
  }
  return NewST1;
}

/// Replace a splat of zeros to a vector store by scalar stores of WZR/XZR. The
/// load store optimizer pass will merge them to store pair stores. This should
/// be better than a movi to create the vector zero followed by a vector store
/// if the zero constant is not re-used, since one instruction and one register
/// live range will be removed.
///
/// For example, the final generated code should be:
///
///   stp xzr, xzr, [x0]
///
/// instead of:
///
///   movi v0.2d, #0
///   str q0, [x0]
///
static SDValue replaceZeroVectorStore(SelectionDAG &DAG, StoreSDNode &St) {
  SDValue StVal = St.getValue();
  EVT VT = StVal.getValueType();

  // It is beneficial to scalarize a zero splat store for 2 or 3 i64 elements or
  // 2, 3 or 4 i32 elements.
9910 int NumVecElts = VT.getVectorNumElements(); 9911 if (!(((NumVecElts == 2 || NumVecElts == 3) && 9912 VT.getVectorElementType().getSizeInBits() == 64) || 9913 ((NumVecElts == 2 || NumVecElts == 3 || NumVecElts == 4) && 9914 VT.getVectorElementType().getSizeInBits() == 32))) 9915 return SDValue(); 9916 9917 if (StVal.getOpcode() != ISD::BUILD_VECTOR) 9918 return SDValue(); 9919 9920 // If the zero constant has more than one use then the vector store could be 9921 // better since the constant mov will be amortized and stp q instructions 9922 // should be able to be formed. 9923 if (!StVal.hasOneUse()) 9924 return SDValue(); 9925 9926 // If the immediate offset of the address operand is too large for the stp 9927 // instruction, then bail out. 9928 if (DAG.isBaseWithConstantOffset(St.getBasePtr())) { 9929 int64_t Offset = St.getBasePtr()->getConstantOperandVal(1); 9930 if (Offset < -512 || Offset > 504) 9931 return SDValue(); 9932 } 9933 9934 for (int I = 0; I < NumVecElts; ++I) { 9935 SDValue EltVal = StVal.getOperand(I); 9936 if (!isNullConstant(EltVal) && !isNullFPConstant(EltVal)) 9937 return SDValue(); 9938 } 9939 9940 // Use a CopyFromReg WZR/XZR here to prevent 9941 // DAGCombiner::MergeConsecutiveStores from undoing this transformation. 9942 SDLoc DL(&St); 9943 unsigned ZeroReg; 9944 EVT ZeroVT; 9945 if (VT.getVectorElementType().getSizeInBits() == 32) { 9946 ZeroReg = AArch64::WZR; 9947 ZeroVT = MVT::i32; 9948 } else { 9949 ZeroReg = AArch64::XZR; 9950 ZeroVT = MVT::i64; 9951 } 9952 SDValue SplatVal = 9953 DAG.getCopyFromReg(DAG.getEntryNode(), DL, ZeroReg, ZeroVT); 9954 return splitStoreSplat(DAG, St, SplatVal, NumVecElts); 9955 } 9956 9957 /// Replace a splat of a scalar to a vector store by scalar stores of the scalar 9958 /// value. The load store optimizer pass will merge them to store pair stores. 9959 /// This has better performance than a splat of the scalar followed by a split 9960 /// vector store. Even if the stores are not merged it is four stores vs a dup, 9961 /// followed by an ext.b and two stores. 9962 static SDValue replaceSplatVectorStore(SelectionDAG &DAG, StoreSDNode &St) { 9963 SDValue StVal = St.getValue(); 9964 EVT VT = StVal.getValueType(); 9965 9966 // Don't replace floating point stores, they possibly won't be transformed to 9967 // stp because of the store pair suppress pass. 9968 if (VT.isFloatingPoint()) 9969 return SDValue(); 9970 9971 // We can express a splat as store pair(s) for 2 or 4 elements. 9972 unsigned NumVecElts = VT.getVectorNumElements(); 9973 if (NumVecElts != 4 && NumVecElts != 2) 9974 return SDValue(); 9975 9976 // Check that this is a splat. 9977 // Make sure that each of the relevant vector element locations are inserted 9978 // to, i.e. 0 and 1 for v2i64 and 0, 1, 2, 3 for v4i32. 9979 std::bitset<4> IndexNotInserted((1 << NumVecElts) - 1); 9980 SDValue SplatVal; 9981 for (unsigned I = 0; I < NumVecElts; ++I) { 9982 // Check for insert vector elements. 9983 if (StVal.getOpcode() != ISD::INSERT_VECTOR_ELT) 9984 return SDValue(); 9985 9986 // Check that same value is inserted at each vector element. 9987 if (I == 0) 9988 SplatVal = StVal.getOperand(1); 9989 else if (StVal.getOperand(1) != SplatVal) 9990 return SDValue(); 9991 9992 // Check insert element index. 
9993 ConstantSDNode *CIndex = dyn_cast<ConstantSDNode>(StVal.getOperand(2)); 9994 if (!CIndex) 9995 return SDValue(); 9996 uint64_t IndexVal = CIndex->getZExtValue(); 9997 if (IndexVal >= NumVecElts) 9998 return SDValue(); 9999 IndexNotInserted.reset(IndexVal); 10000 10001 StVal = StVal.getOperand(0); 10002 } 10003 // Check that all vector element locations were inserted to. 10004 if (IndexNotInserted.any()) 10005 return SDValue(); 10006 10007 return splitStoreSplat(DAG, St, SplatVal, NumVecElts); 10008 } 10009 10010 static SDValue splitStores(SDNode *N, TargetLowering::DAGCombinerInfo &DCI, 10011 SelectionDAG &DAG, 10012 const AArch64Subtarget *Subtarget) { 10013 10014 StoreSDNode *S = cast<StoreSDNode>(N); 10015 if (S->isVolatile() || S->isIndexed()) 10016 return SDValue(); 10017 10018 SDValue StVal = S->getValue(); 10019 EVT VT = StVal.getValueType(); 10020 if (!VT.isVector()) 10021 return SDValue(); 10022 10023 // If we get a splat of zeros, convert this vector store to a store of 10024 // scalars. They will be merged into store pairs of xzr thereby removing one 10025 // instruction and one register. 10026 if (SDValue ReplacedZeroSplat = replaceZeroVectorStore(DAG, *S)) 10027 return ReplacedZeroSplat; 10028 10029 // FIXME: The logic for deciding if an unaligned store should be split should 10030 // be included in TLI.allowsMisalignedMemoryAccesses(), and there should be 10031 // a call to that function here. 10032 10033 if (!Subtarget->isMisaligned128StoreSlow()) 10034 return SDValue(); 10035 10036 // Don't split at -Oz. 10037 if (DAG.getMachineFunction().getFunction().optForMinSize()) 10038 return SDValue(); 10039 10040 // Don't split v2i64 vectors. Memcpy lowering produces those and splitting 10041 // those up regresses performance on micro-benchmarks and olden/bh. 10042 if (VT.getVectorNumElements() < 2 || VT == MVT::v2i64) 10043 return SDValue(); 10044 10045 // Split unaligned 16B stores. They are terrible for performance. 10046 // Don't split stores with alignment of 1 or 2. Code that uses clang vector 10047 // extensions can use this to mark that it does not want splitting to happen 10048 // (by underspecifying alignment to be 1 or 2). Furthermore, the chance of 10049 // eliminating alignment hazards is only 1 in 8 for alignment of 2. 10050 if (VT.getSizeInBits() != 128 || S->getAlignment() >= 16 || 10051 S->getAlignment() <= 2) 10052 return SDValue(); 10053 10054 // If we get a splat of a scalar convert this vector store to a store of 10055 // scalars. They will be merged into store pairs thereby removing two 10056 // instructions. 10057 if (SDValue ReplacedSplat = replaceSplatVectorStore(DAG, *S)) 10058 return ReplacedSplat; 10059 10060 SDLoc DL(S); 10061 unsigned NumElts = VT.getVectorNumElements() / 2; 10062 // Split VT into two. 
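  // For example (a sketch): an unaligned v16i8 store becomes two v8i8 stores,
  // one at the base address and one at offset 8, each of which can use a
  // 64-bit STR and so avoids the slow misaligned 128-bit case.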
  EVT HalfVT =
      EVT::getVectorVT(*DAG.getContext(), VT.getVectorElementType(), NumElts);
  SDValue SubVector0 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, StVal,
                                   DAG.getConstant(0, DL, MVT::i64));
  SDValue SubVector1 = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HalfVT, StVal,
                                   DAG.getConstant(NumElts, DL, MVT::i64));
  SDValue BasePtr = S->getBasePtr();
  SDValue NewST1 =
      DAG.getStore(S->getChain(), DL, SubVector0, BasePtr, S->getPointerInfo(),
                   S->getAlignment(), S->getMemOperand()->getFlags());
  SDValue OffsetPtr = DAG.getNode(ISD::ADD, DL, MVT::i64, BasePtr,
                                  DAG.getConstant(8, DL, MVT::i64));
  return DAG.getStore(NewST1.getValue(0), DL, SubVector1, OffsetPtr,
                      S->getPointerInfo(), S->getAlignment(),
                      S->getMemOperand()->getFlags());
}

/// Target-specific DAG combine function for post-increment LD1 (lane) and
/// post-increment LD1R.
static SDValue performPostLD1Combine(SDNode *N,
                                     TargetLowering::DAGCombinerInfo &DCI,
                                     bool IsLaneOp) {
  if (DCI.isBeforeLegalizeOps())
    return SDValue();

  SelectionDAG &DAG = DCI.DAG;
  EVT VT = N->getValueType(0);

  unsigned LoadIdx = IsLaneOp ? 1 : 0;
  SDNode *LD = N->getOperand(LoadIdx).getNode();
  // If it is not a LOAD, we cannot do this combine.
  if (LD->getOpcode() != ISD::LOAD)
    return SDValue();

  // The vector lane must be a constant in the LD1LANE opcode.
  SDValue Lane;
  if (IsLaneOp) {
    Lane = N->getOperand(2);
    auto *LaneC = dyn_cast<ConstantSDNode>(Lane);
    if (!LaneC || LaneC->getZExtValue() >= VT.getVectorNumElements())
      return SDValue();
  }

  LoadSDNode *LoadSDN = cast<LoadSDNode>(LD);
  EVT MemVT = LoadSDN->getMemoryVT();
  // Check if memory operand is the same type as the vector element.
  if (MemVT != VT.getVectorElementType())
    return SDValue();

  // Check if there are other uses. If so, do not combine as it will introduce
  // an extra load.
  for (SDNode::use_iterator UI = LD->use_begin(), UE = LD->use_end(); UI != UE;
       ++UI) {
    if (UI.getUse().getResNo() == 1) // Ignore uses of the chain result.
      continue;
    if (*UI != N)
      return SDValue();
  }

  SDValue Addr = LD->getOperand(1);
  SDValue Vector = N->getOperand(0);
  // Search for a use of the address operand that is an increment.
  for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), UE =
       Addr.getNode()->use_end(); UI != UE; ++UI) {
    SDNode *User = *UI;
    if (User->getOpcode() != ISD::ADD
        || UI.getUse().getResNo() != Addr.getResNo())
      continue;

    // Check that the add is independent of the load. Otherwise, folding it
    // would create a cycle.
    if (User->isPredecessorOf(LD) || LD->isPredecessorOf(User))
      continue;
    // Also check that add is not used in the vector operand. This would also
    // create a cycle.
    if (User->isPredecessorOf(Vector.getNode()))
      continue;

    // If the increment is a constant, it must match the memory ref size.
    SDValue Inc = User->getOperand(User->getOperand(0) == Addr ?
1 : 0); 10143 if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) { 10144 uint32_t IncVal = CInc->getZExtValue(); 10145 unsigned NumBytes = VT.getScalarSizeInBits() / 8; 10146 if (IncVal != NumBytes) 10147 continue; 10148 Inc = DAG.getRegister(AArch64::XZR, MVT::i64); 10149 } 10150 10151 // Finally, check that the vector doesn't depend on the load. 10152 // Again, this would create a cycle. 10153 // The load depending on the vector is fine, as that's the case for the 10154 // LD1*post we'll eventually generate anyway. 10155 if (LoadSDN->isPredecessorOf(Vector.getNode())) 10156 continue; 10157 10158 SmallVector<SDValue, 8> Ops; 10159 Ops.push_back(LD->getOperand(0)); // Chain 10160 if (IsLaneOp) { 10161 Ops.push_back(Vector); // The vector to be inserted 10162 Ops.push_back(Lane); // The lane to be inserted in the vector 10163 } 10164 Ops.push_back(Addr); 10165 Ops.push_back(Inc); 10166 10167 EVT Tys[3] = { VT, MVT::i64, MVT::Other }; 10168 SDVTList SDTys = DAG.getVTList(Tys); 10169 unsigned NewOp = IsLaneOp ? AArch64ISD::LD1LANEpost : AArch64ISD::LD1DUPpost; 10170 SDValue UpdN = DAG.getMemIntrinsicNode(NewOp, SDLoc(N), SDTys, Ops, 10171 MemVT, 10172 LoadSDN->getMemOperand()); 10173 10174 // Update the uses. 10175 SDValue NewResults[] = { 10176 SDValue(LD, 0), // The result of load 10177 SDValue(UpdN.getNode(), 2) // Chain 10178 }; 10179 DCI.CombineTo(LD, NewResults); 10180 DCI.CombineTo(N, SDValue(UpdN.getNode(), 0)); // Dup/Inserted Result 10181 DCI.CombineTo(User, SDValue(UpdN.getNode(), 1)); // Write back register 10182 10183 break; 10184 } 10185 return SDValue(); 10186 } 10187 10188 /// Simplify ``Addr`` given that the top byte of it is ignored by HW during 10189 /// address translation. 10190 static bool performTBISimplification(SDValue Addr, 10191 TargetLowering::DAGCombinerInfo &DCI, 10192 SelectionDAG &DAG) { 10193 APInt DemandedMask = APInt::getLowBitsSet(64, 56); 10194 KnownBits Known; 10195 TargetLowering::TargetLoweringOpt TLO(DAG, !DCI.isBeforeLegalize(), 10196 !DCI.isBeforeLegalizeOps()); 10197 const TargetLowering &TLI = DAG.getTargetLoweringInfo(); 10198 if (TLI.SimplifyDemandedBits(Addr, DemandedMask, Known, TLO)) { 10199 DCI.CommitTargetLoweringOpt(TLO); 10200 return true; 10201 } 10202 return false; 10203 } 10204 10205 static SDValue performSTORECombine(SDNode *N, 10206 TargetLowering::DAGCombinerInfo &DCI, 10207 SelectionDAG &DAG, 10208 const AArch64Subtarget *Subtarget) { 10209 if (SDValue Split = splitStores(N, DCI, DAG, Subtarget)) 10210 return Split; 10211 10212 if (Subtarget->supportsAddressTopByteIgnored() && 10213 performTBISimplification(N->getOperand(2), DCI, DAG)) 10214 return SDValue(N, 0); 10215 10216 return SDValue(); 10217 } 10218 10219 10220 /// Target-specific DAG combine function for NEON load/store intrinsics 10221 /// to merge base address updates. 10222 static SDValue performNEONPostLDSTCombine(SDNode *N, 10223 TargetLowering::DAGCombinerInfo &DCI, 10224 SelectionDAG &DAG) { 10225 if (DCI.isBeforeLegalize() || DCI.isCalledByLegalizer()) 10226 return SDValue(); 10227 10228 unsigned AddrOpIdx = N->getNumOperands() - 1; 10229 SDValue Addr = N->getOperand(AddrOpIdx); 10230 10231 // Search for a use of the address operand that is an increment. 
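  // For example (illustrative): an aarch64_neon_ld2 of two v4i32 vectors has a
  // 32-byte footprint, so only (add Addr, #32), or an add of a register
  // increment, can be folded into the post-incrementing LD2post form below.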
10232 for (SDNode::use_iterator UI = Addr.getNode()->use_begin(), 10233 UE = Addr.getNode()->use_end(); UI != UE; ++UI) { 10234 SDNode *User = *UI; 10235 if (User->getOpcode() != ISD::ADD || 10236 UI.getUse().getResNo() != Addr.getResNo()) 10237 continue; 10238 10239 // Check that the add is independent of the load/store. Otherwise, folding 10240 // it would create a cycle. 10241 if (User->isPredecessorOf(N) || N->isPredecessorOf(User)) 10242 continue; 10243 10244 // Find the new opcode for the updating load/store. 10245 bool IsStore = false; 10246 bool IsLaneOp = false; 10247 bool IsDupOp = false; 10248 unsigned NewOpc = 0; 10249 unsigned NumVecs = 0; 10250 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(1))->getZExtValue(); 10251 switch (IntNo) { 10252 default: llvm_unreachable("unexpected intrinsic for Neon base update"); 10253 case Intrinsic::aarch64_neon_ld2: NewOpc = AArch64ISD::LD2post; 10254 NumVecs = 2; break; 10255 case Intrinsic::aarch64_neon_ld3: NewOpc = AArch64ISD::LD3post; 10256 NumVecs = 3; break; 10257 case Intrinsic::aarch64_neon_ld4: NewOpc = AArch64ISD::LD4post; 10258 NumVecs = 4; break; 10259 case Intrinsic::aarch64_neon_st2: NewOpc = AArch64ISD::ST2post; 10260 NumVecs = 2; IsStore = true; break; 10261 case Intrinsic::aarch64_neon_st3: NewOpc = AArch64ISD::ST3post; 10262 NumVecs = 3; IsStore = true; break; 10263 case Intrinsic::aarch64_neon_st4: NewOpc = AArch64ISD::ST4post; 10264 NumVecs = 4; IsStore = true; break; 10265 case Intrinsic::aarch64_neon_ld1x2: NewOpc = AArch64ISD::LD1x2post; 10266 NumVecs = 2; break; 10267 case Intrinsic::aarch64_neon_ld1x3: NewOpc = AArch64ISD::LD1x3post; 10268 NumVecs = 3; break; 10269 case Intrinsic::aarch64_neon_ld1x4: NewOpc = AArch64ISD::LD1x4post; 10270 NumVecs = 4; break; 10271 case Intrinsic::aarch64_neon_st1x2: NewOpc = AArch64ISD::ST1x2post; 10272 NumVecs = 2; IsStore = true; break; 10273 case Intrinsic::aarch64_neon_st1x3: NewOpc = AArch64ISD::ST1x3post; 10274 NumVecs = 3; IsStore = true; break; 10275 case Intrinsic::aarch64_neon_st1x4: NewOpc = AArch64ISD::ST1x4post; 10276 NumVecs = 4; IsStore = true; break; 10277 case Intrinsic::aarch64_neon_ld2r: NewOpc = AArch64ISD::LD2DUPpost; 10278 NumVecs = 2; IsDupOp = true; break; 10279 case Intrinsic::aarch64_neon_ld3r: NewOpc = AArch64ISD::LD3DUPpost; 10280 NumVecs = 3; IsDupOp = true; break; 10281 case Intrinsic::aarch64_neon_ld4r: NewOpc = AArch64ISD::LD4DUPpost; 10282 NumVecs = 4; IsDupOp = true; break; 10283 case Intrinsic::aarch64_neon_ld2lane: NewOpc = AArch64ISD::LD2LANEpost; 10284 NumVecs = 2; IsLaneOp = true; break; 10285 case Intrinsic::aarch64_neon_ld3lane: NewOpc = AArch64ISD::LD3LANEpost; 10286 NumVecs = 3; IsLaneOp = true; break; 10287 case Intrinsic::aarch64_neon_ld4lane: NewOpc = AArch64ISD::LD4LANEpost; 10288 NumVecs = 4; IsLaneOp = true; break; 10289 case Intrinsic::aarch64_neon_st2lane: NewOpc = AArch64ISD::ST2LANEpost; 10290 NumVecs = 2; IsStore = true; IsLaneOp = true; break; 10291 case Intrinsic::aarch64_neon_st3lane: NewOpc = AArch64ISD::ST3LANEpost; 10292 NumVecs = 3; IsStore = true; IsLaneOp = true; break; 10293 case Intrinsic::aarch64_neon_st4lane: NewOpc = AArch64ISD::ST4LANEpost; 10294 NumVecs = 4; IsStore = true; IsLaneOp = true; break; 10295 } 10296 10297 EVT VecTy; 10298 if (IsStore) 10299 VecTy = N->getOperand(2).getValueType(); 10300 else 10301 VecTy = N->getValueType(0); 10302 10303 // If the increment is a constant, it must match the memory ref size. 10304 SDValue Inc = User->getOperand(User->getOperand(0) == Addr ? 
1 : 0); 10305 if (ConstantSDNode *CInc = dyn_cast<ConstantSDNode>(Inc.getNode())) { 10306 uint32_t IncVal = CInc->getZExtValue(); 10307 unsigned NumBytes = NumVecs * VecTy.getSizeInBits() / 8; 10308 if (IsLaneOp || IsDupOp) 10309 NumBytes /= VecTy.getVectorNumElements(); 10310 if (IncVal != NumBytes) 10311 continue; 10312 Inc = DAG.getRegister(AArch64::XZR, MVT::i64); 10313 } 10314 SmallVector<SDValue, 8> Ops; 10315 Ops.push_back(N->getOperand(0)); // Incoming chain 10316 // Load lane and store have vector list as input. 10317 if (IsLaneOp || IsStore) 10318 for (unsigned i = 2; i < AddrOpIdx; ++i) 10319 Ops.push_back(N->getOperand(i)); 10320 Ops.push_back(Addr); // Base register 10321 Ops.push_back(Inc); 10322 10323 // Return Types. 10324 EVT Tys[6]; 10325 unsigned NumResultVecs = (IsStore ? 0 : NumVecs); 10326 unsigned n; 10327 for (n = 0; n < NumResultVecs; ++n) 10328 Tys[n] = VecTy; 10329 Tys[n++] = MVT::i64; // Type of write back register 10330 Tys[n] = MVT::Other; // Type of the chain 10331 SDVTList SDTys = DAG.getVTList(makeArrayRef(Tys, NumResultVecs + 2)); 10332 10333 MemIntrinsicSDNode *MemInt = cast<MemIntrinsicSDNode>(N); 10334 SDValue UpdN = DAG.getMemIntrinsicNode(NewOpc, SDLoc(N), SDTys, Ops, 10335 MemInt->getMemoryVT(), 10336 MemInt->getMemOperand()); 10337 10338 // Update the uses. 10339 std::vector<SDValue> NewResults; 10340 for (unsigned i = 0; i < NumResultVecs; ++i) { 10341 NewResults.push_back(SDValue(UpdN.getNode(), i)); 10342 } 10343 NewResults.push_back(SDValue(UpdN.getNode(), NumResultVecs + 1)); 10344 DCI.CombineTo(N, NewResults); 10345 DCI.CombineTo(User, SDValue(UpdN.getNode(), NumResultVecs)); 10346 10347 break; 10348 } 10349 return SDValue(); 10350 } 10351 10352 // Checks to see if the value is the prescribed width and returns information 10353 // about its extension mode. 10354 static 10355 bool checkValueWidth(SDValue V, unsigned width, ISD::LoadExtType &ExtType) { 10356 ExtType = ISD::NON_EXTLOAD; 10357 switch(V.getNode()->getOpcode()) { 10358 default: 10359 return false; 10360 case ISD::LOAD: { 10361 LoadSDNode *LoadNode = cast<LoadSDNode>(V.getNode()); 10362 if ((LoadNode->getMemoryVT() == MVT::i8 && width == 8) 10363 || (LoadNode->getMemoryVT() == MVT::i16 && width == 16)) { 10364 ExtType = LoadNode->getExtensionType(); 10365 return true; 10366 } 10367 return false; 10368 } 10369 case ISD::AssertSext: { 10370 VTSDNode *TypeNode = cast<VTSDNode>(V.getNode()->getOperand(1)); 10371 if ((TypeNode->getVT() == MVT::i8 && width == 8) 10372 || (TypeNode->getVT() == MVT::i16 && width == 16)) { 10373 ExtType = ISD::SEXTLOAD; 10374 return true; 10375 } 10376 return false; 10377 } 10378 case ISD::AssertZext: { 10379 VTSDNode *TypeNode = cast<VTSDNode>(V.getNode()->getOperand(1)); 10380 if ((TypeNode->getVT() == MVT::i8 && width == 8) 10381 || (TypeNode->getVT() == MVT::i16 && width == 16)) { 10382 ExtType = ISD::ZEXTLOAD; 10383 return true; 10384 } 10385 return false; 10386 } 10387 case ISD::Constant: 10388 case ISD::TargetConstant: { 10389 return std::abs(cast<ConstantSDNode>(V.getNode())->getSExtValue()) < 10390 1LL << (width - 1); 10391 } 10392 } 10393 10394 return true; 10395 } 10396 10397 // This function does a whole lot of voodoo to determine if the tests are 10398 // equivalent without and with a mask. 
// Essentially what happens is that given a DAG resembling:
//
//  +-------------+ +-------------+ +-------------+ +-------------+
//  |    Input    | | AddConstant | | CompConstant| |     CC      |
//  +-------------+ +-------------+ +-------------+ +-------------+
//           |           |           |               |
//           V           V           |    +----------+
//          +-------------+  +----+  |    |
//          |     ADD     |  |0xff|  |    |
//          +-------------+  +----+  |    |
//                  |         |      |    |
//                  V         V      |    |
//                 +-------------+   |    |
//                 |     AND     |   |    |
//                 +-------------+   |    |
//                       |           |    |
//                       +-----+     |    |
//                             |     |    |
//                             V     V    V
//                            +-------------+
//                            |     CMP     |
//                            +-------------+
//
// The AND node may be safely removed for some combinations of inputs. In
// particular we need to take into account the extension type of the Input,
// the exact values of AddConstant, CompConstant, and CC, along with the nominal
// width of the input (this can work for any width inputs, the above graph is
// specific to 8 bits).
//
// The specific equations were worked out by generating output tables for each
// AArch64CC value in terms of AddConstant (w1) and CompConstant (w2). The
// problem was simplified by working with 4 bit inputs, which means we only
// needed to reason about 24 distinct bit patterns: 8 patterns unique to zero
// extension (8, 15), 8 patterns unique to sign extension (-8, -1), and 8
// patterns present in both extensions (0, 7). For every distinct set of
// AddConstant and CompConstant bit patterns we can consider the masked and
// unmasked versions to be equivalent if the result of this function is true
// for all 16 distinct bit patterns for the current extension type of Input
// (w0).
//
//   sub      w8, w0, w1
//   and      w10, w8, #0x0f
//   cmp      w8, w2
//   cset     w9, AArch64CC
//   cmp      w10, w2
//   cset     w11, AArch64CC
//   cmp      w9, w11
//   cset     w0, eq
//   ret
//
// Since the above function shows when the outputs are equivalent it defines
// when it is safe to remove the AND. Unfortunately it only runs on AArch64 and
// would be expensive to run during compiles. The equations below were written
// in a test harness that confirmed they gave outputs equivalent to the above
// function for all inputs, so they can be used to determine whether the
// removal is legal instead.
//
// isEquivalentMaskless() is the code for testing whether the AND can be
// removed, factored out of the DAG recognition because the DAG can take
// several forms.

static bool isEquivalentMaskless(unsigned CC, unsigned width,
                                 ISD::LoadExtType ExtType, int AddConstant,
                                 int CompConstant) {
  // By being careful about our equations and only writing them in terms of
  // symbolic values and well-known constants (0, 1, -1, MaxUInt), we can
  // make them generally applicable to all bit widths.
  int MaxUInt = (1 << width);

  // For the purposes of these comparisons sign extending the type is
  // equivalent to zero extending the add and displacing it by half the integer
  // width. Provided we are careful and make sure our equations are valid over
  // the whole range we can just adjust the input and avoid writing equations
  // for sign extended inputs.
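  // For example (a worked sketch, not part of the original derivation): with
  // width == 8 and a sign-extending load, an AddConstant of 5 is analysed by
  // the equations below as 5 - 128 == -123.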
10471   if (ExtType == ISD::SEXTLOAD)
10472     AddConstant -= (1 << (width-1));
10473
10474   switch (CC) {
10475   case AArch64CC::LE:
10476   case AArch64CC::GT:
10477     if ((AddConstant == 0) ||
10478         (CompConstant == MaxUInt - 1 && AddConstant < 0) ||
10479         (AddConstant >= 0 && CompConstant < 0) ||
10480         (AddConstant <= 0 && CompConstant <= 0 && CompConstant < AddConstant))
10481       return true;
10482     break;
10483   case AArch64CC::LT:
10484   case AArch64CC::GE:
10485     if ((AddConstant == 0) ||
10486         (AddConstant >= 0 && CompConstant <= 0) ||
10487         (AddConstant <= 0 && CompConstant <= 0 && CompConstant <= AddConstant))
10488       return true;
10489     break;
10490   case AArch64CC::HI:
10491   case AArch64CC::LS:
10492     if ((AddConstant >= 0 && CompConstant < 0) ||
10493         (AddConstant <= 0 && CompConstant >= -1 &&
10494          CompConstant < AddConstant + MaxUInt))
10495       return true;
10496     break;
10497   case AArch64CC::PL:
10498   case AArch64CC::MI:
10499     if ((AddConstant == 0) ||
10500         (AddConstant > 0 && CompConstant <= 0) ||
10501         (AddConstant < 0 && CompConstant <= AddConstant))
10502       return true;
10503     break;
10504   case AArch64CC::LO:
10505   case AArch64CC::HS:
10506     if ((AddConstant >= 0 && CompConstant <= 0) ||
10507         (AddConstant <= 0 && CompConstant >= 0 &&
10508          CompConstant <= AddConstant + MaxUInt))
10509       return true;
10510     break;
10511   case AArch64CC::EQ:
10512   case AArch64CC::NE:
10513     if ((AddConstant > 0 && CompConstant < 0) ||
10514         (AddConstant < 0 && CompConstant >= 0 &&
10515          CompConstant < AddConstant + MaxUInt) ||
10516         (AddConstant >= 0 && CompConstant >= 0 &&
10517          CompConstant >= AddConstant) ||
10518         (AddConstant <= 0 && CompConstant < 0 && CompConstant < AddConstant))
10519       return true;
10520     break;
10521   case AArch64CC::VS:
10522   case AArch64CC::VC:
10523   case AArch64CC::AL:
10524   case AArch64CC::NV:
10525     return true;
10526   case AArch64CC::Invalid:
10527     break;
10528   }
10529
10530   return false;
10531 }
10532
10533 static
10534 SDValue performCONDCombine(SDNode *N,
10535                            TargetLowering::DAGCombinerInfo &DCI,
10536                            SelectionDAG &DAG, unsigned CCIndex,
10537                            unsigned CmpIndex) {
10538   unsigned CC = cast<ConstantSDNode>(N->getOperand(CCIndex))->getSExtValue();
10539   SDNode *SubsNode = N->getOperand(CmpIndex).getNode();
10540   unsigned CondOpcode = SubsNode->getOpcode();
10541
10542   if (CondOpcode != AArch64ISD::SUBS)
10543     return SDValue();
10544
10545   // There is a SUBS feeding this condition. Is it fed by a mask we can
10546   // use?
10547
10548   SDNode *AndNode = SubsNode->getOperand(0).getNode();
10549   unsigned MaskBits = 0;
10550
10551   if (AndNode->getOpcode() != ISD::AND)
10552     return SDValue();
10553
10554   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(AndNode->getOperand(1))) {
10555     uint32_t CNV = CN->getZExtValue();
10556     if (CNV == 255)
10557       MaskBits = 8;
10558     else if (CNV == 65535)
10559       MaskBits = 16;
10560   }
10561
10562   if (!MaskBits)
10563     return SDValue();
10564
10565   SDValue AddValue = AndNode->getOperand(0);
10566
10567   if (AddValue.getOpcode() != ISD::ADD)
10568     return SDValue();
10569
10570   // The basic DAG structure is correct; grab the inputs and validate them.
10571
10572   SDValue AddInputValue1 = AddValue.getNode()->getOperand(0);
10573   SDValue AddInputValue2 = AddValue.getNode()->getOperand(1);
10574   SDValue SubsInputValue = SubsNode->getOperand(1);
10575
10576   // The mask is present and the provenance of all the values is a smaller
10577   // type, so let's see if the mask is superfluous.
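  // (Illustrative sketch of the shape being matched, not from the original
  // source: the condition being combined is fed by
  //   (SUBS (AND (ADD Input, AddConstant), 0xff or 0xffff), CompConstant)
  // and the AND may be removable if isEquivalentMaskless() proves the
  // condition flags come out the same without it.)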
10578
10579   if (!isa<ConstantSDNode>(AddInputValue2.getNode()) ||
10580       !isa<ConstantSDNode>(SubsInputValue.getNode()))
10581     return SDValue();
10582
10583   ISD::LoadExtType ExtType;
10584
10585   if (!checkValueWidth(SubsInputValue, MaskBits, ExtType) ||
10586       !checkValueWidth(AddInputValue2, MaskBits, ExtType) ||
10587       !checkValueWidth(AddInputValue1, MaskBits, ExtType))
10588     return SDValue();
10589
10590   if (!isEquivalentMaskless(CC, MaskBits, ExtType,
10591                             cast<ConstantSDNode>(AddInputValue2.getNode())->getSExtValue(),
10592                             cast<ConstantSDNode>(SubsInputValue.getNode())->getSExtValue()))
10593     return SDValue();
10594
10595   // The AND is not necessary; remove it.
10596
10597   SDVTList VTs = DAG.getVTList(SubsNode->getValueType(0),
10598                                SubsNode->getValueType(1));
10599   SDValue Ops[] = { AddValue, SubsNode->getOperand(1) };
10600
10601   SDValue NewValue = DAG.getNode(CondOpcode, SDLoc(SubsNode), VTs, Ops);
10602   DAG.ReplaceAllUsesWith(SubsNode, NewValue.getNode());
10603
10604   return SDValue(N, 0);
10605 }
10606
10607 // Optimize compare with zero and branch.
10608 static SDValue performBRCONDCombine(SDNode *N,
10609                                     TargetLowering::DAGCombinerInfo &DCI,
10610                                     SelectionDAG &DAG) {
10611   if (SDValue NV = performCONDCombine(N, DCI, DAG, 2, 3))
10612     N = NV.getNode();
10613   SDValue Chain = N->getOperand(0);
10614   SDValue Dest = N->getOperand(1);
10615   SDValue CCVal = N->getOperand(2);
10616   SDValue Cmp = N->getOperand(3);
10617
10618   assert(isa<ConstantSDNode>(CCVal) && "Expected a ConstantSDNode here!");
10619   unsigned CC = cast<ConstantSDNode>(CCVal)->getZExtValue();
10620   if (CC != AArch64CC::EQ && CC != AArch64CC::NE)
10621     return SDValue();
10622
10623   unsigned CmpOpc = Cmp.getOpcode();
10624   if (CmpOpc != AArch64ISD::ADDS && CmpOpc != AArch64ISD::SUBS)
10625     return SDValue();
10626
10627   // Only attempt folding if there is only one use of the flag and no use of the
10628   // value.
10629   if (!Cmp->hasNUsesOfValue(0, 0) || !Cmp->hasNUsesOfValue(1, 1))
10630     return SDValue();
10631
10632   SDValue LHS = Cmp.getOperand(0);
10633   SDValue RHS = Cmp.getOperand(1);
10634
10635   assert(LHS.getValueType() == RHS.getValueType() &&
10636          "Expected the value type to be the same for both operands!");
10637   if (LHS.getValueType() != MVT::i32 && LHS.getValueType() != MVT::i64)
10638     return SDValue();
10639
10640   if (isNullConstant(LHS))
10641     std::swap(LHS, RHS);
10642
10643   if (!isNullConstant(RHS))
10644     return SDValue();
10645
10646   if (LHS.getOpcode() == ISD::SHL || LHS.getOpcode() == ISD::SRA ||
10647       LHS.getOpcode() == ISD::SRL)
10648     return SDValue();
10649
10650   // Fold the compare into the branch instruction.
10651   SDValue BR;
10652   if (CC == AArch64CC::EQ)
10653     BR = DAG.getNode(AArch64ISD::CBZ, SDLoc(N), MVT::Other, Chain, LHS, Dest);
10654   else
10655     BR = DAG.getNode(AArch64ISD::CBNZ, SDLoc(N), MVT::Other, Chain, LHS, Dest);
10656
10657   // Do not add new nodes to DAG combiner worklist.
10658   DCI.CombineTo(N, BR, false);
10659
10660   return SDValue();
10661 }
10662
10663 // Optimize some simple tbz/tbnz cases. Returns the new operand and bit to test
10664 // as well as whether the test should be inverted. This code is required to
10665 // catch these cases (as opposed to standard DAG combines) because
10666 // AArch64ISD::TBZ is matched during legalization.
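// (Illustrative examples of the rewrites below, derived from the cases this
// function handles; not from the original source:
//   (tbz (and x, 0x4), 2)  --> (tbz x, 2)
//   (tbz (shl x, 3), 5)    --> (tbz x, 2)
//   (tbz (xor x, -1), b)   --> (tbnz x, b))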
10667 static SDValue getTestBitOperand(SDValue Op, unsigned &Bit, bool &Invert, 10668 SelectionDAG &DAG) { 10669 10670 if (!Op->hasOneUse()) 10671 return Op; 10672 10673 // We don't handle undef/constant-fold cases below, as they should have 10674 // already been taken care of (e.g. and of 0, test of undefined shifted bits, 10675 // etc.) 10676 10677 // (tbz (trunc x), b) -> (tbz x, b) 10678 // This case is just here to enable more of the below cases to be caught. 10679 if (Op->getOpcode() == ISD::TRUNCATE && 10680 Bit < Op->getValueType(0).getSizeInBits()) { 10681 return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG); 10682 } 10683 10684 if (Op->getNumOperands() != 2) 10685 return Op; 10686 10687 auto *C = dyn_cast<ConstantSDNode>(Op->getOperand(1)); 10688 if (!C) 10689 return Op; 10690 10691 switch (Op->getOpcode()) { 10692 default: 10693 return Op; 10694 10695 // (tbz (and x, m), b) -> (tbz x, b) 10696 case ISD::AND: 10697 if ((C->getZExtValue() >> Bit) & 1) 10698 return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG); 10699 return Op; 10700 10701 // (tbz (shl x, c), b) -> (tbz x, b-c) 10702 case ISD::SHL: 10703 if (C->getZExtValue() <= Bit && 10704 (Bit - C->getZExtValue()) < Op->getValueType(0).getSizeInBits()) { 10705 Bit = Bit - C->getZExtValue(); 10706 return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG); 10707 } 10708 return Op; 10709 10710 // (tbz (sra x, c), b) -> (tbz x, b+c) or (tbz x, msb) if b+c is > # bits in x 10711 case ISD::SRA: 10712 Bit = Bit + C->getZExtValue(); 10713 if (Bit >= Op->getValueType(0).getSizeInBits()) 10714 Bit = Op->getValueType(0).getSizeInBits() - 1; 10715 return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG); 10716 10717 // (tbz (srl x, c), b) -> (tbz x, b+c) 10718 case ISD::SRL: 10719 if ((Bit + C->getZExtValue()) < Op->getValueType(0).getSizeInBits()) { 10720 Bit = Bit + C->getZExtValue(); 10721 return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG); 10722 } 10723 return Op; 10724 10725 // (tbz (xor x, -1), b) -> (tbnz x, b) 10726 case ISD::XOR: 10727 if ((C->getZExtValue() >> Bit) & 1) 10728 Invert = !Invert; 10729 return getTestBitOperand(Op->getOperand(0), Bit, Invert, DAG); 10730 } 10731 } 10732 10733 // Optimize test single bit zero/non-zero and branch. 10734 static SDValue performTBZCombine(SDNode *N, 10735 TargetLowering::DAGCombinerInfo &DCI, 10736 SelectionDAG &DAG) { 10737 unsigned Bit = cast<ConstantSDNode>(N->getOperand(2))->getZExtValue(); 10738 bool Invert = false; 10739 SDValue TestSrc = N->getOperand(1); 10740 SDValue NewTestSrc = getTestBitOperand(TestSrc, Bit, Invert, DAG); 10741 10742 if (TestSrc == NewTestSrc) 10743 return SDValue(); 10744 10745 unsigned NewOpc = N->getOpcode(); 10746 if (Invert) { 10747 if (NewOpc == AArch64ISD::TBZ) 10748 NewOpc = AArch64ISD::TBNZ; 10749 else { 10750 assert(NewOpc == AArch64ISD::TBNZ); 10751 NewOpc = AArch64ISD::TBZ; 10752 } 10753 } 10754 10755 SDLoc DL(N); 10756 return DAG.getNode(NewOpc, DL, MVT::Other, N->getOperand(0), NewTestSrc, 10757 DAG.getConstant(Bit, DL, MVT::i64), N->getOperand(3)); 10758 } 10759 10760 // vselect (v1i1 setcc) -> 10761 // vselect (v1iXX setcc) (XX is the size of the compared operand type) 10762 // FIXME: Currently the type legalizer can't handle VSELECT having v1i1 as 10763 // condition. If it can legalize "VSELECT v1i1" correctly, no need to combine 10764 // such VSELECT. 
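// (Illustrative example, not from the original source: a condition such as
// (vselect (v1i1 setcc (v1i64 a), (v1i64 b)), t, f) is rewritten below as
// (vselect (v1i64 setcc a, b), t, f), sidestepping the v1i1 legalization
// problem described above.)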
10765 static SDValue performVSelectCombine(SDNode *N, SelectionDAG &DAG) {
10766   SDValue N0 = N->getOperand(0);
10767   EVT CCVT = N0.getValueType();
10768
10769   if (N0.getOpcode() != ISD::SETCC || CCVT.getVectorNumElements() != 1 ||
10770       CCVT.getVectorElementType() != MVT::i1)
10771     return SDValue();
10772
10773   EVT ResVT = N->getValueType(0);
10774   EVT CmpVT = N0.getOperand(0).getValueType();
10775   // Only combine when the result type is of the same size as the compared
10776   // operands.
10777   if (ResVT.getSizeInBits() != CmpVT.getSizeInBits())
10778     return SDValue();
10779
10780   SDValue IfTrue = N->getOperand(1);
10781   SDValue IfFalse = N->getOperand(2);
10782   SDValue SetCC =
10783       DAG.getSetCC(SDLoc(N), CmpVT.changeVectorElementTypeToInteger(),
10784                    N0.getOperand(0), N0.getOperand(1),
10785                    cast<CondCodeSDNode>(N0.getOperand(2))->get());
10786   return DAG.getNode(ISD::VSELECT, SDLoc(N), ResVT, SetCC,
10787                      IfTrue, IfFalse);
10788 }
10789
10790 /// A vector select: "(select vL, vR, (setcc LHS, RHS))" is best performed with
10791 /// the compare-mask instructions rather than going via NZCV, even if LHS and
10792 /// RHS are really scalar. This replaces any scalar setcc in the above pattern
10793 /// with a vector one followed by a DUP shuffle on the result.
10794 static SDValue performSelectCombine(SDNode *N,
10795                                     TargetLowering::DAGCombinerInfo &DCI) {
10796   SelectionDAG &DAG = DCI.DAG;
10797   SDValue N0 = N->getOperand(0);
10798   EVT ResVT = N->getValueType(0);
10799
10800   if (N0.getOpcode() != ISD::SETCC)
10801     return SDValue();
10802
10803   // Make sure the SETCC result is either i1 (initial DAG), or i32, the lowered
10804   // scalar SetCCResultType. We also don't expect vectors, because we assume
10805   // that selects fed by vector SETCCs are canonicalized to VSELECT.
10806   assert((N0.getValueType() == MVT::i1 || N0.getValueType() == MVT::i32) &&
10807          "Scalar-SETCC feeding SELECT has unexpected result type!");
10808
10809   // If NumMaskElts == 0, the comparison is larger than the select result. The
10810   // largest real NEON comparison is 64 bits per lane, which means the result
10811   // is at most 32 bits and an illegal vector. Just bail out for now.
10812   EVT SrcVT = N0.getOperand(0).getValueType();
10813
10814   // Don't try to do this optimization when the setcc itself has i1 operands.
10815   // There are no legal vectors of i1, so this would be pointless.
10816   if (SrcVT == MVT::i1)
10817     return SDValue();
10818
10819   int NumMaskElts = ResVT.getSizeInBits() / SrcVT.getSizeInBits();
10820   if (!ResVT.isVector() || NumMaskElts == 0)
10821     return SDValue();
10822
10823   SrcVT = EVT::getVectorVT(*DAG.getContext(), SrcVT, NumMaskElts);
10824   EVT CCVT = SrcVT.changeVectorElementTypeToInteger();
10825
10826   // Also bail out if the vector CCVT isn't the same size as ResVT.
10827   // This can happen if the SETCC operand size doesn't divide the ResVT size
10828   // (e.g., f64 vs v3f32).
10829   if (CCVT.getSizeInBits() != ResVT.getSizeInBits())
10830     return SDValue();
10831
10832   // Make sure we didn't create illegal types, if we're not supposed to.
10833   assert(DCI.isBeforeLegalize() ||
10834          DAG.getTargetLoweringInfo().isTypeLegal(SrcVT));
10835
10836   // First perform a vector comparison, where lane 0 is the one we're
10837   // interested in.
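  // (Illustrative example, not from the original source: for
  // "select (setcc f64 %a, %b), v2f64 %t, v2f64 %f", the scalar operands are
  // placed in lane 0 of v2f64 vectors, compared to produce a v2i64 mask, and
  // lane 0 of that mask is then DUPed across both lanes.)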
10838 SDLoc DL(N0); 10839 SDValue LHS = 10840 DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, SrcVT, N0.getOperand(0)); 10841 SDValue RHS = 10842 DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, SrcVT, N0.getOperand(1)); 10843 SDValue SetCC = DAG.getNode(ISD::SETCC, DL, CCVT, LHS, RHS, N0.getOperand(2)); 10844 10845 // Now duplicate the comparison mask we want across all other lanes. 10846 SmallVector<int, 8> DUPMask(CCVT.getVectorNumElements(), 0); 10847 SDValue Mask = DAG.getVectorShuffle(CCVT, DL, SetCC, SetCC, DUPMask); 10848 Mask = DAG.getNode(ISD::BITCAST, DL, 10849 ResVT.changeVectorElementTypeToInteger(), Mask); 10850 10851 return DAG.getSelect(DL, ResVT, Mask, N->getOperand(1), N->getOperand(2)); 10852 } 10853 10854 /// Get rid of unnecessary NVCASTs (that don't change the type). 10855 static SDValue performNVCASTCombine(SDNode *N) { 10856 if (N->getValueType(0) == N->getOperand(0).getValueType()) 10857 return N->getOperand(0); 10858 10859 return SDValue(); 10860 } 10861 10862 // If all users of the globaladdr are of the form (globaladdr + constant), find 10863 // the smallest constant, fold it into the globaladdr's offset and rewrite the 10864 // globaladdr as (globaladdr + constant) - constant. 10865 static SDValue performGlobalAddressCombine(SDNode *N, SelectionDAG &DAG, 10866 const AArch64Subtarget *Subtarget, 10867 const TargetMachine &TM) { 10868 auto *GN = dyn_cast<GlobalAddressSDNode>(N); 10869 if (!GN || Subtarget->ClassifyGlobalReference(GN->getGlobal(), TM) != 10870 AArch64II::MO_NO_FLAG) 10871 return SDValue(); 10872 10873 uint64_t MinOffset = -1ull; 10874 for (SDNode *N : GN->uses()) { 10875 if (N->getOpcode() != ISD::ADD) 10876 return SDValue(); 10877 auto *C = dyn_cast<ConstantSDNode>(N->getOperand(0)); 10878 if (!C) 10879 C = dyn_cast<ConstantSDNode>(N->getOperand(1)); 10880 if (!C) 10881 return SDValue(); 10882 MinOffset = std::min(MinOffset, C->getZExtValue()); 10883 } 10884 uint64_t Offset = MinOffset + GN->getOffset(); 10885 10886 // Require that the new offset is larger than the existing one. Otherwise, we 10887 // can end up oscillating between two possible DAGs, for example, 10888 // (add (add globaladdr + 10, -1), 1) and (add globaladdr + 9, 1). 10889 if (Offset <= uint64_t(GN->getOffset())) 10890 return SDValue(); 10891 10892 // Check whether folding this offset is legal. It must not go out of bounds of 10893 // the referenced object to avoid violating the code model, and must be 10894 // smaller than 2^21 because this is the largest offset expressible in all 10895 // object formats. 10896 // 10897 // This check also prevents us from folding negative offsets, which will end 10898 // up being treated in the same way as large positive ones. They could also 10899 // cause code model violations, and aren't really common enough to matter. 
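  // (Illustrative example, not from the original source: a use such as
  // (add globaladdr, -8) zero-extends to a huge MinOffset, so the resulting
  // Offset fails the bound below and the fold is rejected, which is exactly
  // the intended treatment of negative offsets.)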
10900 if (Offset >= (1 << 21)) 10901 return SDValue(); 10902 10903 const GlobalValue *GV = GN->getGlobal(); 10904 Type *T = GV->getValueType(); 10905 if (!T->isSized() || 10906 Offset > GV->getParent()->getDataLayout().getTypeAllocSize(T)) 10907 return SDValue(); 10908 10909 SDLoc DL(GN); 10910 SDValue Result = DAG.getGlobalAddress(GV, DL, MVT::i64, Offset); 10911 return DAG.getNode(ISD::SUB, DL, MVT::i64, Result, 10912 DAG.getConstant(MinOffset, DL, MVT::i64)); 10913 } 10914 10915 SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N, 10916 DAGCombinerInfo &DCI) const { 10917 SelectionDAG &DAG = DCI.DAG; 10918 switch (N->getOpcode()) { 10919 default: 10920 LLVM_DEBUG(dbgs() << "Custom combining: skipping\n"); 10921 break; 10922 case ISD::ADD: 10923 case ISD::SUB: 10924 return performAddSubLongCombine(N, DCI, DAG); 10925 case ISD::XOR: 10926 return performXorCombine(N, DAG, DCI, Subtarget); 10927 case ISD::MUL: 10928 return performMulCombine(N, DAG, DCI, Subtarget); 10929 case ISD::SINT_TO_FP: 10930 case ISD::UINT_TO_FP: 10931 return performIntToFpCombine(N, DAG, Subtarget); 10932 case ISD::FP_TO_SINT: 10933 case ISD::FP_TO_UINT: 10934 return performFpToIntCombine(N, DAG, DCI, Subtarget); 10935 case ISD::FDIV: 10936 return performFDivCombine(N, DAG, DCI, Subtarget); 10937 case ISD::OR: 10938 return performORCombine(N, DCI, Subtarget); 10939 case ISD::SRL: 10940 return performSRLCombine(N, DCI); 10941 case ISD::INTRINSIC_WO_CHAIN: 10942 return performIntrinsicCombine(N, DCI, Subtarget); 10943 case ISD::ANY_EXTEND: 10944 case ISD::ZERO_EXTEND: 10945 case ISD::SIGN_EXTEND: 10946 return performExtendCombine(N, DCI, DAG); 10947 case ISD::BITCAST: 10948 return performBitcastCombine(N, DCI, DAG); 10949 case ISD::CONCAT_VECTORS: 10950 return performConcatVectorsCombine(N, DCI, DAG); 10951 case ISD::SELECT: 10952 return performSelectCombine(N, DCI); 10953 case ISD::VSELECT: 10954 return performVSelectCombine(N, DCI.DAG); 10955 case ISD::LOAD: 10956 if (performTBISimplification(N->getOperand(1), DCI, DAG)) 10957 return SDValue(N, 0); 10958 break; 10959 case ISD::STORE: 10960 return performSTORECombine(N, DCI, DAG, Subtarget); 10961 case AArch64ISD::BRCOND: 10962 return performBRCONDCombine(N, DCI, DAG); 10963 case AArch64ISD::TBNZ: 10964 case AArch64ISD::TBZ: 10965 return performTBZCombine(N, DCI, DAG); 10966 case AArch64ISD::CSEL: 10967 return performCONDCombine(N, DCI, DAG, 2, 3); 10968 case AArch64ISD::DUP: 10969 return performPostLD1Combine(N, DCI, false); 10970 case AArch64ISD::NVCAST: 10971 return performNVCASTCombine(N); 10972 case ISD::INSERT_VECTOR_ELT: 10973 return performPostLD1Combine(N, DCI, true); 10974 case ISD::INTRINSIC_VOID: 10975 case ISD::INTRINSIC_W_CHAIN: 10976 switch (cast<ConstantSDNode>(N->getOperand(1))->getZExtValue()) { 10977 case Intrinsic::aarch64_neon_ld2: 10978 case Intrinsic::aarch64_neon_ld3: 10979 case Intrinsic::aarch64_neon_ld4: 10980 case Intrinsic::aarch64_neon_ld1x2: 10981 case Intrinsic::aarch64_neon_ld1x3: 10982 case Intrinsic::aarch64_neon_ld1x4: 10983 case Intrinsic::aarch64_neon_ld2lane: 10984 case Intrinsic::aarch64_neon_ld3lane: 10985 case Intrinsic::aarch64_neon_ld4lane: 10986 case Intrinsic::aarch64_neon_ld2r: 10987 case Intrinsic::aarch64_neon_ld3r: 10988 case Intrinsic::aarch64_neon_ld4r: 10989 case Intrinsic::aarch64_neon_st2: 10990 case Intrinsic::aarch64_neon_st3: 10991 case Intrinsic::aarch64_neon_st4: 10992 case Intrinsic::aarch64_neon_st1x2: 10993 case Intrinsic::aarch64_neon_st1x3: 10994 case Intrinsic::aarch64_neon_st1x4: 10995 case 
Intrinsic::aarch64_neon_st2lane:
10996     case Intrinsic::aarch64_neon_st3lane:
10997     case Intrinsic::aarch64_neon_st4lane:
10998       return performNEONPostLDSTCombine(N, DCI, DAG);
10999     default:
11000       break;
11001     }
11002     break;
11003   case ISD::GlobalAddress:
11004     return performGlobalAddressCombine(N, DAG, Subtarget, getTargetMachine());
11005   }
11006   return SDValue();
11007 }
11008
11009 // Check if the return value is used only as a return value, as otherwise
11010 // we can't perform a tail-call. In particular, we need to check for
11011 // target ISD nodes that are returns and any other "odd" constructs
11012 // that the generic analysis code won't necessarily catch.
11013 bool AArch64TargetLowering::isUsedByReturnOnly(SDNode *N,
11014                                                SDValue &Chain) const {
11015   if (N->getNumValues() != 1)
11016     return false;
11017   if (!N->hasNUsesOfValue(1, 0))
11018     return false;
11019
11020   SDValue TCChain = Chain;
11021   SDNode *Copy = *N->use_begin();
11022   if (Copy->getOpcode() == ISD::CopyToReg) {
11023     // If the copy has a glue operand, we conservatively assume it isn't safe to
11024     // perform a tail call.
11025     if (Copy->getOperand(Copy->getNumOperands() - 1).getValueType() ==
11026         MVT::Glue)
11027       return false;
11028     TCChain = Copy->getOperand(0);
11029   } else if (Copy->getOpcode() != ISD::FP_EXTEND)
11030     return false;
11031
11032   bool HasRet = false;
11033   for (SDNode *Node : Copy->uses()) {
11034     if (Node->getOpcode() != AArch64ISD::RET_FLAG)
11035       return false;
11036     HasRet = true;
11037   }
11038
11039   if (!HasRet)
11040     return false;
11041
11042   Chain = TCChain;
11043   return true;
11044 }
11045
11046 // Return whether an instruction can potentially be optimized to a tail
11047 // call. This will cause the optimizers to attempt to move, or duplicate,
11048 // return instructions to help enable tail call optimizations for this
11049 // instruction.
11050 bool AArch64TargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
11051   return CI->isTailCall();
11052 }
11053
11054 bool AArch64TargetLowering::getIndexedAddressParts(SDNode *Op, SDValue &Base,
11055                                                    SDValue &Offset,
11056                                                    ISD::MemIndexedMode &AM,
11057                                                    bool &IsInc,
11058                                                    SelectionDAG &DAG) const {
11059   if (Op->getOpcode() != ISD::ADD && Op->getOpcode() != ISD::SUB)
11060     return false;
11061
11062   Base = Op->getOperand(0);
11063   // All of the indexed addressing mode instructions take a signed
11064   // 9-bit immediate offset.
11065   if (ConstantSDNode *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1))) {
11066     int64_t RHSC = RHS->getSExtValue();
11067     if (Op->getOpcode() == ISD::SUB)
11068       RHSC = -(uint64_t)RHSC;
11069     if (!isInt<9>(RHSC))
11070       return false;
11071     IsInc = (Op->getOpcode() == ISD::ADD);
11072     Offset = Op->getOperand(1);
11073     return true;
11074   }
11075   return false;
11076 }
11077
11078 bool AArch64TargetLowering::getPreIndexedAddressParts(SDNode *N, SDValue &Base,
11079                                                       SDValue &Offset,
11080                                                       ISD::MemIndexedMode &AM,
11081                                                       SelectionDAG &DAG) const {
11082   EVT VT;
11083   SDValue Ptr;
11084   if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) {
11085     VT = LD->getMemoryVT();
11086     Ptr = LD->getBasePtr();
11087   } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) {
11088     VT = ST->getMemoryVT();
11089     Ptr = ST->getBasePtr();
11090   } else
11091     return false;
11092
11093   bool IsInc;
11094   if (!getIndexedAddressParts(Ptr.getNode(), Base, Offset, AM, IsInc, DAG))
11095     return false;
11096   AM = IsInc ?
ISD::PRE_INC : ISD::PRE_DEC; 11097 return true; 11098 } 11099 11100 bool AArch64TargetLowering::getPostIndexedAddressParts( 11101 SDNode *N, SDNode *Op, SDValue &Base, SDValue &Offset, 11102 ISD::MemIndexedMode &AM, SelectionDAG &DAG) const { 11103 EVT VT; 11104 SDValue Ptr; 11105 if (LoadSDNode *LD = dyn_cast<LoadSDNode>(N)) { 11106 VT = LD->getMemoryVT(); 11107 Ptr = LD->getBasePtr(); 11108 } else if (StoreSDNode *ST = dyn_cast<StoreSDNode>(N)) { 11109 VT = ST->getMemoryVT(); 11110 Ptr = ST->getBasePtr(); 11111 } else 11112 return false; 11113 11114 bool IsInc; 11115 if (!getIndexedAddressParts(Op, Base, Offset, AM, IsInc, DAG)) 11116 return false; 11117 // Post-indexing updates the base, so it's not a valid transform 11118 // if that's not the same as the load's pointer. 11119 if (Ptr != Base) 11120 return false; 11121 AM = IsInc ? ISD::POST_INC : ISD::POST_DEC; 11122 return true; 11123 } 11124 11125 static void ReplaceBITCASTResults(SDNode *N, SmallVectorImpl<SDValue> &Results, 11126 SelectionDAG &DAG) { 11127 SDLoc DL(N); 11128 SDValue Op = N->getOperand(0); 11129 11130 if (N->getValueType(0) != MVT::i16 || Op.getValueType() != MVT::f16) 11131 return; 11132 11133 Op = SDValue( 11134 DAG.getMachineNode(TargetOpcode::INSERT_SUBREG, DL, MVT::f32, 11135 DAG.getUNDEF(MVT::i32), Op, 11136 DAG.getTargetConstant(AArch64::hsub, DL, MVT::i32)), 11137 0); 11138 Op = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Op); 11139 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Op)); 11140 } 11141 11142 static void ReplaceReductionResults(SDNode *N, 11143 SmallVectorImpl<SDValue> &Results, 11144 SelectionDAG &DAG, unsigned InterOp, 11145 unsigned AcrossOp) { 11146 EVT LoVT, HiVT; 11147 SDValue Lo, Hi; 11148 SDLoc dl(N); 11149 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(N->getValueType(0)); 11150 std::tie(Lo, Hi) = DAG.SplitVectorOperand(N, 0); 11151 SDValue InterVal = DAG.getNode(InterOp, dl, LoVT, Lo, Hi); 11152 SDValue SplitVal = DAG.getNode(AcrossOp, dl, LoVT, InterVal); 11153 Results.push_back(SplitVal); 11154 } 11155 11156 static std::pair<SDValue, SDValue> splitInt128(SDValue N, SelectionDAG &DAG) { 11157 SDLoc DL(N); 11158 SDValue Lo = DAG.getNode(ISD::TRUNCATE, DL, MVT::i64, N); 11159 SDValue Hi = DAG.getNode(ISD::TRUNCATE, DL, MVT::i64, 11160 DAG.getNode(ISD::SRL, DL, MVT::i128, N, 11161 DAG.getConstant(64, DL, MVT::i64))); 11162 return std::make_pair(Lo, Hi); 11163 } 11164 11165 // Create an even/odd pair of X registers holding integer value V. 
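// (Illustrative note, not from the original source: for an i128 value V this
// builds REG_SEQUENCE(XSeqPairsClass, lo64(V):sube64, hi64(V):subo64) on
// little-endian targets, the halves being swapped on big-endian, so that
// instructions such as CASP below can consume an even/odd X register pair.)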
11166 static SDValue createGPRPairNode(SelectionDAG &DAG, SDValue V) { 11167 SDLoc dl(V.getNode()); 11168 SDValue VLo = DAG.getAnyExtOrTrunc(V, dl, MVT::i64); 11169 SDValue VHi = DAG.getAnyExtOrTrunc( 11170 DAG.getNode(ISD::SRL, dl, MVT::i128, V, DAG.getConstant(64, dl, MVT::i64)), 11171 dl, MVT::i64); 11172 if (DAG.getDataLayout().isBigEndian()) 11173 std::swap (VLo, VHi); 11174 SDValue RegClass = 11175 DAG.getTargetConstant(AArch64::XSeqPairsClassRegClassID, dl, MVT::i32); 11176 SDValue SubReg0 = DAG.getTargetConstant(AArch64::sube64, dl, MVT::i32); 11177 SDValue SubReg1 = DAG.getTargetConstant(AArch64::subo64, dl, MVT::i32); 11178 const SDValue Ops[] = { RegClass, VLo, SubReg0, VHi, SubReg1 }; 11179 return SDValue( 11180 DAG.getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, Ops), 0); 11181 } 11182 11183 static void ReplaceCMP_SWAP_128Results(SDNode *N, 11184 SmallVectorImpl<SDValue> &Results, 11185 SelectionDAG &DAG, 11186 const AArch64Subtarget *Subtarget) { 11187 assert(N->getValueType(0) == MVT::i128 && 11188 "AtomicCmpSwap on types less than 128 should be legal"); 11189 11190 if (Subtarget->hasLSE()) { 11191 // LSE has a 128-bit compare and swap (CASP), but i128 is not a legal type, 11192 // so lower it here, wrapped in REG_SEQUENCE and EXTRACT_SUBREG. 11193 SDValue Ops[] = { 11194 createGPRPairNode(DAG, N->getOperand(2)), // Compare value 11195 createGPRPairNode(DAG, N->getOperand(3)), // Store value 11196 N->getOperand(1), // Ptr 11197 N->getOperand(0), // Chain in 11198 }; 11199 11200 MachineFunction &MF = DAG.getMachineFunction(); 11201 MachineSDNode::mmo_iterator MemOp = MF.allocateMemRefsArray(1); 11202 MemOp[0] = cast<MemSDNode>(N)->getMemOperand(); 11203 11204 unsigned Opcode; 11205 switch (MemOp[0]->getOrdering()) { 11206 case AtomicOrdering::Monotonic: 11207 Opcode = AArch64::CASPX; 11208 break; 11209 case AtomicOrdering::Acquire: 11210 Opcode = AArch64::CASPAX; 11211 break; 11212 case AtomicOrdering::Release: 11213 Opcode = AArch64::CASPLX; 11214 break; 11215 case AtomicOrdering::AcquireRelease: 11216 case AtomicOrdering::SequentiallyConsistent: 11217 Opcode = AArch64::CASPALX; 11218 break; 11219 default: 11220 llvm_unreachable("Unexpected ordering!"); 11221 } 11222 11223 MachineSDNode *CmpSwap = DAG.getMachineNode( 11224 Opcode, SDLoc(N), DAG.getVTList(MVT::Untyped, MVT::Other), Ops); 11225 CmpSwap->setMemRefs(MemOp, MemOp + 1); 11226 11227 unsigned SubReg1 = AArch64::sube64, SubReg2 = AArch64::subo64; 11228 if (DAG.getDataLayout().isBigEndian()) 11229 std::swap(SubReg1, SubReg2); 11230 Results.push_back(DAG.getTargetExtractSubreg(SubReg1, SDLoc(N), MVT::i64, 11231 SDValue(CmpSwap, 0))); 11232 Results.push_back(DAG.getTargetExtractSubreg(SubReg2, SDLoc(N), MVT::i64, 11233 SDValue(CmpSwap, 0))); 11234 Results.push_back(SDValue(CmpSwap, 1)); // Chain out 11235 return; 11236 } 11237 11238 auto Desired = splitInt128(N->getOperand(2), DAG); 11239 auto New = splitInt128(N->getOperand(3), DAG); 11240 SDValue Ops[] = {N->getOperand(1), Desired.first, Desired.second, 11241 New.first, New.second, N->getOperand(0)}; 11242 SDNode *CmpSwap = DAG.getMachineNode( 11243 AArch64::CMP_SWAP_128, SDLoc(N), 11244 DAG.getVTList(MVT::i64, MVT::i64, MVT::i32, MVT::Other), Ops); 11245 11246 MachineFunction &MF = DAG.getMachineFunction(); 11247 MachineSDNode::mmo_iterator MemOp = MF.allocateMemRefsArray(1); 11248 MemOp[0] = cast<MemSDNode>(N)->getMemOperand(); 11249 cast<MachineSDNode>(CmpSwap)->setMemRefs(MemOp, MemOp + 1); 11250 11251 Results.push_back(SDValue(CmpSwap, 0)); 11252 
Results.push_back(SDValue(CmpSwap, 1)); 11253 Results.push_back(SDValue(CmpSwap, 3)); 11254 } 11255 11256 void AArch64TargetLowering::ReplaceNodeResults( 11257 SDNode *N, SmallVectorImpl<SDValue> &Results, SelectionDAG &DAG) const { 11258 switch (N->getOpcode()) { 11259 default: 11260 llvm_unreachable("Don't know how to custom expand this"); 11261 case ISD::BITCAST: 11262 ReplaceBITCASTResults(N, Results, DAG); 11263 return; 11264 case ISD::VECREDUCE_ADD: 11265 case ISD::VECREDUCE_SMAX: 11266 case ISD::VECREDUCE_SMIN: 11267 case ISD::VECREDUCE_UMAX: 11268 case ISD::VECREDUCE_UMIN: 11269 Results.push_back(LowerVECREDUCE(SDValue(N, 0), DAG)); 11270 return; 11271 11272 case AArch64ISD::SADDV: 11273 ReplaceReductionResults(N, Results, DAG, ISD::ADD, AArch64ISD::SADDV); 11274 return; 11275 case AArch64ISD::UADDV: 11276 ReplaceReductionResults(N, Results, DAG, ISD::ADD, AArch64ISD::UADDV); 11277 return; 11278 case AArch64ISD::SMINV: 11279 ReplaceReductionResults(N, Results, DAG, ISD::SMIN, AArch64ISD::SMINV); 11280 return; 11281 case AArch64ISD::UMINV: 11282 ReplaceReductionResults(N, Results, DAG, ISD::UMIN, AArch64ISD::UMINV); 11283 return; 11284 case AArch64ISD::SMAXV: 11285 ReplaceReductionResults(N, Results, DAG, ISD::SMAX, AArch64ISD::SMAXV); 11286 return; 11287 case AArch64ISD::UMAXV: 11288 ReplaceReductionResults(N, Results, DAG, ISD::UMAX, AArch64ISD::UMAXV); 11289 return; 11290 case ISD::FP_TO_UINT: 11291 case ISD::FP_TO_SINT: 11292 assert(N->getValueType(0) == MVT::i128 && "unexpected illegal conversion"); 11293 // Let normal code take care of it by not adding anything to Results. 11294 return; 11295 case ISD::ATOMIC_CMP_SWAP: 11296 ReplaceCMP_SWAP_128Results(N, Results, DAG, Subtarget); 11297 return; 11298 } 11299 } 11300 11301 bool AArch64TargetLowering::useLoadStackGuardNode() const { 11302 if (Subtarget->isTargetAndroid() || Subtarget->isTargetFuchsia()) 11303 return TargetLowering::useLoadStackGuardNode(); 11304 return true; 11305 } 11306 11307 unsigned AArch64TargetLowering::combineRepeatedFPDivisors() const { 11308 // Combine multiple FDIVs with the same divisor into multiple FMULs by the 11309 // reciprocal if there are three or more FDIVs. 11310 return 3; 11311 } 11312 11313 TargetLoweringBase::LegalizeTypeAction 11314 AArch64TargetLowering::getPreferredVectorAction(EVT VT) const { 11315 MVT SVT = VT.getSimpleVT(); 11316 // During type legalization, we prefer to widen v1i8, v1i16, v1i32 to v8i8, 11317 // v4i16, v2i32 instead of to promote. 11318 if (SVT == MVT::v1i8 || SVT == MVT::v1i16 || SVT == MVT::v1i32 11319 || SVT == MVT::v1f32) 11320 return TypeWidenVector; 11321 11322 return TargetLoweringBase::getPreferredVectorAction(VT); 11323 } 11324 11325 // Loads and stores less than 128-bits are already atomic; ones above that 11326 // are doomed anyway, so defer to the default libcall and blame the OS when 11327 // things go wrong. 11328 bool AArch64TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const { 11329 unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits(); 11330 return Size == 128; 11331 } 11332 11333 // Loads and stores less than 128-bits are already atomic; ones above that 11334 // are doomed anyway, so defer to the default libcall and blame the OS when 11335 // things go wrong. 11336 TargetLowering::AtomicExpansionKind 11337 AArch64TargetLowering::shouldExpandAtomicLoadInIR(LoadInst *LI) const { 11338 unsigned Size = LI->getType()->getPrimitiveSizeInBits(); 11339 return Size == 128 ? 
AtomicExpansionKind::LLSC : AtomicExpansionKind::None;
11340 }
11341
11342 // For the real atomic operations, we have ldxr/stxr up to 128 bits.
11343 TargetLowering::AtomicExpansionKind
11344 AArch64TargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
11345   unsigned Size = AI->getType()->getPrimitiveSizeInBits();
11346   if (Size > 128) return AtomicExpansionKind::None;
11347   // Nand is not supported in LSE.
11348   if (AI->getOperation() == AtomicRMWInst::Nand) return AtomicExpansionKind::LLSC;
11349   // Leave 128 bits to LLSC.
11350   return (Subtarget->hasLSE() && Size < 128) ? AtomicExpansionKind::None : AtomicExpansionKind::LLSC;
11351 }
11352
11353 bool AArch64TargetLowering::shouldExpandAtomicCmpXchgInIR(
11354     AtomicCmpXchgInst *AI) const {
11355   // If subtarget has LSE, leave cmpxchg intact for codegen.
11356   if (Subtarget->hasLSE()) return false;
11357   // At -O0, fast-regalloc cannot cope with the live vregs necessary to
11358   // implement cmpxchg without spilling. If the address being exchanged is also
11359   // on the stack and close enough to the spill slot, this can lead to a
11360   // situation where the monitor always gets cleared and the atomic operation
11361   // can never succeed. So at -O0 we need a late-expanded pseudo-inst instead.
11362   return getTargetMachine().getOptLevel() != 0;
11363 }
11364
11365 Value *AArch64TargetLowering::emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
11366                                              AtomicOrdering Ord) const {
11367   Module *M = Builder.GetInsertBlock()->getParent()->getParent();
11368   Type *ValTy = cast<PointerType>(Addr->getType())->getElementType();
11369   bool IsAcquire = isAcquireOrStronger(Ord);
11370
11371   // Since i128 isn't legal and intrinsics don't get type-lowered, the ldxp
11372   // intrinsic must return {i64, i64} and we have to recombine them into a
11373   // single i128 here.
11374   if (ValTy->getPrimitiveSizeInBits() == 128) {
11375     Intrinsic::ID Int =
11376         IsAcquire ? Intrinsic::aarch64_ldaxp : Intrinsic::aarch64_ldxp;
11377     Function *Ldxr = Intrinsic::getDeclaration(M, Int);
11378
11379     Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext()));
11380     Value *LoHi = Builder.CreateCall(Ldxr, Addr, "lohi");
11381
11382     Value *Lo = Builder.CreateExtractValue(LoHi, 0, "lo");
11383     Value *Hi = Builder.CreateExtractValue(LoHi, 1, "hi");
11384     Lo = Builder.CreateZExt(Lo, ValTy, "lo64");
11385     Hi = Builder.CreateZExt(Hi, ValTy, "hi64");
11386     return Builder.CreateOr(
11387         Lo, Builder.CreateShl(Hi, ConstantInt::get(ValTy, 64)), "val64");
11388   }
11389
11390   Type *Tys[] = { Addr->getType() };
11391   Intrinsic::ID Int =
11392       IsAcquire ?
Intrinsic::aarch64_ldaxr : Intrinsic::aarch64_ldxr; 11393 Function *Ldxr = Intrinsic::getDeclaration(M, Int, Tys); 11394 11395 return Builder.CreateTruncOrBitCast( 11396 Builder.CreateCall(Ldxr, Addr), 11397 cast<PointerType>(Addr->getType())->getElementType()); 11398 } 11399 11400 void AArch64TargetLowering::emitAtomicCmpXchgNoStoreLLBalance( 11401 IRBuilder<> &Builder) const { 11402 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 11403 Builder.CreateCall(Intrinsic::getDeclaration(M, Intrinsic::aarch64_clrex)); 11404 } 11405 11406 Value *AArch64TargetLowering::emitStoreConditional(IRBuilder<> &Builder, 11407 Value *Val, Value *Addr, 11408 AtomicOrdering Ord) const { 11409 Module *M = Builder.GetInsertBlock()->getParent()->getParent(); 11410 bool IsRelease = isReleaseOrStronger(Ord); 11411 11412 // Since the intrinsics must have legal type, the i128 intrinsics take two 11413 // parameters: "i64, i64". We must marshal Val into the appropriate form 11414 // before the call. 11415 if (Val->getType()->getPrimitiveSizeInBits() == 128) { 11416 Intrinsic::ID Int = 11417 IsRelease ? Intrinsic::aarch64_stlxp : Intrinsic::aarch64_stxp; 11418 Function *Stxr = Intrinsic::getDeclaration(M, Int); 11419 Type *Int64Ty = Type::getInt64Ty(M->getContext()); 11420 11421 Value *Lo = Builder.CreateTrunc(Val, Int64Ty, "lo"); 11422 Value *Hi = Builder.CreateTrunc(Builder.CreateLShr(Val, 64), Int64Ty, "hi"); 11423 Addr = Builder.CreateBitCast(Addr, Type::getInt8PtrTy(M->getContext())); 11424 return Builder.CreateCall(Stxr, {Lo, Hi, Addr}); 11425 } 11426 11427 Intrinsic::ID Int = 11428 IsRelease ? Intrinsic::aarch64_stlxr : Intrinsic::aarch64_stxr; 11429 Type *Tys[] = { Addr->getType() }; 11430 Function *Stxr = Intrinsic::getDeclaration(M, Int, Tys); 11431 11432 return Builder.CreateCall(Stxr, 11433 {Builder.CreateZExtOrBitCast( 11434 Val, Stxr->getFunctionType()->getParamType(0)), 11435 Addr}); 11436 } 11437 11438 bool AArch64TargetLowering::functionArgumentNeedsConsecutiveRegisters( 11439 Type *Ty, CallingConv::ID CallConv, bool isVarArg) const { 11440 return Ty->isArrayTy(); 11441 } 11442 11443 bool AArch64TargetLowering::shouldNormalizeToSelectSequence(LLVMContext &, 11444 EVT) const { 11445 return false; 11446 } 11447 11448 static Value *UseTlsOffset(IRBuilder<> &IRB, unsigned Offset) { 11449 Module *M = IRB.GetInsertBlock()->getParent()->getParent(); 11450 Function *ThreadPointerFunc = 11451 Intrinsic::getDeclaration(M, Intrinsic::thread_pointer); 11452 return IRB.CreatePointerCast( 11453 IRB.CreateConstGEP1_32(IRB.CreateCall(ThreadPointerFunc), Offset), 11454 Type::getInt8PtrTy(IRB.getContext())->getPointerTo(0)); 11455 } 11456 11457 Value *AArch64TargetLowering::getIRStackGuard(IRBuilder<> &IRB) const { 11458 // Android provides a fixed TLS slot for the stack cookie. See the definition 11459 // of TLS_SLOT_STACK_GUARD in 11460 // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h 11461 if (Subtarget->isTargetAndroid()) 11462 return UseTlsOffset(IRB, 0x28); 11463 11464 // Fuchsia is similar. 11465 // <zircon/tls.h> defines ZX_TLS_STACK_GUARD_OFFSET with this value. 11466 if (Subtarget->isTargetFuchsia()) 11467 return UseTlsOffset(IRB, -0x10); 11468 11469 return TargetLowering::getIRStackGuard(IRB); 11470 } 11471 11472 Value *AArch64TargetLowering::getSafeStackPointerLocation(IRBuilder<> &IRB) const { 11473 // Android provides a fixed TLS slot for the SafeStack pointer. 
See the
11474 // definition of TLS_SLOT_SAFESTACK in
11475 // https://android.googlesource.com/platform/bionic/+/master/libc/private/bionic_tls.h
11476   if (Subtarget->isTargetAndroid())
11477     return UseTlsOffset(IRB, 0x48);
11478
11479   // Fuchsia is similar.
11480   // <zircon/tls.h> defines ZX_TLS_UNSAFE_SP_OFFSET with this value.
11481   if (Subtarget->isTargetFuchsia())
11482     return UseTlsOffset(IRB, -0x8);
11483
11484   return TargetLowering::getSafeStackPointerLocation(IRB);
11485 }
11486
11487 bool AArch64TargetLowering::isMaskAndCmp0FoldingBeneficial(
11488     const Instruction &AndI) const {
11489   // Only sink 'and' mask to cmp use block if it is masking a single bit, since
11490   // this is likely to fold the and/cmp/br into a single tbz instruction. It
11491   // may be beneficial to sink in other cases, but we would have to check that
11492   // the cmp would not get folded into the br to form a cbz for these to be
11493   // beneficial.
11494   ConstantInt *Mask = dyn_cast<ConstantInt>(AndI.getOperand(1));
11495   if (!Mask)
11496     return false;
11497   return Mask->getValue().isPowerOf2();
11498 }
11499
11500 void AArch64TargetLowering::initializeSplitCSR(MachineBasicBlock *Entry) const {
11501   // Update IsSplitCSR in AArch64FunctionInfo.
11502   AArch64FunctionInfo *AFI = Entry->getParent()->getInfo<AArch64FunctionInfo>();
11503   AFI->setIsSplitCSR(true);
11504 }
11505
11506 void AArch64TargetLowering::insertCopiesSplitCSR(
11507     MachineBasicBlock *Entry,
11508     const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
11509   const AArch64RegisterInfo *TRI = Subtarget->getRegisterInfo();
11510   const MCPhysReg *IStart = TRI->getCalleeSavedRegsViaCopy(Entry->getParent());
11511   if (!IStart)
11512     return;
11513
11514   const TargetInstrInfo *TII = Subtarget->getInstrInfo();
11515   MachineRegisterInfo *MRI = &Entry->getParent()->getRegInfo();
11516   MachineBasicBlock::iterator MBBI = Entry->begin();
11517   for (const MCPhysReg *I = IStart; *I; ++I) {
11518     const TargetRegisterClass *RC = nullptr;
11519     if (AArch64::GPR64RegClass.contains(*I))
11520       RC = &AArch64::GPR64RegClass;
11521     else if (AArch64::FPR64RegClass.contains(*I))
11522       RC = &AArch64::FPR64RegClass;
11523     else
11524       llvm_unreachable("Unexpected register class in CSRsViaCopy!");
11525
11526     unsigned NewVR = MRI->createVirtualRegister(RC);
11527     // Create copy from CSR to a virtual register.
11528     // FIXME: this currently does not emit CFI pseudo-instructions, it works
11529     // fine for CXX_FAST_TLS since the C++-style TLS access functions should be
11530     // nounwind. If we want to generalize this later, we may need to emit
11531     // CFI pseudo-instructions.
11532     assert(Entry->getParent()->getFunction().hasFnAttribute(
11533                Attribute::NoUnwind) &&
11534            "Function should be nounwind in insertCopiesSplitCSR!");
11535     Entry->addLiveIn(*I);
11536     BuildMI(*Entry, MBBI, DebugLoc(), TII->get(TargetOpcode::COPY), NewVR)
11537         .addReg(*I);
11538
11539     // Insert the copy-back instructions right before the terminator.
11540     for (auto *Exit : Exits)
11541       BuildMI(*Exit, Exit->getFirstTerminator(), DebugLoc(),
11542               TII->get(TargetOpcode::COPY), *I)
11543           .addReg(NewVR);
11544   }
11545 }
11546
11547 bool AArch64TargetLowering::isIntDivCheap(EVT VT, AttributeList Attr) const {
11548   // Integer division on AArch64 is expensive. However, when aggressively
11549   // optimizing for code size, we prefer to use a div instruction, as it is
11550   // usually smaller than the alternative sequence.
11551   // The exception to this is vector division.
Since AArch64 doesn't have vector
11552   // integer division, leaving the division as-is is a loss even in terms of
11553   // size, because it will have to be scalarized, while the alternative code
11554   // sequence can be performed in vector form.
11555   bool OptSize =
11556       Attr.hasAttribute(AttributeList::FunctionIndex, Attribute::MinSize);
11557   return OptSize && !VT.isVector();
11558 }
11559
11560 bool AArch64TargetLowering::enableAggressiveFMAFusion(EVT VT) const {
11561   return Subtarget->hasAggressiveFMA() && VT.isFloatingPoint();
11562 }
11563
11564 unsigned
11565 AArch64TargetLowering::getVaListSizeInBits(const DataLayout &DL) const {
11566   if (Subtarget->isTargetDarwin() || Subtarget->isTargetWindows())
11567     return getPointerTy(DL).getSizeInBits();
11568
  // The AAPCS64 va_list is a struct of three pointers (__stack, __gr_top,
  // __vr_top) plus two 32-bit offsets (__gr_offs, __vr_offs), hence the size
  // computed below.
11569   return 3 * getPointerTy(DL).getSizeInBits() + 2 * 32;
11570 }
11571
11572 void AArch64TargetLowering::finalizeLowering(MachineFunction &MF) const {
11573   MF.getFrameInfo().computeMaxCallFrameSize(MF);
11574   TargetLoweringBase::finalizeLowering(MF);
11575 }
11576